#! /usr/bin/env python3
# Copyright (c) 2017 Linaro Limited.
# Copyright (c) 2017 Open Source Foundries Limited.
#
# SPDX-License-Identifier: Apache-2.0
"""Zephyr binary runner core interfaces
This provides the core ZephyrBinaryRunner class meant for public use,
as well as some other helpers for concrete runner classes.
"""
import abc
import argparse
import errno
import logging
import os
import platform
import shlex
import shutil
import signal
import subprocess
import re
from typing import Dict, List, NamedTuple, NoReturn, Optional, Set, Type, \
Union
# Turn on to enable just logging the commands that would be run (at
# info rather than debug level), without actually running them. This
# can break runners that are expecting output or if one command
# depends on another, so it's just for debugging.
_DRY_RUN = False
_logger = logging.getLogger('runners')
class _DebugDummyPopen:
def terminate(self):
pass
def wait(self):
pass
MAX_PORT = 49151
class NetworkPortHelper:
'''Helper class for dealing with local IP network ports.'''
def get_unused_ports(self, starting_from):
'''Find unused network ports, starting at given values.
starting_from is an iterable of ports the caller would like to use.
The return value is an iterable of ports, in the same order, using
the given values if they were unused, or the next sequentially
available unused port otherwise.
Ports may be bound between this call's check and actual usage, so
callers still need to handle errors involving returned ports.'''
start = list(starting_from)
used = self._used_now()
ret = []
for desired in start:
port = desired
while port in used:
port += 1
if port > MAX_PORT:
msg = "ports above {} are in use"
raise ValueError(msg.format(desired))
used.add(port)
ret.append(port)
return ret
def _used_now(self):
handlers = {
'Windows': self._used_now_windows,
'Linux': self._used_now_linux,
'Darwin': self._used_now_darwin,
}
handler = handlers[platform.system()]
return handler()
def _used_now_windows(self):
cmd = ['netstat', '-a', '-n', '-p', 'tcp']
return self._parser_windows(cmd)
def _used_now_linux(self):
cmd = ['ss', '-a', '-n', '-t']
return self._parser_linux(cmd)
def _used_now_darwin(self):
cmd = ['netstat', '-a', '-n', '-p', 'tcp']
return self._parser_darwin(cmd)
@staticmethod
def _parser_windows(cmd):
out = subprocess.check_output(cmd).split(b'\r\n')
used_bytes = [x.split()[1].rsplit(b':', 1)[1] for x in out
if x.startswith(b' TCP')]
return {int(b) for b in used_bytes}
@staticmethod
def _parser_linux(cmd):
out = subprocess.check_output(cmd).splitlines()[1:]
used_bytes = [s.split()[3].rsplit(b':', 1)[1] for s in out]
return {int(b) for b in used_bytes}
@staticmethod
def _parser_darwin(cmd):
out = subprocess.check_output(cmd).split(b'\n')
used_bytes = [x.split()[3].rsplit(b':', 1)[1] for x in out
if x.startswith(b'tcp')]
return {int(b) for b in used_bytes}
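# Usage sketch (illustrative only; the port numbers are placeholders, not
# defaults used anywhere in this module). Ask for the ports you would prefer;
# if one is already bound, the next sequentially free port is returned instead.
def _example_get_unused_ports():
    helper = NetworkPortHelper()
    gdb_port, telnet_port = helper.get_unused_ports([3333, 3334])
    return gdb_port, telnet_port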
class BuildConfiguration:
'''This helper class provides access to build-time configuration.
Configuration options can be read as if the object were a dict,
either object['CONFIG_FOO'] or object.get('CONFIG_FOO').
Kconfig configuration values are available (parsed from .config).'''
def __init__(self, build_dir: str):
self.build_dir = build_dir
self.options: Dict[str, Union[str, int]] = {}
self.path = os.path.join(self.build_dir, 'zephyr', '.config')
self._parse()
def __contains__(self, item):
return item in self.options
def __getitem__(self, item):
return self.options[item]
def get(self, option, *args):
return self.options.get(option, *args)
def getboolean(self, option):
'''If a boolean option is explicitly set to y or n,
returns its value. Otherwise, falls back to False.
'''
return self.options.get(option, False)
def _parse(self):
filename = self.path
opt_value = re.compile('^(?P<option>CONFIG_[A-Za-z0-9_]+)=(?P<value>.*)$')
not_set = re.compile('^# (?P<option>CONFIG_[A-Za-z0-9_]+) is not set$')
with open(filename, 'r') as f:
for line in f:
match = opt_value.match(line)
if match:
value = match.group('value').rstrip()
if value.startswith('"') and value.endswith('"'):
# A string literal should have the quotes stripped,
# but otherwise be left as is.
value = value[1:-1]
elif value == 'y':
# The character 'y' is a boolean option
# that is set to True.
value = True
else:
# Neither a string nor 'y', so try to parse it
# as an integer.
try:
base = 16 if value.startswith('0x') else 10
self.options[match.group('option')] = int(value, base=base)
continue
except ValueError:
pass
self.options[match.group('option')] = value
continue
match = not_set.match(line)
if match:
# '# CONFIG_FOO is not set' means a boolean option is false.
self.options[match.group('option')] = False
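# Usage sketch (illustrative; whether these Kconfig symbols are set depends on
# the application's build):
def _example_build_configuration(build_dir):
    conf = BuildConfiguration(build_dir)          # parses <build_dir>/zephyr/.config
    board = conf.get('CONFIG_BOARD', 'unknown')   # string value, quotes stripped
    use_xip = conf.getboolean('CONFIG_XIP')       # False when '# ... is not set'
    base = conf['CONFIG_FLASH_BASE_ADDRESS'] if 'CONFIG_FLASH_BASE_ADDRESS' in conf else 0
    return board, use_xip, base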
class MissingProgram(FileNotFoundError):
'''FileNotFoundError subclass for missing program dependencies.
No significant changes from the parent FileNotFoundError; this is
useful for explicitly signaling that the file in question is a
program that some class requires to proceed.
The filename attribute contains the missing program.'''
def __init__(self, program):
super().__init__(errno.ENOENT, os.strerror(errno.ENOENT), program)
class RunnerCaps:
'''This class represents a runner class's capabilities.
Each capability is represented as an attribute with the same
name. Flag attributes are True or False.
Available capabilities:
- commands: set of supported commands; default is {'flash',
'debug', 'debugserver', 'attach'}.
- dev_id: whether the runner supports device identifiers, in the form of an
-i, --dev-id option. This is useful when the user has multiple debuggers
connected to a single computer, in order to select which one will be used
with the command provided.
- flash_addr: whether the runner supports flashing to an
arbitrary address. Default is False. If true, the runner
must honor the --dt-flash option.
- erase: whether the runner supports an --erase option, which
does a mass-erase of the entire addressable flash on the target
before flashing. On multi-core SoCs, this may only erase portions of
flash specific to the actual target core. (This option can be useful for
things like clearing out old settings values or other subsystem state
that may affect the behavior of the zephyr image. It is also sometimes
needed by SoCs which have flash-like areas that can't be sector
erased by the underlying tool before flashing; UICR on nRF SoCs
is one example.)
'''
def __init__(self,
commands: Set[str] = {'flash', 'debug',
'debugserver', 'attach'},
dev_id: bool = False,
flash_addr: bool = False,
erase: bool = False):
self.commands = commands
self.dev_id = dev_id
self.flash_addr = bool(flash_addr)
self.erase = bool(erase)
def __str__(self):
return (f'RunnerCaps(commands={self.commands}, '
f'dev_id={self.dev_id}, '
f'flash_addr={self.flash_addr}, '
f'erase={self.erase}'
')')
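# A sketch of how a runner class advertises its capabilities (illustrative
# values; real runners override capabilities() as needed):
#
#     @classmethod
#     def capabilities(cls):
#         return RunnerCaps(commands={'flash', 'debug'},
#                           dev_id=True,       # supports -i/--dev-id
#                           flash_addr=True,   # honors --dt-flash
#                           erase=True)        # supports --erase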
def _missing_cap(cls: Type['ZephyrBinaryRunner'], option: str) -> NoReturn:
# Helper function that's called when an option was given on the
# command line that corresponds to a missing capability in the
# runner class cls.
raise ValueError(f"{cls.name()} doesn't support {option} option")
class RunnerConfig(NamedTuple):
'''Runner execution-time configuration.
This is a common object shared by all runners. Individual runners
can register specific configuration options using their
do_add_parser() hooks.
'''
build_dir: str # application build directory
board_dir: str # board definition directory
elf_file: Optional[str] # zephyr.elf path, or None
hex_file: Optional[str] # zephyr.hex path, or None
bin_file: Optional[str] # zephyr.bin path, or None
gdb: Optional[str] = None # path to a usable gdb
openocd: Optional[str] = None # path to a usable openocd
openocd_search: List[str] = [] # add these paths to the openocd search path
_YN_CHOICES = ['Y', 'y', 'N', 'n', 'yes', 'no', 'YES', 'NO']
class _DTFlashAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if values.lower().startswith('y'):
namespace.dt_flash = True
else:
namespace.dt_flash = False
class _ToggleAction(argparse.Action):
def __call__(self, parser, args, ignored, option):
setattr(args, self.dest, not option.startswith('--no-'))
class DeprecatedAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
_logger.warning(f'Argument {self.option_strings[0]} is deprecated, '
f'use {self._replacement} instead.')
setattr(namespace, self.dest, values)
def depr_action(*args, replacement=None, **kwargs):
action = DeprecatedAction(*args, **kwargs)
setattr(action, '_replacement', replacement)
return action
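# Sketch of wiring a deprecated option to its replacement (the option names
# are hypothetical). argparse forwards the extra 'replacement' keyword from
# add_argument() to depr_action(), which stores it on the action so that
# DeprecatedAction can name the new option in its warning.
def _example_deprecated_option(parser):
    parser.add_argument('--old-speed', dest='speed', action=depr_action,
                        replacement='--speed', help=argparse.SUPPRESS)
    parser.add_argument('--speed', help='interface speed (hypothetical)')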
class ZephyrBinaryRunner(abc.ABC):
'''Abstract superclass for binary runners (flashers, debuggers).
**Note**: this class's API has changed relatively rarely since it
was added, but it is not considered a stable Zephyr API, and may change
without notice.
With some exceptions, boards supported by Zephyr must provide
generic means to be flashed (have a Zephyr firmware binary
permanently installed on the device for running) and debugged
(have a breakpoint debugger and program loader on a host
workstation attached to a running target).
This is supported by four top-level commands managed by the
Zephyr build system:
- 'flash': flash a previously configured binary to the board,
start execution on the target, then return.
- 'debug': connect to the board via a debugging protocol, program
the flash, then drop the user into a debugger interface with
symbol tables loaded from the current binary, and block until it
exits.
- 'debugserver': connect via a board-specific debugging protocol,
then reset and halt the target. Ensure the user is now able to
connect to a debug server with symbol tables loaded from the
binary.
- 'attach': connect to the board via a debugging protocol, then drop
the user into a debugger interface with symbol tables loaded from
the current binary, and block until it exits. Unlike 'debug', this
command does not program the flash.
This class provides an API for these commands. Every subclass is
called a 'runner' for short. Each runner has a name (like
'pyocd'), and declares commands it can handle (like
'flash'). Boards (like 'nrf52dk_nrf52832') declare which runner(s)
are compatible with them to the Zephyr build system, along with
information on how to configure the runner to work with the board.
The build system will then place enough information in the build
directory to create and use runners with this class's create()
method, which provides a command line argument parsing API. You
can also create runners by instantiating subclasses directly.
In order to define your own runner, you need to:
1. Define a ZephyrBinaryRunner subclass, and implement its
abstract methods. You may need to override capabilities().
2. Make sure the Python module defining your runner class is
imported, e.g. by editing this package's __init__.py (otherwise,
get_runners() won't work).
3. Give your runner's name to the Zephyr build system in your
board's board.cmake.
Additional advice:
- If you need to import any non-standard-library modules, make sure
to catch ImportError and defer complaints about it to a RuntimeError
if one is missing. This avoids affecting users that don't require your
runner, while still making it clear what went wrong to users who do
require it but don't have the necessary modules installed.
- If you need to ask the user something (e.g. using input()), do it
in your create() classmethod, not do_run(). That ensures your
__init__() really has everything it needs to call do_run(), and also
avoids calling input() when not instantiating within a command line
application.
- Use self.logger to log messages using the standard library's
logging API; your logger is named "runners.<your-runner-name()>".
For command-line invocation from the Zephyr build system, runners
define their own argparse-based interface through the common
add_parser() (and runner-specific do_add_parser() it delegates
to), and provide a way to create instances of themselves from
a RunnerConfig and parsed runner-specific arguments via create().
Runners use a variety of host tools and configuration values, the
user interface to which is abstracted by this class. Each runner
subclass should take any values it needs to execute one of these
commands in its constructor. The actual command execution is
handled in the run() method.'''
def __init__(self, cfg: RunnerConfig):
'''Initialize core runner state.'''
self.cfg = cfg
'''RunnerConfig for this instance.'''
self.logger = logging.getLogger('runners.{}'.format(self.name()))
'''logging.Logger for this instance.'''
@staticmethod
def get_runners() -> List[Type['ZephyrBinaryRunner']]:
'''Get a list of all currently defined runner classes.'''
return ZephyrBinaryRunner.__subclasses__()
@classmethod
@abc.abstractmethod
def name(cls) -> str:
'''Return this runner's user-visible name.
When choosing a name, pick something short and lowercase,
based on the name of the tool (like openocd, jlink, etc.) or
the target architecture/board (like xtensa etc.).'''
@classmethod
def capabilities(cls) -> RunnerCaps:
'''Returns a RunnerCaps representing this runner's capabilities.
This implementation returns the default capabilities.
Subclasses should override appropriately if needed.'''
return RunnerCaps()
@classmethod
def add_parser(cls, parser):
'''Adds a sub-command parser for this runner.
The given object, parser, is a sub-command parser from the
argparse module. For more details, refer to the documentation
for argparse.ArgumentParser.add_subparsers().
The lone common optional argument is:
* --dt-flash (if the runner capabilities includes flash_addr)
Runner-specific options are added through the do_add_parser()
hook.'''
# Unfortunately, the parser argument's type is not documented
# in typeshed, so we can't type annotate much here.
# Common options that depend on runner capabilities. If a
# capability is not supported, the option string or strings
# are added anyway, to prevent an individual runner class from
# using them to mean something else.
caps = cls.capabilities()
if caps.dev_id:
parser.add_argument('-i', '--dev-id',
dest='dev_id',
help=cls.dev_id_help())
else:
parser.add_argument('-i', '--dev-id', help=argparse.SUPPRESS)
if caps.flash_addr:
parser.add_argument('--dt-flash', default='n', choices=_YN_CHOICES,
action=_DTFlashAction,
help='''If 'yes', try to use flash address
information from devicetree when flash
addresses are unknown (e.g. when flashing a .bin)''')
else:
parser.add_argument('--dt-flash', help=argparse.SUPPRESS)
parser.add_argument('--erase', '--no-erase', nargs=0,
action=_ToggleAction,
help=("mass erase flash before loading, or don't"
if caps.erase else argparse.SUPPRESS))
# Runner-specific options.
cls.do_add_parser(parser)
@classmethod
@abc.abstractmethod
def do_add_parser(cls, parser):
'''Hook for adding runner-specific options.'''
@classmethod
def create(cls, cfg: RunnerConfig,
args: argparse.Namespace) -> 'ZephyrBinaryRunner':
'''Create an instance from command-line arguments.
- ``cfg``: runner configuration (pass to superclass __init__)
- ``args``: arguments parsed from execution environment, as
specified by ``add_parser()``.'''
caps = cls.capabilities()
if args.dev_id and not caps.dev_id:
_missing_cap(cls, '--dev-id')
if args.dt_flash and not caps.flash_addr:
_missing_cap(cls, '--dt-flash')
if args.erase and not caps.erase:
_missing_cap(cls, '--erase')
ret = cls.do_create(cfg, args)
if args.erase:
ret.logger.info('mass erase requested')
return ret
@classmethod
@abc.abstractmethod
def do_create(cls, cfg: RunnerConfig,
args: argparse.Namespace) -> 'ZephyrBinaryRunner':
'''Hook for instance creation from command line arguments.'''
@staticmethod
def get_flash_address(args: argparse.Namespace,
build_conf: BuildConfiguration,
default: int = 0x0) -> int:
'''Helper method for extracting a flash address.
If args.dt_flash is true, returns the address obtained from
ZephyrBinaryRunner.flash_address_from_build_conf(build_conf).
Otherwise (when args.dt_flash is False), the default value is
returned.'''
if args.dt_flash:
return ZephyrBinaryRunner.flash_address_from_build_conf(build_conf)
else:
return default
@staticmethod
def flash_address_from_build_conf(build_conf: BuildConfiguration):
'''If CONFIG_HAS_FLASH_LOAD_OFFSET is n in build_conf,
return the CONFIG_FLASH_BASE_ADDRESS value. Otherwise, return
CONFIG_FLASH_BASE_ADDRESS + CONFIG_FLASH_LOAD_OFFSET.
'''
if build_conf.getboolean('CONFIG_HAS_FLASH_LOAD_OFFSET'):
return (build_conf['CONFIG_FLASH_BASE_ADDRESS'] +
build_conf['CONFIG_FLASH_LOAD_OFFSET'])
else:
return build_conf['CONFIG_FLASH_BASE_ADDRESS']
def run(self, command: str, **kwargs):
'''Runs command ('flash', 'debug', 'debugserver', 'attach').
This is the main entry point to this runner.'''
caps = self.capabilities()
if command not in caps.commands:
raise ValueError('runner {} does not implement command {}'.format(
self.name(), command))
self.do_run(command, **kwargs)
@abc.abstractmethod
def do_run(self, command: str, **kwargs):
'''Concrete runner; run() delegates to this. Implement in subclasses.
In case of an unsupported command, raise a ValueError.'''
@property
def build_conf(self) -> BuildConfiguration:
'''Get a BuildConfiguration for the build directory.'''
if not hasattr(self, '_build_conf'):
self._build_conf = BuildConfiguration(self.cfg.build_dir)
return self._build_conf
@property
def thread_info_enabled(self) -> bool:
'''Returns True if self.build_conf has
CONFIG_DEBUG_THREAD_INFO enabled.
'''
return self.build_conf.getboolean('CONFIG_DEBUG_THREAD_INFO')
@classmethod
def dev_id_help(cls) -> str:
''' Get the ArgParse help text for the --dev-id option.'''
return '''Device identifier. Use it to select
which debugger, device, node or instance to
target when multiple ones are available or
connected.'''
@staticmethod
def require(program: str) -> str:
'''Require that a program is installed before proceeding.
:param program: name of the program that is required,
or path to a program binary.
If ``program`` is an absolute path to an existing program
binary, this call succeeds. Otherwise, try to find the program
by name on the system PATH.
If the program can be found, its path is returned.
Otherwise, raises MissingProgram.'''
ret = shutil.which(program)
if ret is None:
raise MissingProgram(program)
return ret
def run_server_and_client(self, server, client):
'''Run a server that ignores SIGINT, and a client that handles it.
This routine portably:
- creates a Popen object for the ``server`` command which ignores
SIGINT
- runs ``client`` in a subprocess while temporarily ignoring SIGINT
- cleans up the server after the client exits.
It's useful to e.g. open a GDB server and client.'''
server_proc = self.popen_ignore_int(server)
try:
self.run_client(client)
finally:
server_proc.terminate()
server_proc.wait()
def run_client(self, client):
'''Run a client that handles SIGINT.'''
previous = signal.signal(signal.SIGINT, signal.SIG_IGN)
try:
self.check_call(client)
finally:
signal.signal(signal.SIGINT, previous)
def _log_cmd(self, cmd: List[str]):
escaped = ' '.join(shlex.quote(s) for s in cmd)
if not _DRY_RUN:
self.logger.debug(escaped)
else:
self.logger.info(escaped)
def call(self, cmd: List[str], **kwargs) -> int:
'''Subclass subprocess.call() wrapper.
Subclasses should use this method to run command in a
subprocess and get its return code, rather than
using subprocess directly, to keep accurate debug logs.
'''
self._log_cmd(cmd)
if _DRY_RUN:
return 0
return subprocess.call(cmd, **kwargs)
def check_call(self, cmd: List[str], **kwargs):
'''Subclass subprocess.check_call() wrapper.
Subclasses should use this method to run command in a
subprocess and check that it executed correctly, rather than
using subprocess directly, to keep accurate debug logs.
'''
self._log_cmd(cmd)
if _DRY_RUN:
return
subprocess.check_call(cmd, **kwargs)
def check_output(self, cmd: List[str], **kwargs) -> bytes:
'''Subclass subprocess.check_output() wrapper.
Subclasses should use this method to run command in a
subprocess and check that it executed correctly, rather than
using subprocess directly, to keep accurate debug logs.
'''
self._log_cmd(cmd)
if _DRY_RUN:
return b''
return subprocess.check_output(cmd, **kwargs)
def popen_ignore_int(self, cmd: List[str]) -> subprocess.Popen:
'''Spawn a child command, ensuring it ignores SIGINT.
The returned subprocess.Popen object must be manually terminated.'''
cflags = 0
preexec = None
system = platform.system()
if system == 'Windows':
# We can't type check this line on Unix operating systems:
# mypy thinks the subprocess module has no such attribute.
cflags |= subprocess.CREATE_NEW_PROCESS_GROUP # type: ignore
elif system in {'Linux', 'Darwin'}:
# We can't type check this on Windows for the same reason.
preexec = os.setsid # type: ignore
self._log_cmd(cmd)
if _DRY_RUN:
return _DebugDummyPopen() # type: ignore
return subprocess.Popen(cmd, creationflags=cflags, preexec_fn=preexec)
def ensure_output(self, output_type: str) -> None:
'''Ensure self.cfg has a particular output artifact.
For example, ensure_output('bin') ensures that self.cfg.bin_file
refers to an existing file. Errors out if it's missing or undefined.
:param output_type: string naming the output type
'''
output_file = getattr(self.cfg, f'{output_type}_file', None)
if output_file is None:
err = f'{output_type} file location is unknown.'
elif not os.path.isfile(output_file):
err = f'{output_file} does not exist.'
else:
return
if output_type in ('elf', 'hex', 'bin'):
err += f' Try enabling CONFIG_BUILD_OUTPUT_{output_type.upper()}.'
# RuntimeError avoids a stack trace saved in run_common.
raise RuntimeError(err)
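# A minimal sketch of a custom runner following the steps described in the
# ZephyrBinaryRunner docstring. The 'mytool' host program and its '--flash'
# flag are hypothetical; a real runner lives in its own module that is
# imported from this package's __init__.py. The class is defined inside a
# helper function so that it is not registered via get_runners() at import
# time.
def _example_runner_sketch():
    class MyToolBinaryRunner(ZephyrBinaryRunner):
        def __init__(self, cfg, dev_id=None):
            super().__init__(cfg)
            self.dev_id = dev_id

        @classmethod
        def name(cls):
            return 'mytool'

        @classmethod
        def capabilities(cls):
            return RunnerCaps(commands={'flash'}, dev_id=True)

        @classmethod
        def do_add_parser(cls, parser):
            pass  # no runner-specific options in this sketch

        @classmethod
        def do_create(cls, cfg, args):
            return cls(cfg, dev_id=args.dev_id)

        def do_run(self, command, **kwargs):
            self.ensure_output('bin')
            tool = self.require('mytool')  # hypothetical host tool
            self.check_call([tool, '--flash', self.cfg.bin_file])

    return MyToolBinaryRunner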
#!/usr/bin/env python
import os
import sys
import json
import time
import argparse
import ConfigParser
import logging
import logging.config
from twisted.web import server
from twisted.web.server import Site
from twisted.web.resource import Resource
from twisted.internet import defer, reactor, ssl
from twisted.internet.defer import inlineCallbacks
from twisted.web.static import File
from radio import *
from search import Search
from database import Database
from pymongo import MongoClient
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
def json_out(f):
def wrap(*args, **kwargs):
self, request = args[:2]
self.add_response_headers(request)
response = f(*args, **kwargs)
return json.dumps(response)
return wrap
class APIResource(Resource):
def __init__(self, *args):
Resource.__init__(self)
self.putChild('session', SessionHandler(*args))
self.putChild('playlists', PlaylistsHandler(*args))
self.putChild('tracks', TracksHandler(*args))
self.putChild('recommend', RecommendHandler(*args))
self.putChild('clicklog', ClicklogHandler(*args))
self.putChild('waveform', WaveformHandler(*args))
self.putChild('info', InfoHandler(*args))
class BaseHandler(Resource):
isLeaf = True
def __init__(self, config, database, search):
Resource.__init__(self)
self.config = config
self.database = database
self.search = search
def error(self, request, message, status_code):
request.setResponseCode(status_code)
return {'error': message}
def add_response_headers(self, request):
request.responseHeaders.addRawHeader('content-type', 'application/json')
# CORS headers
request.responseHeaders.addRawHeader('Access-Control-Allow-Origin', '*')
request.responseHeaders.addRawHeader('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
request.responseHeaders.addRawHeader("Access-Control-Allow-Headers", "Authorization,X-Auth-Token,Content-Type,Accept")
@json_out
def render_OPTIONS(self, request):
return {}
def render_GET(self, request):
def finish_req(res, request):
request.write(json.dumps(res))
if not request.finished:
request.finish()
self.add_response_headers(request)
d = self._process_GET(request)
d.addCallback(finish_req, request)
return server.NOT_DONE_YET
@inlineCallbacks
def _process_GET(self, request):
defer.returnValue(self.error(request, 'Method not allowed', 405))
def render_POST(self, request):
def finish_req(res, request):
request.write(json.dumps(res))
if not request.finished:
request.finish()
self.add_response_headers(request)
d = self._process_POST(request)
d.addCallback(finish_req, request)
return server.NOT_DONE_YET
@inlineCallbacks
def _process_POST(self, request):
defer.returnValue(self.error(request, 'Method not allowed', 405))
class SessionHandler(BaseHandler):
@json_out
def render_GET(self, request):
token = self.database.create_session()
return {'token': token}
class PlaylistsHandler(BaseHandler):
@json_out
def render_GET(self, request):
token = request.args['token'][0] if 'token' in request.args else None
if token:
session = self.database.get_session(token)
if session is None:
return self.error(request, 'cannot find session', 404)
playlists = session['playlists']
for playlist_name, playlist in playlists.iteritems():
radio = self.database.find_radio(token, playlist_name)
if radio is not None:
playlist['radio_id'] = radio['_id']
return playlists
@json_out
def render_POST(self, request):
token = request.args['token'][0] if 'token' in request.args else None
session = self.database.get_session(token)
if session is None:
return self.error(request, 'cannot find session', 404)
body = request.content.read()
playlists_new = json.loads(body)
tracks_new = set((p['name'], track_id) for p in playlists_new.values() for track_id in p['tracks'])
playlists_old = session['playlists']
tracks_old = set((p['name'], t['_id']) for p in playlists_old.values() for t in p['tracks'] if t)
tracks_added = tracks_new - tracks_old
tracks_removed = tracks_old - tracks_new
check_metadata = False
for playlist_name, track_id in tracks_added:
for function in playlists_new[playlist_name].get('functions', []):
self.database.update_function_counter(track_id, function, 1)
# Check metadata for the new tracks in the identity playlist
if playlists_new[playlist_name].get('type', 'user') == 'identity':
track = self.database.get_track(track_id)
self.database.metadata_checker.check_track(track, add_sources=True)
check_metadata = True
for playlist_name, track_id in tracks_removed:
for function in playlists_old[playlist_name].get('functions', []):
self.database.update_function_counter(track_id, function, -1)
# Update radios
response = {}
for playlist_name, playlist in playlists_new.iteritems():
radio = self.database.find_radio(token, playlist_name)
radio_enabled = playlist.pop('radio_enabled', False)
if radio_enabled and radio is None:
radio_id = self.database.add_radio(token, playlist_name)
playlist['radio_id'] = radio_id
response['radios_created'] = response.get('radios_created', [])
response['radios_created'].append(radio_id)
if not radio_enabled and radio is not None:
self.database.delete_radio(radio['_id'])
response['radios_deleted'] = response.get('radios_deleted', [])
response['radios_deleted'].append(radio['_id'])
for playlist_name, playlist in playlists_old.iteritems():
if playlist_name not in playlists_new:
radio = self.database.find_radio(token, playlist_name)
if radio is not None:
self.database.delete_radio(radio['_id'])
response['radios_deleted'] = response.get('radios_deleted', [])
response['radios_deleted'].append(radio['_id'])
self.database.update_session(token, playlists_new)
# Run the metadata checker (needs to be called after update_session)
if check_metadata and not self.database.metadata_checker.checking:
self.database.metadata_checker.check_all()
return response
class TracksHandler(BaseHandler):
@inlineCallbacks
def _process_GET(self, request):
query = request.args['query'][0] if 'query' in request.args else None
id = request.args['id'][0] if 'id' in request.args else None
offset = request.args['offset'][0] if 'offset' in request.args else 0
page_size = request.args['pagesize'][0] if 'pagesize' in request.args else 0
if bool(query) == bool(id):
defer.returnValue(self.error(request, 'please use either the query or the id param', 400))
if id:
track = self.database.get_track(id)
if track is None:
defer.returnValue(self.error(request, 'track does not exist', 404))
defer.returnValue(track)
results = yield self.search.search(query)
results.sort(key=lambda x: x.get('stats', {}).get('playlisted', 0), reverse=True)
offset = int(offset)
page_size = int(page_size) or int(self.config.get('api', 'page_size'))
if offset > len(results):
defer.returnValue(self.error(request, 'offset is larger than the result set', 404))
else:
defer.returnValue({'offset': offset,
'page_size': page_size,
'total': len(results),
'results': results[offset:offset+page_size]})
class RecommendHandler(BaseHandler):
@inlineCallbacks
def _process_GET(self, request):
token = request.args['token'][0] if 'token' in request.args else None
name = request.args['name'][0] if 'name' in request.args else None
offset = request.args['offset'][0] if 'offset' in request.args else 0
page_size = request.args['pagesize'][0] if 'pagesize' in request.args else 0
session = self.database.get_session(token)
if session is None:
defer.returnValue(self.error(request, 'cannot find session', 404))
playlists = session['playlists']
ident_playlist = None
for p in playlists.itervalues():
if p.get('type', 'user') == 'identity':
ident_playlist = p
page_size = int(page_size) or int(self.config.get('api', 'page_size'))
# Get the recommendations
results = None
if ident_playlist:
offset = int(offset)
results = yield self.search.recommend(ident_playlist)
if results is None:
offset = 0
results = self.database.get_random_tracks(page_size)
# Return the recommendations
if offset > len(results):
response = self.error(request, 'offset is larger than the result set', 404)
else:
response = {'offset': offset,
'page_size': page_size,
'total': len(results),
'results': results[offset:offset+page_size]}
defer.returnValue(response)
class ClicklogHandler(BaseHandler):
@json_out
def render_GET(self, request):
app = request.args['app'][0] if 'app' in request.args else None
limit = request.args['limit'][0] if 'limit' in request.args else 0
# Make sure the user is authorized (HTTP basic authentication)
authorized = any([user['name'] == request.getUser() and user['password'] == request.getPassword() for user in self.database.get_users()])
if not authorized:
request.responseHeaders.addRawHeader('WWW-Authenticate', 'Basic realm="Billy"')
return self.error(request, 'authentication failed', 401)
clicklog = list(self.database.get_clicklog(app, int(limit)))
return clicklog
@json_out
def render_POST(self, request):
app = request.args['app'][0] if 'app' in request.args else 'billy'
body = request.content.read()
json_body = json.loads(body)
if app == 'billy':
token = request.args['token'][0] if 'token' in request.args else None
session = self.database.get_session(token)
if session is None:
return self.error(request, 'cannot find session', 404)
json_body['token'] = token
json_body['app'] = app
json_body['user-agent'] = request.getAllHeaders().get('user-agent', '')
json_body['ip'] = request.getClientIP()
json_body['time'] = int(time.time())
self.database.add_clicklog(json_body)
class WaveformHandler(BaseHandler):
@json_out
def render_GET(self, request):
id = request.args['id'][0] if 'id' in request.args else None
waveform = self.database.get_waveform(id)
if waveform is None:
return self.error(request, 'cannot find waveform', 404)
return {'waveform': waveform['waveform']}
class InfoHandler(BaseHandler):
@json_out
def render_GET(self, request):
return {'info': self.database.get_info()}
def main(argv):
parser = argparse.ArgumentParser(description='Billy API server')
try:
parser.add_argument('-p', '--port', help='Listen port', required=True)
parser.add_argument('-t', '--tracks', help='JSON formatted tracks to be imported into the database', required=False)
parser.add_argument('-s', '--sources', help='JSON formatted sources to be imported into the database', required=False)
parser.add_argument('-u', '--users', help='JSON formatted admin users to be imported into the database', required=False)
parser.add_argument('-d', '--dir', help='Directory with static content (served from http://server/billy)', required=False)
parser.add_argument('-n', '--dbname', help='Name of the MongoDB database (default: billy)', required=False)
parser.add_argument('-l', '--ssl', help='SSL key/certificate files (e.g. --ssl privkey.pem,cert.pem)', required=False)
parser.add_argument('-r', '--radio', help='Enable radio', required=False, action='store_true')
parser.add_help = True
args = parser.parse_args(sys.argv[1:])
except argparse.ArgumentError:
parser.print_help()
sys.exit(2)
logging.config.fileConfig(os.path.join(CURRENT_DIR, 'logger.conf'))
logger = logging.getLogger(__name__)
config = ConfigParser.ConfigParser()
config.read(os.path.join(CURRENT_DIR, 'billy.conf'))
database = Database(config, (args.dbname or 'billy'))
search = Search(database, config)
database.set_track_callbacks(search.index, search.update)
# Import tracks
if args.tracks:
with open(args.tracks, 'rb') as fp:
logger.info('Importing tracks')
tracks = json.load(fp)
for track in tracks:
waveform = track.pop('waveform', None)
track_id = database.add_track(track)
if track_id and waveform is not None:
database.add_waveform(track_id, waveform)
logger.info('Finished importing tracks')
# Import sources
if args.sources:
with open(args.sources, 'rb') as fp:
logger.info('Importing sources')
sources = json.load(fp)
for source in sources:
track_id = database.add_source(source)
logger.info('Finished importing sources')
# Import users
if args.users:
with open(args.users, 'rb') as fp:
logger.info('Importing users')
users = json.load(fp)
for user in users:
database.add_user(user['name'], user['password'])
logger.info('Finished importing users')
database.start_checking()
root = Resource()
if args.dir:
html_dir = os.path.abspath(args.dir)
if not os.path.exists(html_dir):
raise IOError('directory does not exist')
root.putChild('billy', File(html_dir))
# Add HTTP API
root.putChild('api', APIResource(config, database, search))
if args.radio:
# Add WS API (only used for radio)
scheme = 'ws' if not args.ssl else 'wss'
factory = BillyRadioFactory(scheme + '://127.0.0.1:' + args.port, database=database, config=config)
factory.protocol = BillyRadioProtocol
root.putChild('ws', WebSocketResource(factory))
site = Site(root)
if args.ssl:
privkey_fn, cert_fn = args.ssl.split(',')
context_factory = ssl.DefaultOpenSSLContextFactory(privkey_fn, cert_fn)
reactor.listenSSL(int(args.port), site, context_factory)
else:
reactor.listenTCP(int(args.port), site)
reactor.run()
if __name__ == "__main__":
main(sys.argv[1:])
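# Example invocation (illustrative values; 'server.py' stands for this script,
# 'billy.conf' and 'logger.conf' must exist next to it, and MongoDB must be
# reachable):
#
#     python server.py --port 8080 --dir ./static \
#         --tracks tracks.json --users users.json --radio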
# Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import urllib
from lxml import etree
from tempest.common import rest_client
from tempest.common import xml_utils as common
from tempest import config
from tempest import exceptions
CONF = config.CONF
class VolumesV2ClientXML(rest_client.RestClient):
"""
Client class to send CRUD Volume API requests to a Cinder endpoint
"""
TYPE = "xml"
def __init__(self, auth_provider):
super(VolumesV2ClientXML, self).__init__(auth_provider)
self.api_version = "v2"
self.service = CONF.volume.catalog_type
self.build_interval = CONF.compute.build_interval
self.build_timeout = CONF.compute.build_timeout
def _parse_volume(self, body):
vol = dict((attr, body.get(attr)) for attr in body.keys())
for child in body.getchildren():
tag = child.tag
if tag.startswith("{"):
ns, tag = tag.split("}", 1)
if tag == 'metadata':
vol['metadata'] = dict((meta.get('key'),
meta.text) for meta in
child.getchildren())
else:
vol[tag] = common.xml_to_json(child)
return vol
def get_attachment_from_volume(self, volume):
"""Return the element 'attachment' from input volumes."""
return volume['attachments']['attachment']
def _check_if_bootable(self, volume):
"""
Check if the volume is bootable, also change the value
of 'bootable' from string to boolean.
"""
# NOTE(jdg): Version 1 of Cinder API uses lc strings
# We should consider being explicit in this check to
# avoid introducing bugs like: LP #1227837
if volume['bootable'].lower() == 'true':
volume['bootable'] = True
elif volume['bootable'].lower() == 'false':
volume['bootable'] = False
else:
raise ValueError(
'bootable flag is supposed to be either True or False, '
'it is %s' % volume['bootable'])
return volume
def list_volumes(self, params=None):
"""List all the volumes created."""
url = 'volumes'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = etree.fromstring(body)
volumes = []
if body is not None:
volumes += [self._parse_volume(vol) for vol in list(body)]
return resp, volumes
def list_volumes_with_detail(self, params=None):
"""List all the details of volumes."""
url = 'volumes/detail'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = etree.fromstring(body)
volumes = []
if body is not None:
volumes += [self._parse_volume(vol) for vol in list(body)]
for v in volumes:
v = self._check_if_bootable(v)
return resp, volumes
def get_volume(self, volume_id):
"""Returns the details of a single volume."""
url = "volumes/%s" % str(volume_id)
resp, body = self.get(url)
body = self._parse_volume(etree.fromstring(body))
body = self._check_if_bootable(body)
return resp, body
def create_volume(self, size=None, **kwargs):
"""Creates a new Volume.
:param size: Size of volume in GB.
:param name: Optional Volume Name.
:param metadata: An optional dictionary of values for metadata.
:param volume_type: Optional Name of volume_type for the volume
:param snapshot_id: When specified the volume is created from
this snapshot
:param imageRef: When specified the volume is created from this
image
"""
# for bug #1293885:
# If no size specified, read volume size from CONF
if size is None:
size = CONF.volume.volume_size
# NOTE(afazekas): it should use a volume namespace
volume = common.Element("volume", xmlns=common.XMLNS_11, size=size)
if 'metadata' in kwargs:
_metadata = common.Element('metadata')
volume.append(_metadata)
for key, value in kwargs['metadata'].items():
meta = common.Element('meta')
meta.add_attr('key', key)
meta.append(common.Text(value))
_metadata.append(meta)
attr_to_add = kwargs.copy()
del attr_to_add['metadata']
else:
attr_to_add = kwargs
for key, value in attr_to_add.items():
volume.add_attr(key, value)
resp, body = self.post('volumes', str(common.Document(volume)))
body = common.xml_to_json(etree.fromstring(body))
return resp, body
def update_volume(self, volume_id, **kwargs):
"""Updates the Specified Volume."""
put_body = common.Element("volume", xmlns=common.XMLNS_11, **kwargs)
resp, body = self.put('volumes/%s' % volume_id,
str(common.Document(put_body)))
body = common.xml_to_json(etree.fromstring(body))
return resp, body
def delete_volume(self, volume_id):
"""Deletes the Specified Volume."""
return self.delete("volumes/%s" % str(volume_id))
def wait_for_volume_status(self, volume_id, status):
"""Waits for a Volume to reach a given status."""
resp, body = self.get_volume(volume_id)
volume_status = body['status']
start = int(time.time())
while volume_status != status:
time.sleep(self.build_interval)
resp, body = self.get_volume(volume_id)
volume_status = body['status']
if volume_status == 'error':
raise exceptions.VolumeBuildErrorException(volume_id=volume_id)
if int(time.time()) - start >= self.build_timeout:
message = 'Volume %s failed to reach %s status within '\
'the required time (%s s).' % (volume_id,
status,
self.build_timeout)
raise exceptions.TimeoutException(message)
def is_resource_deleted(self, id):
try:
self.get_volume(id)
except exceptions.NotFound:
return True
return False
def attach_volume(self, volume_id, instance_uuid, mountpoint):
"""Attaches a volume to a given instance on a given mountpoint."""
post_body = common.Element("os-attach",
instance_uuid=instance_uuid,
mountpoint=mountpoint
)
url = 'volumes/%s/action' % str(volume_id)
resp, body = self.post(url, str(common.Document(post_body)))
if body:
body = common.xml_to_json(etree.fromstring(body))
return resp, body
def detach_volume(self, volume_id):
"""Detaches a volume from an instance."""
post_body = common.Element("os-detach")
url = 'volumes/%s/action' % str(volume_id)
resp, body = self.post(url, str(common.Document(post_body)))
if body:
body = common.xml_to_json(etree.fromstring(body))
return resp, body
def upload_volume(self, volume_id, image_name, disk_format):
"""Uploads a volume in Glance."""
post_body = common.Element("os-volume_upload_image",
image_name=image_name,
disk_format=disk_format)
url = 'volumes/%s/action' % str(volume_id)
resp, body = self.post(url, str(common.Document(post_body)))
volume = common.xml_to_json(etree.fromstring(body))
return resp, volume
def extend_volume(self, volume_id, extend_size):
"""Extend a volume."""
post_body = common.Element("os-extend",
new_size=extend_size)
url = 'volumes/%s/action' % str(volume_id)
resp, body = self.post(url, str(common.Document(post_body)))
if body:
body = common.xml_to_json(etree.fromstring(body))
return resp, body
def reset_volume_status(self, volume_id, status):
"""Reset the Specified Volume's Status."""
post_body = common.Element("os-reset_status",
status=status
)
url = 'volumes/%s/action' % str(volume_id)
resp, body = self.post(url, str(common.Document(post_body)))
if body:
body = common.xml_to_json(etree.fromstring(body))
return resp, body
def volume_begin_detaching(self, volume_id):
"""Volume Begin Detaching."""
post_body = common.Element("os-begin_detaching")
url = 'volumes/%s/action' % str(volume_id)
resp, body = self.post(url, str(common.Document(post_body)))
if body:
body = common.xml_to_json(etree.fromstring(body))
return resp, body
def volume_roll_detaching(self, volume_id):
"""Volume Roll Detaching."""
post_body = common.Element("os-roll_detaching")
url = 'volumes/%s/action' % str(volume_id)
resp, body = self.post(url, str(common.Document(post_body)))
if body:
body = common.xml_to_json(etree.fromstring(body))
return resp, body
def reserve_volume(self, volume_id):
"""Reserves a volume."""
post_body = common.Element("os-reserve")
url = 'volumes/%s/action' % str(volume_id)
resp, body = self.post(url, str(common.Document(post_body)))
if body:
body = common.xml_to_json(etree.fromstring(body))
return resp, body
def unreserve_volume(self, volume_id):
"""Restore a reserved volume ."""
post_body = common.Element("os-unreserve")
url = 'volumes/%s/action' % str(volume_id)
resp, body = self.post(url, str(common.Document(post_body)))
if body:
body = common.xml_to_json(etree.fromstring(body))
return resp, body
def create_volume_transfer(self, vol_id, name=None):
"""Create a volume transfer."""
post_body = common.Element("transfer", volume_id=vol_id)
if name:
post_body.add_attr('name', name)
resp, body = self.post('os-volume-transfer',
str(common.Document(post_body)))
volume = common.xml_to_json(etree.fromstring(body))
return resp, volume
def get_volume_transfer(self, transfer_id):
"""Returns the details of a volume transfer."""
url = "os-volume-transfer/%s" % str(transfer_id)
resp, body = self.get(url)
volume = common.xml_to_json(etree.fromstring(body))
return resp, volume
def list_volume_transfers(self, params=None):
"""List all the volume transfers created."""
url = 'os-volume-transfer'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = etree.fromstring(body)
volumes = []
if body is not None:
volumes += [self._parse_volume_transfer(vol) for vol in list(body)]
return resp, volumes
def _parse_volume_transfer(self, body):
vol = dict((attr, body.get(attr)) for attr in body.keys())
for child in body.getchildren():
tag = child.tag
if tag.startswith("{"):
ns, tag = tag.split("}", 1)
vol[tag] = common.xml_to_json(child)
return vol
def delete_volume_transfer(self, transfer_id):
"""Delete a volume transfer."""
return self.delete("os-volume-transfer/%s" % str(transfer_id))
def accept_volume_transfer(self, transfer_id, transfer_auth_key):
"""Accept a volume transfer."""
post_body = common.Element("accept", auth_key=transfer_auth_key)
url = 'os-volume-transfer/%s/accept' % transfer_id
resp, body = self.post(url, str(common.Document(post_body)))
volume = common.xml_to_json(etree.fromstring(body))
return resp, volume
def update_volume_readonly(self, volume_id, readonly):
"""Update the Specified Volume readonly."""
post_body = common.Element("os-update_readonly_flag",
readonly=readonly)
url = 'volumes/%s/action' % str(volume_id)
resp, body = self.post(url, str(common.Document(post_body)))
if body:
body = common.xml_to_json(etree.fromstring(body))
return resp, body
def force_delete_volume(self, volume_id):
"""Force Delete Volume."""
post_body = common.Element("os-force_delete")
url = 'volumes/%s/action' % str(volume_id)
resp, body = self.post(url, str(common.Document(post_body)))
if body:
body = common.xml_to_json(etree.fromstring(body))
return resp, body
def _metadata_body(self, meta):
post_body = common.Element('metadata')
for k, v in meta.items():
data = common.Element('meta', key=k)
data.append(common.Text(v))
post_body.append(data)
return post_body
def _parse_key_value(self, node):
"""Parse <foo key='key'>value</foo> data into {'key': 'value'}."""
data = {}
for node in node.getchildren():
data[node.get('key')] = node.text
return data
def create_volume_metadata(self, volume_id, metadata):
"""Create metadata for the volume."""
post_body = self._metadata_body(metadata)
resp, body = self.post('volumes/%s/metadata' % volume_id,
str(common.Document(post_body)))
body = self._parse_key_value(etree.fromstring(body))
return resp, body
def get_volume_metadata(self, volume_id):
"""Get metadata of the volume."""
url = "volumes/%s/metadata" % str(volume_id)
resp, body = self.get(url)
body = self._parse_key_value(etree.fromstring(body))
return resp, body
def update_volume_metadata(self, volume_id, metadata):
"""Update metadata for the volume."""
put_body = self._metadata_body(metadata)
url = "volumes/%s/metadata" % str(volume_id)
resp, body = self.put(url, str(common.Document(put_body)))
body = self._parse_key_value(etree.fromstring(body))
return resp, body
def update_volume_metadata_item(self, volume_id, id, meta_item):
"""Update metadata item for the volume."""
for k, v in meta_item.items():
put_body = common.Element('meta', key=k)
put_body.append(common.Text(v))
url = "volumes/%s/metadata/%s" % (str(volume_id), str(id))
resp, body = self.put(url, str(common.Document(put_body)))
body = common.xml_to_json(etree.fromstring(body))
return resp, body
def delete_volume_metadata_item(self, volume_id, id):
"""Delete metadata item for the volume."""
url = "volumes/%s/metadata/%s" % (str(volume_id), str(id))
return self.delete(url)
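# Usage sketch (illustrative; 'auth_provider' must be a configured tempest
# auth provider, and the size/name values are placeholders):
def _example_create_and_wait(auth_provider):
    client = VolumesV2ClientXML(auth_provider)
    resp, volume = client.create_volume(size=1, name='scratch-vol')
    # Poll until Cinder reports the volume as available, raising on error or timeout.
    client.wait_for_volume_status(volume['id'], 'available')
    resp, details = client.get_volume(volume['id'])
    return details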
import pysal
__author__ = "Charles R Schmidt <[email protected]>"
class WorldToViewTransform(object):
"""
An abstract class modeling a View window.
Supports Panning, Zooming, Resizing.
Is observable.
Parameters:
worldExtent -- Extent,List -- Extent of the world, left,lower,right,upper in world coords
pixel_width -- int -- initial width of the view in pixels
pixel_height -- int -- initial height of the view in pixels
Notes:
World coordinates are expected to increase in the X and Y directions.
Pixel coordinates are inverted in the Y direction.
This class helps transform world coordinates to screen coordinates.
To transform a GraphicsMatrix,
matrix.Scale(1.0/model.scale,-1.0/model.scale)
matrix.Translate(*model.offset)
The transforms will be applied in reverse order:
the coordinates will first be translated to the origin (of the current view),
and then scaled.
Eg.
>>> view = WorldToViewTransform([-180,-90,180,90],500,500)
"""
def __init__(self,worldExtent,pixel_width,pixel_height):
""" Intialize the view to the extent of the world """
self.__pixel_width = float(pixel_width)
self.__pixel_height = float(pixel_height)
self.__world = worldExtent
self.extent = worldExtent
# In World Coords
def __copy__(self):
return WorldToViewTransform(self.extent,self.__pixel_width,self.__pixel_height)
copy = __copy__
def __get_offset(self):
"""
Returns the offset of the top left corner of the current view in world coords.
Move the world this many units to align it with the view.
"""
return self.__offset
def __set_offset(self,value):
"""
Set the Offset of the top left corner in world coords.
"""
assert len(value) == 2
self.__offset = value
offset = property(fget=__get_offset,fset=__set_offset)
def __get_scale(self):
""" Returns the current scale in units/pixel """
return self.__scale
def __set_scale(self,value):
""" Sets the current scale in units/pixel """
self.__scale = value
scale = property(fget=__get_scale,fset=__set_scale)
def __get_extent(self):
"""Returns the extent of the current view in World Coordinates."""
left,upper = self.pixel_to_world(0,0)
right,lower = self.pixel_to_world(self.__pixel_width,self.__pixel_height)
return pysal.cg.Rectangle(left,lower,right,upper)
def __set_extent(self,value):
""" Set the extent of the current view in World Coordinates.
Preserve fixed scale, take the max of (sx,sy).
Use this to zoom to a specific region when you know the region's
bbox in world coords.
"""
left,lower,right,upper = value
width = abs(right-left)
height = abs(upper-lower)
sx = width/self.__pixel_width
sy = height/self.__pixel_height
self.__scale = max(sx,sy)
#The offset translate the world to the origin.
#The X offset + world.left == 0
#The Y offset + world.upper == 0
# Move the offset a little, so that the center of the extent is in the center of the view.
oleft = (left+(width/2.0)) - (self.__pixel_width*self.__scale/2.0)
oupper = (upper-height/2.0) + (self.__pixel_height*self.__scale/2.0)
#self.__offset = (-left,-upper) # in world coords
self.__offset = (-oleft,-oupper) # in world coords
extent = property(fget=__get_extent,fset=__set_extent)
def __get_width(self):
""" Returns the width of the current view in world coords """
return self.__pixel_width*self.scale
def __set_width(self, value):
"""
Sets the width of the current view, value in pixels
Eg.
>>> view = WorldToViewTransform([0,0,100,100],500,500)
>>> view.extent[:]
[0.0, 0.0, 100.0, 100.0]
>>> view.width = 250
>>> view.extent[:]
[0.0, 0.0, 50.0, 100.0]
"""
if self.__pixel_width != value:
self.__pixel_width = value
width = property(fget=__get_width,fset=__set_width)
def __get_height(self):
""" Returns the height of the current view in world coords """
return self.__pixel_height*self.scale
def __set_height(self, value):
"""
Sets the height of the current view, value in pixels
Eg.
>>> view = WorldToViewTransform([0,0,100,100],500,500)
>>> view.extent[:]
[0.0, 0.0, 100.0, 100.0]
>>> view.height = 250
>>> view.extent[:]
[0.0, 50.0, 100.0, 100.0]
"""
if self.__pixel_height != value:
self.__pixel_height = value
height = property(fget=__get_height,fset=__set_height)
def __get_pixel_size(self):
"""
Return the current size of the view in pixels as (width, height).
"""
return self.__pixel_width,self.__pixel_height
def __set_pixel_size(self,value):
w,h = value
if self.__pixel_width != w:
self.__pixel_width = w
if self.__pixel_height != h:
self.__pixel_height = h
pixel_size = property(fget=__get_pixel_size,fset=__set_pixel_size)
def pan(self,dpx,dpy):
"""
Pan the view by (dpx,dpy) pixel coordinates.
Positive deltas move the world right and down.
Negative deltas move the world left and up.
Eg.
>>> view = WorldToViewTransform([0,0,100,100],500,500)
>>> view.pan(500,0)
>>> view.extent[:]
[-100.0, 0.0, 0.0, 100.0]
>>> view.pan(-500,500)
>>> view.extent[:]
[0.0, 100.0, 100.0, 200.0]
>>> view.pan(0,-500)
>>> view.extent[:]
[0.0, 0.0, 100.0, 100.0]
>>> view.pan(490,490)
>>> view.extent[:]
[-98.0, 98.0, 2.0, 198.0]
>>> view.pan(-490,-490)
>>> view.extent[:]
[0.0, 0.0, 100.0, 100.0]
"""
ogx,ogy = self.__offset
s = self.scale
self.__offset = ogx+(dpx*s),ogy-(dpy*s)
def pan_to(self,extent):
initScale = self.scale
self.extent = extent
self.scale = initScale
def pixel_to_world(self,px,py):
"""
Returns the world coordinates of the Pixel (px,py).
Eg.
>>> view = WorldToViewTransform([0,0,100,100],500,500)
>>> view.pixel_to_world(0,0)
(0.0, 100.0)
>>> view.pixel_to_world(500,500)
(100.0, 0.0)
"""
sx = self.scale
sy = -sx
ogx,ogy = self.__offset
return px*sx - ogx, py*sy - ogy
def world_to_pixel(self,x,y):
"""
Returns the pixel of the world coordinate (x,y).
Eg.
>>> view = WorldToViewTransform([0,0,100,100],500,500)
>>> view.world_to_pixel(0,0)
(0.0, 500.0)
>>> view.world_to_pixel(100,100)
(500.0, -0.0)
"""
sx = self.scale
sy = -sx
ogx,ogy = self.__offset
return (x+ogx)/sx, (y+ogy)/sy
if __name__=="__main__":
import doctest
doctest.testmod()
view = WorldToViewTransform([0,0,100,100],500,500)
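    # A small round-trip check (illustrative): with this view, world (0, 0)
    # maps to pixel (0.0, 500.0) and pixel_to_world() maps it back:
    #
    #     px, py = view.world_to_pixel(0, 0)
    #     assert view.pixel_to_world(px, py) == (0.0, 0.0)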
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# name: colors.py
# author: Harold Bradley III
# email: [email protected]
# created on: 02/23/2016
#
"""
nwid.terminal.colors
~~~~~~~~~~~~~~~~~~~~
Functions for creating and combining terminal text effects and colors using
code escape sequences.
They return the modified string with the appropriate escape sequences.
"""
from __future__ import absolute_import
from . import codes as code
from . import sgr
def _combine(string, attribute, *funcs, **additional):
"""Recursively Combines multiple sgr attributes into one string using
sgr.wrap.
This is called by individual attribute functions and allows either chaining
multiple functions or passing attribute functions as arguments of other
attribute functions.
:param string: the string around which to wrap the SGR codes.
:param attribute: the code attribute to be used and combined with optional
additional attributes.
:param *funcs: optional attribute functions to be applied.
:param **additional: additional attributes to be combined with attribute.
This parameter is intended for internal use for recursion.
When present, additional['attributes'] is a tuple of
attributes to be ultimately combined by sgr.wrap.
Usage::
>>> print(bold('The important string.', red, on_white))
The important string.
>>> print(bold(red(on_white('The important string.'))))
The important string.
"""
_attributes = additional['attributes'] + (attribute,) \
if 'attributes' in additional else (attribute,)
if funcs:
_additional = {'attributes' : _attributes}
_next_func, _funcs = funcs[0], funcs[1:]
return _next_func(string, *_funcs, **_additional)
else:
return sgr.wrap(string, *_attributes)
# Text effects
def normal(string, *funcs, **additional):
"""Text effect - normal. (see _combine())."""
return _combine(string, code.RESET, *funcs, **additional)
def underline(string, *funcs, **additional):
"""Text effect - underline. (see _combine())."""
return _combine(string, code.UNDERLINE, *funcs, **additional)
def bold(string, *funcs, **additional):
"""Text effect - bold. (see _combine())."""
return _combine(string, code.BOLD, *funcs, **additional)
def blink(string, *funcs, **additional):
"""Text effect - blink. (see _combine())."""
return _combine(string, code.BLINK, *funcs, **additional)
def rblink(string, *funcs, **additional):
"""Text effect - rblink. (see _combine())."""
return _combine(string, code.RBLINK, *funcs, **additional)
def reverse(string, *funcs, **additional):
"""Text effect - reverse. (see _combine())."""
return _combine(string, code.REVERSE, *funcs, **additional)
def conceal(string, *funcs, **additional):
"""Text effect - conceal. (see _combine())."""
return _combine(string, code.CONCEAL, *funcs, **additional)
# Basic colors
def black(string, *funcs, **additional):
"""Text color - black. (see _combine())."""
return _combine(string, code.BLACK, *funcs, **additional)
def red(string, *funcs, **additional):
"""Text color - red. (see _combine())."""
return _combine(string, code.RED, *funcs, **additional)
def green(string, *funcs, **additional):
"""Text color - green. (see _combine())."""
return _combine(string, code.GREEN, *funcs, **additional)
def yellow(string, *funcs, **additional):
"""Text color - yellow. (see _combine())."""
return _combine(string, code.YELLOW, *funcs, **additional)
def blue(string, *funcs, **additional):
"""Text color - blue. (see _combine())."""
return _combine(string, code.BLUE, *funcs, **additional)
def magenta(string, *funcs, **additional):
"""Text color - magenta. (see _combine())."""
return _combine(string, code.MAGENTA, *funcs, **additional)
def cyan(string, *funcs, **additional):
"""Text color - cyan. (see _combine())."""
return _combine(string, code.CYAN, *funcs, **additional)
def white(string, *funcs, **additional):
"""Text color - white. (see _combine())."""
return _combine(string, code.WHITE, *funcs, **additional)
# Basic background colors
def bg_black(string, *funcs, **additional):
"""Text background color - black. (see _combine())."""
return _combine(string, code.BG_BLACK, *funcs, **additional)
def bg_red(string, *funcs, **additional):
"""Text background color - red. (see _combine())."""
return _combine(string, code.BG_RED, *funcs, **additional)
def bg_green(string, *funcs, **additional):
"""Text background color - green. (see _combine())."""
return _combine(string, code.BG_GREEN, *funcs, **additional)
def bg_yellow(string, *funcs, **additional):
"""Text background color - yellow. (see _combine())."""
return _combine(string, code.BG_YELLOW, *funcs, **additional)
def bg_blue(string, *funcs, **additional):
"""Text background color - blue. (see _combine())."""
return _combine(string, code.BG_BLUE, *funcs, **additional)
def bg_magenta(string, *funcs, **additional):
"""Text background color - magenta. (see _combine())."""
return _combine(string, code.BG_MAGENTA, *funcs, **additional)
def bg_cyan(string, *funcs, **additional):
"""Text background color - cyan. (see _combine())."""
return _combine(string, code.BG_CYAN, *funcs, **additional)
def bg_white(string, *funcs, **additional):
"""Text background color - white. (see _combine())."""
return _combine(string, code.BG_WHITE, *funcs, **additional)
def on_black(string, *funcs, **additional):
"""Text background color - black. (see _combine())."""
return bg_black(string, *funcs, **additional)
def on_red(string, *funcs, **additional):
"""Text background color - red. (see _combine())."""
return bg_red(string, *funcs, **additional)
def on_green(string, *funcs, **additional):
"""Text background color - green. (see _combine())."""
return bg_green(string, *funcs, **additional)
def on_yellow(string, *funcs, **additional):
"""Text background color - yellow. (see _combine())."""
return bg_yellow(string, *funcs, **additional)
def on_blue(string, *funcs, **additional):
"""Text background color - blue. (see _combine())."""
return bg_blue(string, *funcs, **additional)
def on_magenta(string, *funcs, **additional):
"""Text background color - magenta. (see _combine())."""
return bg_magenta(string, *funcs, **additional)
def on_cyan(string, *funcs, **additional):
"""Text background color - cyan. (see _combine())."""
return bg_cyan(string, *funcs, **additional)
def on_white(string, *funcs, **additional):
"""Text background color - white. (see _combine())."""
return bg_white(string, *funcs, **additional)
# Colors on a black background
def red_on_black(string, *funcs, **additional):
"""Text color - red on background color - black. (see _combine())."""
return _combine(string, code.RED, *funcs, attributes=(code.BG_BLACK,))
def green_on_black(string, *funcs, **additional):
"""Text color - green on background color - black. (see _combine())."""
return _combine(string, code.GREEN, *funcs, attributes=(code.BG_BLACK,))
def yellow_on_black(string, *funcs, **additional):
"""Text color - yellow on background color - black. (see _combine())."""
return _combine(string, code.YELLOW, *funcs, attributes=(code.BG_BLACK,))
def blue_on_black(string, *funcs, **additional):
"""Text color - blue on background color - black. (see _combine())."""
return _combine(string, code.BLUE, *funcs, attributes=(code.BG_BLACK,))
def magenta_on_black(string, *funcs, **additional):
"""Text color - magenta on background color - black. (see _combine())."""
return _combine(string, code.MAGENTA, *funcs, attributes=(code.BG_BLACK,))
def cyan_on_black(string, *funcs, **additional):
"""Text color - cyan on background color - black. (see _combine())."""
return _combine(string, code.CYAN, *funcs, attributes=(code.BG_BLACK,))
def white_on_black(string, *funcs, **additional):
"""Text color - white on background color - black. (see _combine())."""
return _combine(string, code.WHITE, *funcs, attributes=(code.BG_BLACK,))
# Colors on a red background
def black_on_red(string, *funcs, **additional):
"""Text color - black on background color - red. (see _combine())."""
return _combine(string, code.BLACK, *funcs, attributes=(code.BG_RED,))
def green_on_red(string, *funcs, **additional):
"""Text color - green on background color - red. (see _combine())."""
return _combine(string, code.GREEN, *funcs, attributes=(code.BG_RED,))
def yellow_on_red(string, *funcs, **additional):
"""Text color - yellow on background color - red. (see _combine())."""
return _combine(string, code.YELLOW, *funcs, attributes=(code.BG_RED,))
def blue_on_red(string, *funcs, **additional):
"""Text color - blue on background color - red. (see _combine())."""
return _combine(string, code.BLUE, *funcs, attributes=(code.BG_RED,))
def magenta_on_red(string, *funcs, **additional):
"""Text color - magenta on background color - red. (see _combine())."""
return _combine(string, code.MAGENTA, *funcs, attributes=(code.BG_RED,))
def cyan_on_red(string, *funcs, **additional):
"""Text color - cyan on background color - red. (see _combine())."""
return _combine(string, code.CYAN, *funcs, attributes=(code.BG_RED,))
def white_on_red(string, *funcs, **additional):
"""Text color - white on background color - red. (see _combine())."""
return _combine(string, code.WHITE, *funcs, attributes=(code.BG_RED,))
# Colors on a green background
def black_on_green(string, *funcs, **additional):
"""Text color - black on background color - green. (see _combine())."""
return _combine(string, code.BLACK, *funcs, attributes=(code.BG_GREEN,))
def red_on_green(string, *funcs, **additional):
"""Text color - red on background color - green. (see _combine())."""
return _combine(string, code.RED, *funcs, attributes=(code.BG_GREEN,))
def yellow_on_green(string, *funcs, **additional):
"""Text color - yellow on background color - green. (see _combine())."""
return _combine(string, code.YELLOW, *funcs, attributes=(code.BG_GREEN,))
def blue_on_green(string, *funcs, **additional):
"""Text color - blue on background color - green. (see _combine())."""
return _combine(string, code.BLUE, *funcs, attributes=(code.BG_GREEN,))
def magenta_on_green(string, *funcs, **additional):
"""Text color - magenta on background color - green. (see _combine())."""
return _combine(string, code.MAGENTA, *funcs, attributes=(code.BG_GREEN,))
def cyan_on_green(string, *funcs, **additional):
"""Text color - cyan on background color - green. (see _combine())."""
return _combine(string, code.CYAN, *funcs, attributes=(code.BG_GREEN,))
def white_on_green(string, *funcs, **additional):
"""Text color - white on background color - green. (see _combine())."""
return _combine(string, code.WHITE, *funcs, attributes=(code.BG_GREEN,))
# Colors on a yellow background
def black_on_yellow(string, *funcs, **additional):
    """Text color - black on background color - yellow. (see _combine())."""
    return _combine(string, code.BLACK, *funcs, attributes=(code.BG_YELLOW,))
def red_on_yellow(string, *funcs, **additional):
"""Text color - red on background color - yellow. (see _combine())."""
return _combine(string, code.RED, *funcs, attributes=(code.BG_YELLOW,))
def green_on_yellow(string, *funcs, **additional):
"""Text color - green on background color - yellow. (see _combine())."""
return _combine(string, code.GREEN, *funcs, attributes=(code.BG_YELLOW,))
def blue_on_yellow(string, *funcs, **additional):
"""Text color - blue on background color - yellow. (see _combine())."""
return _combine(string, code.BLUE, *funcs, attributes=(code.BG_YELLOW,))
def magenta_on_yellow(string, *funcs, **additional):
"""Text color - magenta on background color - yellow. (see _combine())."""
return _combine(string, code.MAGENTA, *funcs, attributes=(code.BG_YELLOW,))
def cyan_on_yellow(string, *funcs, **additional):
"""Text color - cyan on background color - yellow. (see _combine())."""
return _combine(string, code.CYAN, *funcs, attributes=(code.BG_YELLOW,))
def white_on_yellow(string, *funcs, **additional):
"""Text color - white on background color - yellow. (see _combine())."""
return _combine(string, code.WHITE, *funcs, attributes=(code.BG_YELLOW,))
# Colors on a blue background
def black_on_blue(string, *funcs, **additional):
"""Text color - black on background color - blue. (see _combine())."""
return _combine(string, code.BLACK, *funcs, attributes=(code.BG_BLUE,))
def red_on_blue(string, *funcs, **additional):
"""Text color - red on background color - blue. (see _combine())."""
return _combine(string, code.RED, *funcs, attributes=(code.BG_BLUE,))
def green_on_blue(string, *funcs, **additional):
"""Text color - green on background color - blue. (see _combine())."""
return _combine(string, code.GREEN, *funcs, attributes=(code.BG_BLUE,))
def yellow_on_blue(string, *funcs, **additional):
"""Text color - yellow on background color - blue. (see _combine())."""
return _combine(string, code.YELLOW, *funcs, attributes=(code.BG_BLUE,))
def magenta_on_blue(string, *funcs, **additional):
"""Text color - magenta on background color - blue. (see _combine())."""
return _combine(string, code.MAGENTA, *funcs, attributes=(code.BG_BLUE,))
def cyan_on_blue(string, *funcs, **additional):
"""Text color - cyan on background color - blue. (see _combine())."""
return _combine(string, code.CYAN, *funcs, attributes=(code.BG_BLUE,))
def white_on_blue(string, *funcs, **additional):
"""Text color - white on background color - blue. (see _combine())."""
return _combine(string, code.WHITE, *funcs, attributes=(code.BG_BLUE,))
# Colors on a magenta background
def black_on_magenta(string, *funcs, **additional):
"""Text color - black on background color - magenta. (see _combine())."""
return _combine(string, code.BLACK, *funcs, attributes=(code.BG_MAGENTA,))
def red_on_magenta(string, *funcs, **additional):
"""Text color - red on background color - magenta. (see _combine())."""
return _combine(string, code.RED, *funcs, attributes=(code.BG_MAGENTA,))
def green_on_magenta(string, *funcs, **additional):
"""Text color - green on background color - magenta. (see _combine())."""
return _combine(string, code.GREEN, *funcs, attributes=(code.BG_MAGENTA,))
def yellow_on_magenta(string, *funcs, **additional):
"""Text color - yellow on background color - magenta. (see _combine())."""
return _combine(string, code.YELLOW, *funcs, attributes=(code.BG_MAGENTA,))
def blue_on_magenta(string, *funcs, **additional):
"""Text color - blue on background color - magenta. (see _combine())."""
return _combine(string, code.BLUE, *funcs, attributes=(code.BG_MAGENTA,))
def cyan_on_magenta(string, *funcs, **additional):
"""Text color - cyan on background color - magenta. (see _combine())."""
return _combine(string, code.CYAN, *funcs, attributes=(code.BG_MAGENTA,))
def white_on_magenta(string, *funcs, **additional):
"""Text color - white on background color - magenta. (see _combine())."""
return _combine(string, code.WHITE, *funcs, attributes=(code.BG_MAGENTA,))
# Colors on a cyan background
def black_on_cyan(string, *funcs, **additional):
"""Text color - black on background color - cyan. (see _combine())."""
return _combine(string, code.BLACK, *funcs, attributes=(code.BG_CYAN,))
def red_on_cyan(string, *funcs, **additional):
"""Text color - red on background color - cyan. (see _combine())."""
return _combine(string, code.RED, *funcs, attributes=(code.BG_CYAN,))
def green_on_cyan(string, *funcs, **additional):
"""Text color - green on background color - cyan. (see _combine())."""
return _combine(string, code.GREEN, *funcs, attributes=(code.BG_CYAN,))
def yellow_on_cyan(string, *funcs, **additional):
"""Text color - yellow on background color - cyan. (see _combine())."""
return _combine(string, code.YELLOW, *funcs, attributes=(code.BG_CYAN,))
def blue_on_cyan(string, *funcs, **additional):
"""Text color - blue on background color - cyan. (see _combine())."""
return _combine(string, code.BLUE, *funcs, attributes=(code.BG_CYAN,))
def magenta_on_cyan(string, *funcs, **additional):
"""Text color - magenta on background color - cyan. (see _combine())."""
return _combine(string, code.MAGENTA, *funcs, attributes=(code.BG_CYAN,))
def white_on_cyan(string, *funcs, **additional):
"""Text color - white on background color - cyan. (see _combine())."""
return _combine(string, code.WHITE, *funcs, attributes=(code.BG_CYAN,))
# Colors on a white background
def black_on_white(string, *funcs, **additional):
"""Text color - black on background color - white. (see _combine())."""
return _combine(string, code.BLACK, *funcs, attributes=(code.BG_WHITE,))
def red_on_white(string, *funcs, **additional):
"""Text color - red on background color - white. (see _combine())."""
return _combine(string, code.RED, *funcs, attributes=(code.BG_WHITE,))
def green_on_white(string, *funcs, **additional):
"""Text color - green on background color - white. (see _combine())."""
return _combine(string, code.GREEN, *funcs, attributes=(code.BG_WHITE,))
def yellow_on_white(string, *funcs, **additional):
"""Text color - yellow on background color - white. (see _combine())."""
return _combine(string, code.YELLOW, *funcs, attributes=(code.BG_WHITE,))
def blue_on_white(string, *funcs, **additional):
"""Text color - blue on background color - white. (see _combine())."""
return _combine(string, code.BLUE, *funcs, attributes=(code.BG_WHITE,))
def magenta_on_white(string, *funcs, **additional):
"""Text color - magenta on background color - white. (see _combine())."""
return _combine(string, code.MAGENTA, *funcs, attributes=(code.BG_WHITE,))
def cyan_on_white(string, *funcs, **additional):
"""Text color - cyan on background color - white. (see _combine())."""
return _combine(string, code.CYAN, *funcs, attributes=(code.BG_WHITE,))
|
|
import bpy
import datetime
from . properties import *
from . operators import *
from . sun_calc import Degrees, format_lat_long, degToRad, \
format_time, format_hms, Move_sun
#---------------------------------------------------------------------------
#
# Draw the Sun Panel, sliders, et al.
#
#---------------------------------------------------------------------------
class SunPos_Panel(bpy.types.Panel):
bl_idname = "panel.SunPos_world"
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context = "world"
bl_label = "Sun Position and Power"
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
if context.area.type == 'PROPERTIES':
return 1
return 0
def enable(self, layout):
row = layout.row()
split = row.split(percentage=.90)
colL = split.column()
colL.alignment = 'LEFT'
colR = split.column()
colR.alignment = 'RIGHT'
colL.operator('world.sunpos_controller', 'Enable', icon='PLAY')
colR.operator('world.sunpos_preferences', '', icon='PREFERENCES')
Map.init_zoom_preference = True
def disable(self, context, layout):
p = context.scene.SunPos_pref_property
if Map.init_zoom_preference:
Map.zoom_preferences(bpy.context.user_preferences.inputs.invert_zoom_wheel,
bpy.context.user_preferences.inputs.invert_mouse_zoom)
Hdr.zoom_preferences(bpy.context.user_preferences.inputs.invert_zoom_wheel,
bpy.context.user_preferences.inputs.invert_mouse_zoom)
row = self.layout.row()
if p.UseOneColumn:
col1 = row.column()
col2 = col1
elif p.UseTimePlace:
split = row.split(percentage=.3)
col1 = split.column()
col2 = split.column()
else:
col1 = row.column()
col1.operator('world.sunpos_controller', 'Disable', icon='X')
if p.UseTimePlace:
col2.operator_menu_enum('world.pdp_operator',
'timePlacePresets', text=Sun.PlaceLabel)
def show_preferences(self, context, layout):
p = context.scene.SunPos_pref_property
box = self.layout.box()
row = box.row(align=True)
row.alignment = 'CENTER'
row.label(text="Preferences")
col = box.column(align=True)
col.label(text="Usage mode:")
col.props_enum(p, "UsageMode")
col.separator()
if p.UsageMode == "NORMAL":
cb = col.box()
tr = cb.row()
rr = tr.row(align=True)
cs = rr.split()
cl = cs.column()
cr = cs.column()
cl.label(text="World map options:")
cl.operator_menu_enum('world.wmp_operator',
'mapPresets', text=Sun.MapName)
cr.label(text="Display map in:")
cr.props_enum(p, "MapLocation")
col.separator()
col.label(text="Show or use:")
col.alignment = 'LEFT'
col.prop(p, "UseOneColumn", text="Single column mode")
col.prop(p, "UseTimePlace", text="Time/place presets")
col.prop(p, "UseObjectGroup", text="Object group")
col.prop(p, "ShowDMS", text="D\xb0 M' S\"")
col.prop(p, "ShowNorth", text="North offset")
col.prop(p, "ShowRefraction", text="Refraction")
col.prop(p, "ShowAzEl", text="Azimuth, elevation")
col.prop(p, "ShowDST", text="Daylight savings time")
col.prop(p, "ShowRiseSet", text="Sunrise, sunset")
col.separator()
col.operator('world.sunpos_pref_done', 'Done', icon='QUIT')
Sun.ShowRiseSet = p.ShowRiseSet
def draw(self, context):
sp = context.scene.SunPos_property
p = context.scene.SunPos_pref_property
layout = self.layout
if Display.PREFERENCES:
self.show_preferences(context, layout)
elif Display.ENABLE:
Sun.SP = sp
Sun.PP = p
self.enable(layout)
else:
Sun.SP = sp
Sun.PP = p
self.disable(context, layout)
if Display.PANEL:
if Sun.SP.IsActive:
self.draw_panel(context, sp, p, layout)
else:
Display.setAction('ENABLE')
def draw_panel(self, context, sp, p, layout):
if p.UsageMode == "HDR":
self.draw_environ_panel(context, sp, p, layout)
elif p.UseOneColumn:
self.draw_one_column(context, sp, p, layout)
else:
self.draw_two_columns(context, sp, p, layout)
def draw_environ_panel(self, context, sp, p, layout):
box = self.layout.box()
toprow = box.row()
row = toprow.row(align=False)
row.alignment = 'CENTER'
col = row.column(align=True)
have_texture = False
try:
col.separator()
col.label(text="Use environment texture:")
col.prop_search(sp, "HDR_texture",
context.scene.world.node_tree, "nodes", text="")
col.separator()
try:
nt = bpy.context.scene.world.node_tree.nodes
envTex = nt.get(sp.HDR_texture)
if envTex:
if envTex.type == "TEX_ENVIRONMENT":
if envTex.image != None:
have_texture = True
if Sun.Bind.azDiff == 0:
if envTex.texture_mapping.rotation.z == 0.0:
Sun.Bind.azDiff = degToRad(90.0)
except:
pass
except:
pass
try:
col.label(text="Use sun object:")
col.prop_search(sp, "SunObject",
context.scene, "objects", text="")
Sun.SunObject = sp.SunObject
except:
pass
col.separator()
col.prop(sp, "SunDistance")
if not sp.BindToSun:
col.prop(sp, "HDR_elevation")
col.prop(sp, "HDR_azimuth")
col.separator()
toprow1 = box.row()
row1 = toprow1.row(align=False)
row1.alignment = 'CENTER'
if not sp.BindToSun:
row1.prop(sp, "BindToSun", toggle=True, icon="CONSTRAINT",
text="Bind Texture to Sun ")
else:
row1.prop(sp, "BindToSun", toggle=True, icon="CONSTRAINT",
text="Release binding")
toprow2 = box.row()
row2 = toprow2.row(align=False)
row2.alignment = 'CENTER'
row2.prop(sp, "ShowHdr", text="Sync Sun to Texture", toggle=True, icon='LAMP_SUN')
if have_texture == False:
row2.enabled = False
elif sp.BindToSun:
row2.enabled = False
else:
row2.enabled = True
if have_texture == False:
row1.enabled = False
elif sp.ShowHdr:
row1.enabled = False
else:
row1.enabled = True
def draw_one_column(self, context, sp, p, layout):
box = self.layout.box()
toprow = box.row()
row = toprow.row(align=False)
row.alignment = 'CENTER'
col = row.column(align=True)
col.prop(sp, "UseSkyTexture", text="Cycles sky")
if sp.UseSkyTexture:
try:
col.prop_search(sp, "SkyTexture",
context.scene.world.node_tree, "nodes", text="")
except:
pass
col.prop(sp, "UseSunObject", text="Use object")
if(sp.UseSunObject):
try:
col.prop_search(sp, "SunObject",
context.scene, "objects", text="")
except:
pass
if p.UseObjectGroup:
col.prop(sp, "UseObjectGroup", text="Object group")
if sp.UseObjectGroup:
Sun.verify_ObjectGroup()
if len(Sun.Selected_objects) > 0:
col.operator('world.sunpos_clear_objects',
'Release Group')
col.separator()
if(sp.ObjectGroup == 'ECLIPTIC'):
col.prop(sp, "TimeSpread")
col.props_enum(sp, "ObjectGroup")
else:
col.operator('world.sunpos_set_objects',
'Set Object Group')
else:
Sun.ObjectGroup_verified = False
row = layout.row()
row.prop(sp, "ShowMap", text="Show Map", toggle=True, icon='WORLD')
box = self.layout.box()
toprow = box.row()
row = toprow.row(align=True)
row.alignment = 'CENTER'
col = row.column(align=True)
col.alignment = 'CENTER'
col.prop(sp, "Latitude")
if p.ShowDMS:
col.label(text=format_lat_long(sp.Latitude, True))
col.prop(sp, "Longitude")
if p.ShowDMS:
col.label(text=format_lat_long(sp.Longitude, False))
cb = col.box()
tr = cb.row()
rr = tr.row(align=True)
if p.ShowNorth:
cs = rr.split()
cl = cs.column()
cr = cs.column()
cl.prop(sp, "ShowNorth", text="Show North", toggle=True)
cr.prop(sp, "NorthOffset")
col.prop(sp, "SunDistance")
else:
rr.prop(sp, "SunDistance")
if p.ShowRefraction:
col.prop(sp, "ShowRefraction", text="Show refraction")
if p.ShowAzEl:
col.label(text="Azimuth: " +
str(round(Sun.Azimuth, 3)) + Degrees)
col.label(text="Elevation: " +
str(round(Sun.Elevation, 3)) + Degrees)
box = self.layout.box()
toprow = box.row()
row = toprow.row(align=False)
row.alignment = 'CENTER'
col = row.column(align=False)
col.alignment = 'CENTER'
tr = col.row()
rr = tr.row(align=True)
if Sun.UseDayMonth:
cs = rr.split(percentage=.82)
cl = cs.column()
cl.alignment = 'LEFT'
cr = cs.column()
cr.alignment = 'RIGHT'
cl.prop(sp, "Month")
cr.operator('world.sunpos_day_range', '',
icon='SORTTIME')
col.prop(sp, "Day")
else:
cs = rr.split(percentage=.90)
cl = cs.column()
cr = cs.column()
cl.alignment = 'LEFT'
cr.alignment = 'RIGHT'
cl.prop(sp, "Day_of_year")
cr.operator('world.sunpos_day_range', '',
icon='SORTTIME')
col.prop(sp, "Year")
col.prop(sp, "UTCzone", slider=True)
col.prop(sp, "Time")
lt, ut = format_time(sp.Time, sp.UTCzone,
sp.DaylightSavings, sp.Longitude)
col.label(text=lt, icon='TIME')
col.label(text=" " + ut, icon='PREVIEW_RANGE')
if p.ShowRiseSet:
if Sun.Sunrise.time == Sun.Sunset.time or \
Sun.Sunrise.elevation > -0.4 or Sun.Sunset.elevation > -0.4:
Sun.RiseSetOK = False
tsr = "Sunrise: --------"
tss = " Sunset: --------"
else:
Sun.RiseSetOK = True
sr = format_hms(Sun.Sunrise.time)
ss = format_hms(Sun.Sunset.time)
tsr = "Sunrise: " + sr
tss = " Sunset: " + ss
col.label(text=tsr, icon='LAMP_SUN')
col.label(text=tss, icon='SOLO_ON')
if p.ShowDST:
col.prop(sp, "DaylightSavings", text="Daylight Savings")
def draw_two_columns(self, context, sp, p, layout):
box = self.layout.box()
toprow = box.row()
row = toprow.row(align=True)
row.alignment = 'CENTER'
col = row.column(align=True)
split = col.split(percentage=.5)
cL = split.column()
cR = split.column()
cL.alignment = 'LEFT'
cR.alignment = 'RIGHT'
cLi = cRi = 1
cL.prop(sp, "UseSkyTexture", text="Cycles sky")
if sp.UseSkyTexture:
try:
cL.prop_search(sp, "SkyTexture",
context.scene.world.node_tree, "nodes", text="")
cLi += 1
except:
pass
cR.prop(sp, "UseSunObject", text="Use object")
if(sp.UseSunObject):
try:
cR.prop_search(sp, "SunObject",
context.scene, "objects", text="")
cRi += 1
except:
pass
if p.UseObjectGroup:
cLi += 1
cL.prop(sp, "UseObjectGroup", text="Object group")
if sp.UseObjectGroup:
Sun.verify_ObjectGroup()
if len(Sun.Selected_objects) > 0:
while cRi < cLi:
cR.label(text=" ")
cRi += 1
cL.operator('world.sunpos_clear_objects',
'Release Group')
cL.label(text=" ")
if(sp.ObjectGroup == 'ECLIPTIC'):
cR.prop(sp, "TimeSpread")
cR.props_enum(sp, "ObjectGroup")
else:
cL.operator('world.sunpos_set_objects',
'Set Object Group')
else:
Sun.ObjectGroup_verified = False
box = self.layout.box()
toprow = box.row()
row = toprow.row(align=False)
row.alignment = 'CENTER'
col = row.column(align=False)
col.prop(sp, "ShowMap", text="Show Map", toggle=True, icon='WORLD')
distanceSet = False
if p.ShowDMS:
split = col.split(percentage=.5)
cL = split.column()
cR = split.column()
cL.alignment = 'LEFT'
cR.alignment = 'RIGHT'
cL.prop(sp, "Latitude")
cR.label(text=format_lat_long(sp.Latitude, True))
cL.prop(sp, "Longitude")
cR.label(text=format_lat_long(sp.Longitude, False))
if p.ShowNorth:
cL.prop(sp, "ShowNorth", text="Show North", toggle=True)
cR.prop(sp, "NorthOffset")
if p.ShowAzEl:
cL.label(text="Azimuth: " +
str(round(Sun.Azimuth, 3)) + Degrees)
cR.label(text="Elevation: " +
str(round(Sun.Elevation, 3)) + Degrees)
if p.ShowRefraction:
cL.prop(sp, "ShowRefraction", text="Show refraction")
cR.prop(sp, "SunDistance")
distanceSet = True
else:
cb = col.box()
tr = cb.row()
rr = tr.row(align=True)
cs = rr.split()
cL = cs.column(align=True)
cR = cs.column(align=True)
cL.prop(sp, "Latitude")
cR.prop(sp, "Longitude")
if p.ShowNorth:
col.separator()
cL.prop(sp, "ShowNorth", text="Show North", toggle=True)
cR.prop(sp, "NorthOffset")
if p.ShowAzEl:
cL.label(text="Azimuth: " +
str(round(Sun.Azimuth, 3)) + Degrees)
cR.label(text="Elevation: " +
str(round(Sun.Elevation, 3)) + Degrees)
if p.ShowRefraction:
cL.prop(sp, "ShowRefraction", text="Show refraction")
cR.prop(sp, "SunDistance")
distanceSet = True
if not distanceSet:
col.prop(sp, "SunDistance")
box = self.layout.box()
toprow = box.row()
row = toprow.row(align=False)
row.alignment = 'CENTER'
if Sun.UseDayMonth:
split = row.split(percentage=.5)
colL = split.column()
colMid = split.column()
colMsplit = colMid.split(percentage=.82)
colM = colMsplit.column()
colR = colMsplit.column()
colL.prop(sp, "Month")
colM.prop(sp, "Day")
colR.operator('world.sunpos_day_range', '',
icon='SORTTIME')
else:
split = row.split(percentage=.50)
colL = split.column()
colL.alignment = 'LEFT'
colMid = split.column()
colMsplit = colMid.split(percentage=.90)
colM = colMsplit.column()
colR = colM.column()
colR.alignment = 'RIGHT'
colL.prop(sp, "Day_of_year")
colR.operator('world.sunpos_day_range', '',
icon='SORTTIME')
colL.prop(sp, "Year")
colM.prop(sp, "UTCzone", slider=True)
lt, ut = format_time(sp.Time,
sp.UTCzone,
sp.DaylightSavings,
sp.Longitude)
colL.prop(sp, "Time")
colM.label(text=lt, icon='TIME')
if p.ShowDST:
colL.prop(sp, "DaylightSavings", text="Daylight Savings")
colM.label(text=" " + ut, icon='PREVIEW_RANGE')
if p.ShowRiseSet:
if Sun.Sunrise.time == Sun.Sunset.time or \
Sun.Sunrise.elevation > -0.4 or Sun.Sunset.elevation > -0.4:
Sun.RiseSetOK = False
tsr = "Sunrise: --------"
tss = " Sunset: --------"
else:
Sun.RiseSetOK = True
sr = format_hms(Sun.Sunrise.time)
ss = format_hms(Sun.Sunset.time)
tsr = "Sunrise: " + sr
tss = " Sunset: " + ss
colL.label(text=tsr, icon='LAMP_SUN')
if p.ShowDST:
colM.label(text=tss, icon='SOLO_ON')
else:
colL.label(text=tss, icon='SOLO_ON')
### SUN POWER ###
box = self.layout.box()
toprow = box.row()
row = toprow.row(align=False)
row.alignment = 'CENTER'
col = row.column(align=True)
split = col.split(percentage=.5)
cL = split.column()
cR = split.column()
cL.alignment = 'LEFT'
cR.alignment = 'RIGHT'
cLi = cRi = 1
cR.prop(sp, "ShowPowerOnObject", text="Show power on object")
if(sp.ShowPowerOnObject):
cR.prop_search(sp, "PowerShowObject",
context.scene, "objects", text="")
cRi += 1
if sp.PowerShowObject:
Sun.check_power_obj(sp.PowerShowObject)
else:
Sun.PowerObject_verified = False
else:
Sun.PowerObject_verified = False
cL.prop(sp, "Efficiency", text="Efficiency %")
cRi += 1
cL.prop(sp, "EffectiveAngle", text="Effective angle for get power")
cRi += 1
cL.prop(sp, "SizeSunPowerObject", text="Size of house")
cLi += 1
cL.operator('sunpos.create_new_obj', 'Create')
box = self.layout.box()
toprow = box.row()
row = toprow.row(align=False)
row.alignment = 'CENTER'
col = row.column(align=True)
col.label(text="Export Sun Power into Yml file")
toprow = box.row()
row = toprow.row(align=False)
row.alignment = 'CENTER'
col = row.column(align=True)
col.prop(sp, "FileTempOutSide", text="file temperature outside csv")
toprow = box.row()
row = toprow.row(align=False)
row.alignment = 'CENTER'
col = row.column(align=True)
col.prop(sp, "ExportThermoResultsFile", text="Export File csv")
toprow = box.row()
row = toprow.row(align=False)
row.alignment = 'CENTER'
col = row.column(align=True)
col.prop(sp, "TimeTick", text="Time period calc, sec")
toprow = box.row()
row = toprow.row(align=False)
row.alignment = 'CENTER'
col = row.column(align=True)
split = col.split(percentage=.5)
cL = split.column()
cR = split.column()
cL.alignment = 'LEFT'
cR.alignment = 'RIGHT'
cLi = cRi = 1
cR.prop(sp, "ExportMonthTo", text="Exp Month To")
cL.prop(sp, "ExportMonthFrom", text="Exp Month From")
cRi += 1
cLi += 1
cR.prop(sp, "ExportDayTo", text="Exp Day To")
cL.prop(sp, "ExportDayFrom", text="Exp Day From")
cRi += 1
cLi += 1
cR.prop(sp, "ExportYearTo", text="Exp Year To")
cL.prop(sp, "ExportYearFrom", text="Exp Year From")
cRi += 1
cLi += 1
cR.prop(sp, "PowerHeatInside", text="Ext power of heat inside")
cL.prop(sp, "ExtMassInside", text="Ext mass inside")
toprow = box.row()
row = toprow.row(align=False)
row.alignment = 'CENTER'
col = row.column(align=True)
col.prop(sp, "StartTempInside", text="Start temperature inside")
toprow = box.row()
row = toprow.row(align=False)
row.alignment = 'CENTER'
col = row.column(align=True)
col.prop(sp, "ListWorkFaces", text="numbers of work faces divided by comma")
toprow = box.row()
row = toprow.row(align=False)
row.alignment = 'CENTER'
col = row.column(align=True)
col.operator('sunpos.start_calc_table', 'Start Calc')
############################################################################
class SunPos_OT_Preferences(bpy.types.Operator):
bl_idname = "world.sunpos_preferences"
bl_label = "Set preferences"
bl_description = "Press to set your preferences"
def execute(self, context):
Display.setAction('PREFERENCES')
return {'FINISHED'}
class SunPos_OT_PreferencesDone(bpy.types.Operator):
bl_idname = "world.sunpos_pref_done"
bl_label = "Preferences done"
bl_description = "Press to complete your preferences"
def execute(self, context):
Display.setAction('ENABLE')
p = context.scene.SunPos_pref_property
Sun.UsageMode = p.UsageMode
Sun.MapLocation = p.MapLocation
if not p.UseObjectGroup:
sp = context.scene.SunPos_property
sp.UseObjectGroup = False
Sun.UseObjectGroup = False
return {'FINISHED'}
class SunPos_OT_DayRange(bpy.types.Operator):
bl_idname = "world.sunpos_day_range"
bl_label = "toggleDayRange"
bl_description = "Toggle day or (month / day) range"
def execute(self, context):
sp = context.scene.SunPos_property
if Sun.UseDayMonth:
try:
dt = datetime.date(sp.Year, sp.Month, sp.Day)
sp.Day_of_year = dt.timetuple().tm_yday
except:
pass
Sun.UseDayMonth = False
else:
Sun.UseDayMonth = True
dt = (datetime.date(sp.Year, 1, 1) +
datetime.timedelta(sp.Day_of_year - 1))
sp.Day = dt.day
sp.Month = dt.month
return {'FINISHED'}
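# Illustrative sketch only (not used by the add-on): the day-of-year <->
# month/day conversion performed by SunPos_OT_DayRange above, written as two
# plain helpers so the datetime arithmetic is easy to follow.
def _day_of_year_from_date(year, month, day):
    """Return the 1-based day of the year for a calendar date."""
    return datetime.date(year, month, day).timetuple().tm_yday
def _date_from_day_of_year(year, day_of_year):
    """Return (month, day) for a 1-based day of the year."""
    dt = datetime.date(year, 1, 1) + datetime.timedelta(day_of_year - 1)
    return dt.month, dt.day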
class SunPos_OT_SetObjectGroup(bpy.types.Operator):
bl_idname = "world.sunpos_set_objects"
bl_label = "Set object group"
bl_description = "Set currently selected objects as object group"
def execute(self, context):
del Sun.Selected_objects[:]
del Sun.Selected_names[:]
if (len(bpy.context.selected_objects) > 0):
Sun.Selected_names = [x.name for x in bpy.context.selected_objects]
Sun.Selected_objects = bpy.context.selected_objects
bpy.ops.object.select_all(action='DESELECT')
Move_sun()
else:
self.report({'WARNING'}, "No objects selected")
return {'FINISHED'}
class SunPos_OT_ClearObjectGroup(bpy.types.Operator):
bl_idname = "world.sunpos_clear_objects"
bl_label = "Release object group"
bl_description = "Release object group"
def execute(self, context):
bpy.ops.object.select_all(action='DESELECT')
Sun.ObjectGroup_verified = False
Sun.verify_ObjectGroup()
try:
for x in Sun.Selected_objects:
x.select = True
except:
pass
del Sun.Selected_objects[:]
del Sun.Selected_names[:]
return {'FINISHED'}
# ---------------------------------------------------------------------------
# Choice List of places, month and day at 12:00 noon
# ---------------------------------------------------------------------------
class SunPos_OT_TimePlace(bpy.types.Operator):
bl_idname = "world.pdp_operator"
bl_label = "Place & Day Presets"
#----------- Description --------- M D UTC Lat Long DaySav
pdp = [["North Pole, Summer Solstice", 6, 21, 0, 90.000, 0.0000, False],
["Equator, Vernal Equinox", 3, 20, 0, 0.0000, 0.0000, False],
["Rio de Janeiro, May 10th", 5, 10, 3, -22.9002, -43.2334, False],
["Tokyo, August 20th", 8, 20, 9, 35.7002, 139.7669, False],
["Boston, Autumnal Equinox", 9, 22, 5, 42.3502, -71.0500, True],
["Boston, Vernal Equinox", 3, 20, 5, 42.3502, -71.0500, True],
["Honolulu, Winter Solstice", 12, 21, 10, 21.3001, -157.850, False],
["Honolulu, Summer Solstice", 6, 21, 10, 21.3001, -157.850, False]]
from bpy.props import EnumProperty
timePlacePresets = EnumProperty(
name="Time & place presets",
description="Preset Place & Day",
items=(
("7", pdp[7][0], ""),
("6", pdp[6][0], ""),
("5", pdp[5][0], ""),
("4", pdp[4][0], ""),
("3", pdp[3][0], ""),
("2", pdp[2][0], ""),
("1", pdp[1][0], ""),
("0", pdp[0][0], ""),
),
default="4")
def execute(self, context):
sp = context.scene.SunPos_property
pdp = self.pdp
i = int(self.properties.timePlacePresets)
it = pdp[i]
Sun.PlaceLabel = it[0]
sp.Month = it[1]
sp.Day = it[2]
sp.Time = 12.00
sp.UTCzone = it[3]
sp.Latitude = it[4]
sp.Longitude = it[5]
sp.DaylightSavings = it[6]
dt = datetime.date(sp.Year, sp.Month, sp.Day)
sp.Day_of_year = dt.timetuple().tm_yday
# Force screen update
Display.refresh()
return {'FINISHED'}
# ---------------------------------------------------------------------------
# Choice List of world maps
# ---------------------------------------------------------------------------
class SunPos_OT_MapChoice(bpy.types.Operator):
bl_idname = "world.wmp_operator"
bl_label = "World map files"
wmp = [["1536 x 768", "WorldMap.jpg"],
["768 x 384", "WorldMapLR.jpg"],
["512 x 256", "WorldMapLLR.jpg"],
["Textureless", "None"]]
from bpy.props import EnumProperty
mapPresets = EnumProperty(
name="World map presets",
description="world map files",
items=(
("3", wmp[3][0], ""),
("2", wmp[2][0], ""),
("1", wmp[1][0], ""),
("0", wmp[0][0], ""),
),
default="2")
def execute(self, context):
sp = context.scene.SunPos_property
wmp = self.wmp
i = int(self.properties.mapPresets)
sp.MapName = wmp[i]
Sun.MapName = wmp[i][1]
return {'FINISHED'}
|
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
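# Illustrative sketch only, following this file's Python 2 conventions: shows
# how bufreverse() swaps the byte order inside each 32-bit word (on a
# little-endian host, since '@I' uses native byte order) and how wordreverse()
# reverses the order of the 32-bit words themselves.
def _demo_reverse():
    sample = '00112233aabbccdd'.decode('hex')  # two 32-bit words
    print "bufreverse: ", bufreverse(sample).encode('hex')   # 33221100ddccbbaa
    print "wordreverse:", wordreverse(sample).encode('hex')  # aabbccdd00112233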
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8944
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions for coordinate-related functionality.
This is generally just wrapping around the object-oriented coordinates
framework, but it is useful for some users who are used to more functional
interfaces.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from .. import units as u
from ..constants import c
from .. import _erfa as erfa
from ..io import ascii
from ..utils import isiterable, data
from .sky_coordinate import SkyCoord
from .builtin_frames import GCRS, PrecessedGeocentric
from .representation import SphericalRepresentation, CartesianRepresentation
from .builtin_frames.utils import get_jd12
__all__ = ['cartesian_to_spherical', 'spherical_to_cartesian', 'get_sun',
'concatenate', 'get_constellation']
def cartesian_to_spherical(x, y, z):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
    Note that the resulting angles are in latitude/longitude or
elevation/azimuthal form. I.e., the origin is along the equator
rather than at the north pole.
.. note::
This function simply wraps functionality provided by the
`~astropy.coordinates.CartesianRepresentation` and
`~astropy.coordinates.SphericalRepresentation` classes. In general,
for both performance and readability, we suggest using these classes
directly. But for situations where a quick one-off conversion makes
sense, this function is provided.
Parameters
----------
x : scalar, array-like, or `~astropy.units.Quantity`
The first cartesian coordinate.
y : scalar, array-like, or `~astropy.units.Quantity`
The second cartesian coordinate.
z : scalar, array-like, or `~astropy.units.Quantity`
The third cartesian coordinate.
Returns
-------
r : `~astropy.units.Quantity`
The radial coordinate (in the same units as the inputs).
lat : `~astropy.units.Quantity`
The latitude in radians
lon : `~astropy.units.Quantity`
The longitude in radians
"""
if not hasattr(x, 'unit'):
x = x * u.dimensionless_unscaled
if not hasattr(y, 'unit'):
y = y * u.dimensionless_unscaled
if not hasattr(z, 'unit'):
z = z * u.dimensionless_unscaled
cart = CartesianRepresentation(x, y, z)
sph = cart.represent_as(SphericalRepresentation)
return sph.distance, sph.lat, sph.lon
def spherical_to_cartesian(r, lat, lon):
"""
Converts spherical polar coordinates to rectangular cartesian
coordinates.
Note that the input angles should be in latitude/longitude or
elevation/azimuthal form. I.e., the origin is along the equator
rather than at the north pole.
.. note::
This is a low-level function used internally in
`astropy.coordinates`. It is provided for users if they really
want to use it, but it is recommended that you use the
`astropy.coordinates` coordinate systems.
Parameters
----------
r : scalar, array-like, or `~astropy.units.Quantity`
The radial coordinate (in the same units as the inputs).
lat : scalar, array-like, or `~astropy.units.Quantity`
The latitude (in radians if array or scalar)
lon : scalar, array-like, or `~astropy.units.Quantity`
The longitude (in radians if array or scalar)
Returns
-------
x : float or array
The first cartesian coordinate.
y : float or array
The second cartesian coordinate.
z : float or array
The third cartesian coordinate.
"""
if not hasattr(r, 'unit'):
r = r * u.dimensionless_unscaled
if not hasattr(lat, 'unit'):
lat = lat * u.radian
if not hasattr(lon, 'unit'):
lon = lon * u.radian
sph = SphericalRepresentation(distance=r, lat=lat, lon=lon)
cart = sph.represent_as(CartesianRepresentation)
return cart.x, cart.y, cart.z
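# Illustrative sketch only (not part of the public API): a quick round trip
# through the two helpers above, relying only on objects already imported in
# this module.
def _demo_spherical_round_trip():
    r, lat, lon = cartesian_to_spherical(1 * u.kpc, 1 * u.kpc, 1 * u.kpc)
    x, y, z = spherical_to_cartesian(r, lat, lon)
    # Each cartesian component should come back close to 1 kpc.
    return np.allclose([x.to_value(u.kpc), y.to_value(u.kpc), z.to_value(u.kpc)],
                       1.0)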
def get_sun(time):
"""
Determines the location of the sun at a given time (or times, if the input
is an array `~astropy.time.Time` object), in geocentric coordinates.
Parameters
----------
time : `~astropy.time.Time`
The time(s) at which to compute the location of the sun.
Returns
-------
newsc : `~astropy.coordinates.SkyCoord`
The location of the sun as a `~astropy.coordinates.SkyCoord` in the
`~astropy.coordinates.GCRS` frame.
Notes
-----
The algorithm for determining the sun/earth relative position is based
on the simplified version of VSOP2000 that is part of ERFA. Compared to
JPL's ephemeris, it should be good to about 4 km (in the Sun-Earth
vector) from 1900-2100 C.E., 8 km for the 1800-2200 span, and perhaps
    250 km over the 1000-3000 C.E. span.
"""
earth_pv_helio, earth_pv_bary = erfa.epv00(*get_jd12(time, 'tdb'))
# We have to manually do aberration because we're outputting directly into
# GCRS
earth_p = earth_pv_helio[..., 0, :]
earth_v = earth_pv_bary[..., 1, :]
# convert barycentric velocity to units of c, but keep as array for passing in to erfa
earth_v /= c.to_value(u.au/u.d)
dsun = np.sqrt(np.sum(earth_p**2, axis=-1))
invlorentz = (1-np.sum(earth_v**2, axis=-1))**0.5
properdir = erfa.ab(earth_p/dsun.reshape(dsun.shape + (1,)),
-earth_v, dsun, invlorentz)
cartrep = CartesianRepresentation(x=-dsun*properdir[..., 0] * u.AU,
y=-dsun*properdir[..., 1] * u.AU,
z=-dsun*properdir[..., 2] * u.AU)
return SkyCoord(cartrep, frame=GCRS(obstime=time))
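# Illustrative sketch only (not part of the public API): the geocentric solar
# position for one instant. It assumes this module is used from an installed
# astropy, so that `astropy.time.Time` is importable at call time.
def _demo_get_sun():
    from astropy.time import Time
    sun = get_sun(Time('2015-06-21 12:00:00'))
    # Apparent RA/Dec of the Sun in the GCRS frame, in degrees.
    return sun.ra.deg, sun.dec.deg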
def concatenate(coords):
"""
Combine multiple coordinate objects into a single
`~astropy.coordinates.SkyCoord`.
"Coordinate objects" here mean frame objects with data,
`~astropy.coordinates.SkyCoord`, or representation objects. Currently,
they must all be in the same frame, but in a future version this may be
    relaxed to allow inhomogeneous sequences of objects.
Parameters
----------
coords : sequence of coordinate objects
The objects to concatenate
Returns
-------
cskycoord : SkyCoord
A single sky coordinate with its data set to the concatenation of all
the elements in ``coords``
"""
if getattr(coords, 'isscalar', False) or not isiterable(coords):
raise TypeError('The argument to concatenate must be iterable')
return SkyCoord(coords)
# global dictionary that caches repeatedly-needed info for get_constellation
_constellation_data = {}
def get_constellation(coord, short_name=False, constellation_list='iau'):
"""
    Determines the constellation(s) that contain the given coordinate object.
Parameters
----------
    coord : coordinate object
The object to determine the constellation of.
short_name : bool
If True, the returned names are the IAU-sanctioned abbreviated
names. Otherwise, full names for the constellations are used.
constellation_list : str
The set of constellations to use. Currently only ``'iau'`` is
supported, meaning the 88 "modern" constellations endorsed by the IAU.
Returns
-------
constellation : str or string array
        If ``coord`` is a scalar coordinate, returns the name of the
constellation. If it is an array coordinate object, it returns an array
of names.
Notes
-----
To determine which constellation a point on the sky is in, this precesses
to B1875, and then uses the Delporte boundaries of the 88 modern
constellations, as tabulated by
`Roman 1987 <http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42>`_.
"""
if constellation_list != 'iau':
raise ValueError("only 'iau' us currently supported for constellation_list")
# read the data files and cache them if they haven't been already
if not _constellation_data:
cdata = data.get_pkg_data_contents('data/constellation_data_roman87.dat')
ctable = ascii.read(cdata, names=['ral', 'rau', 'decl', 'name'])
cnames = data.get_pkg_data_contents('data/constellation_names.dat', encoding='UTF8')
cnames_short_to_long = dict([(l[:3], l[4:])
for l in cnames.split('\n')
if not l.startswith('#')])
cnames_long = np.array([cnames_short_to_long[nm] for nm in ctable['name']])
_constellation_data['ctable'] = ctable
_constellation_data['cnames_long'] = cnames_long
else:
ctable = _constellation_data['ctable']
cnames_long = _constellation_data['cnames_long']
isscalar = coord.isscalar
# if it is geocentric, we reproduce the frame but with the 1875 equinox,
# which is where the constellations are defined
constel_coord = coord.transform_to(PrecessedGeocentric(equinox='B1875'))
if isscalar:
rah = constel_coord.ra.ravel().hour
decd = constel_coord.dec.ravel().deg
else:
rah = constel_coord.ra.hour
decd = constel_coord.dec.deg
constellidx = -np.ones(len(rah), dtype=int)
notided = constellidx == -1 # should be all
for i, row in enumerate(ctable):
msk = (row['ral'] < rah) & (rah < row['rau']) & (decd > row['decl'])
constellidx[notided & msk] = i
notided = constellidx == -1
if np.sum(notided) == 0:
break
else:
raise ValueError('Could not find constellation for coordinates {0}'.format(constel_coord[notided]))
if short_name:
names = ctable['name'][constellidx]
else:
names = cnames_long[constellidx]
if isscalar:
return names[0]
else:
return names
|
|
import sys
import threading
import weakref
from django.utils.six.moves import xrange
if sys.version_info < (3, 4):
from .weakref_backports import WeakMethod
else:
from weakref import WeakMethod
def _make_id(target):
if hasattr(target, '__func__'):
return (id(target.__self__), id(target.__func__))
return id(target)
NONE_ID = _make_id(None)
# A marker for caching
NO_RECEIVERS = object()
class Signal(object):
"""
Base class for all signals
Internal attributes:
receivers
{ receiverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None, use_caching=False):
"""
Create a new signal.
providing_args
A list of the arguments this signal can pass along in a send() call.
"""
self.receivers = []
if providing_args is None:
providing_args = []
self.providing_args = set(providing_args)
self.lock = threading.Lock()
self.use_caching = use_caching
# For convenience we create empty caches even if they are not used.
# A note about caching: if use_caching is defined, then for each
# distinct sender we cache the receivers that sender has in
# 'sender_receivers_cache'. The cache is cleaned when .connect() or
# .disconnect() is called and populated on send().
self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
self._dead_receivers = False
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
If weak is True, then receiver must be weak referenceable.
Receivers must be able to accept keyword arguments.
If receivers have a dispatch_uid attribute, the receiver will
not be added if another receiver already exists with that
dispatch_uid.
sender
                The sender to which the receiver should respond. Can be any
                Python object, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
from django.conf import settings
# If DEBUG is on, check that we got a good receiver
if settings.configured and settings.DEBUG:
import inspect
assert callable(receiver), "Signal receivers must be callable."
# Check for **kwargs
# Not all callables are inspectable with getargspec, so we'll
# try a couple different ways but in the end fall back on assuming
# it is -- we don't want to prevent registration of valid but weird
# callables.
try:
argspec = inspect.getargspec(receiver)
except TypeError:
try:
argspec = inspect.getargspec(receiver.__call__)
except (TypeError, AttributeError):
argspec = None
if argspec:
assert argspec[2] is not None, \
"Signal receivers must accept keyword arguments (**kwargs)."
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
ref = weakref.ref
receiver_object = receiver
# Check for bound methods
if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
ref = WeakMethod
receiver_object = receiver.__self__
if sys.version_info >= (3, 4):
receiver = ref(receiver)
weakref.finalize(receiver_object, self._remove_receiver)
else:
receiver = ref(receiver, self._remove_receiver)
with self.lock:
self._clear_dead_receivers()
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
self.sender_receivers_cache.clear()
def disconnect(self, receiver=None, sender=None, weak=True, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
        will be removed from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be none if
dispatch_uid is specified.
sender
The registered sender to disconnect
weak
The weakref state to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
with self.lock:
self._clear_dead_receivers()
for index in xrange(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
del self.receivers[index]
break
self.sender_receivers_cache.clear()
def has_listeners(self, sender=None):
return bool(self._live_receivers(sender))
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop, so it is quite possible to not have all
        receivers called if a receiver raises an error.
Arguments:
sender
                The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
for receiver in self._live_receivers(sender):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses
def send_robust(self, sender, **named):
"""
Send signal from sender to all connected receivers catching errors.
Arguments:
sender
The sender of the signal. Can be any python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers. These
arguments must be a subset of the argument names defined in
providing_args.
Return a list of tuple pairs [(receiver, response), ... ]. May raise
DispatcherKeyError.
If any receiver raises an error (specifically any subclass of
Exception), the error instance is returned as the result for that
receiver. The traceback is always attached to the error at
``__traceback__``.
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
for receiver in self._live_receivers(sender):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception as err:
if not hasattr(err, '__traceback__'):
err.__traceback__ = sys.exc_info()[2]
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _clear_dead_receivers(self):
# Note: caller is assumed to hold self.lock.
if self._dead_receivers:
self._dead_receivers = False
new_receivers = []
for r in self.receivers:
if isinstance(r[1], weakref.ReferenceType) and r[1]() is None:
continue
new_receivers.append(r)
self.receivers = new_receivers
def _live_receivers(self, sender):
"""
Filter sequence of receivers to get resolved, live receivers.
        This checks for weak references and resolves them, then returns only
        live receivers.
"""
receivers = None
if self.use_caching and not self._dead_receivers:
receivers = self.sender_receivers_cache.get(sender)
# We could end up here with NO_RECEIVERS even if we do check this case in
# .send() prior to calling _live_receivers() due to concurrent .send() call.
if receivers is NO_RECEIVERS:
return []
if receivers is None:
with self.lock:
self._clear_dead_receivers()
senderkey = _make_id(sender)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == NONE_ID or r_senderkey == senderkey:
receivers.append(receiver)
if self.use_caching:
if not receivers:
self.sender_receivers_cache[sender] = NO_RECEIVERS
else:
# Note, we must cache the weakref versions.
self.sender_receivers_cache[sender] = receivers
non_weak_receivers = []
for receiver in receivers:
if isinstance(receiver, weakref.ReferenceType):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
non_weak_receivers.append(receiver)
else:
non_weak_receivers.append(receiver)
return non_weak_receivers
def _remove_receiver(self, receiver=None):
# Mark that the self.receivers list has dead weakrefs. If so, we will
# clean those up in connect, disconnect and _live_receivers while
# holding self.lock. Note that doing the cleanup here isn't a good
# idea, _remove_receiver() will be called as side effect of garbage
# collection, and so the call can happen while we are already holding
# self.lock.
self._dead_receivers = True
def receiver(signal, **kwargs):
"""
A decorator for connecting receivers to signals. Used by passing in the
signal (or list of signals) and keyword arguments to connect::
@receiver(post_save, sender=MyModel)
def signal_receiver(sender, **kwargs):
...
@receiver([post_save, post_delete], sender=MyModel)
def signals_receiver(sender, **kwargs):
...
"""
def _decorator(func):
if isinstance(signal, (list, tuple)):
for s in signal:
s.connect(func, **kwargs)
else:
signal.connect(func, **kwargs)
return func
return _decorator
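# Illustrative sketch (not part of the module above): a minimal, hedged example
# of how send_robust() reports receiver failures as (receiver, exception) pairs
# instead of raising. It assumes Signal() can be instantiated with no arguments,
# as in Django's django.dispatch.Signal, and is guarded so nothing runs at import.
if __name__ == '__main__':
    upload_done = Signal()

    @receiver(upload_done)
    def ok_handler(sender, **kwargs):
        return 'ok'

    @receiver(upload_done)
    def broken_handler(sender, **kwargs):
        raise RuntimeError('boom')

    # send_robust() never raises: the RuntimeError comes back as the "response"
    # paired with broken_handler.
    for recv, result in upload_done.send_robust(sender=None):
        print(recv.__name__, result)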
|
|
# coding: utf-8
import inspect
import re
import json
import logging
from itertools import filterfalse
import arrow
from jira import JIRA
from jira.exceptions import JIRAError
from lazy import lazy
from slackbot.bot import Bot, listen_to, respond_to
from . import settings
from utils.messages_cache import MessagesCache
from utils.imageproxy import converturl_proxyurl
from utils.notifier_bot import NotifierBot, NotifierJob
from utils.slackbot_utils import send_message
logger = logging.getLogger(__name__)
MAX_NOTIFIERS_WORKERS = 2
def get_Jira_instance(server):
auth = None
if 'username' in server and 'password' in server:
auth = (server['username'], server['password'])
return JIRA(
options={
'server': server['host'],
'verify': settings.servers.verify_ssl},
basic_auth=auth,
get_server_info=False,
max_retries=1
)
class JiraBot(object):
def __init__(self, cache, jira_server, imageproxy_server, prefixes):
self.__cache = cache
self.__jira_server = jira_server
self.__imageproxy_server = imageproxy_server
self.__prefixes = prefixes
self.__jira_regex = re.compile(self.get_pattern(), re.IGNORECASE)
@lazy
def __jira(self):
return get_Jira_instance(self.__jira_server)
def get_pattern(self, prefixes=None):
if prefixes is None:
prefixes = self.__prefixes
jira_prefixes = '|'.join(prefixes)
return r'(?:^|\s|[\W]+)(?<!CLEAN\s)((?:{})-[\d]+)(?:$|\s|[\W]+)'\
.format(jira_prefixes)
def get_prefixes(self):
return self.__prefixes
def get_issue_status(self, key):
try:
issue = self.__jira.issue(key, fields='status')
return issue.fields.status.name
except JIRAError:
return None
def close(self, key, user):
issue = self.__jira.issue(key, fields='subtasks,status')
comment = 'Closed by AtlassianBot (Original user: {})'.format(user)
for subtask in issue.fields.subtasks:
if str(subtask.fields.status) != 'Closed':
self.__jira.transition_issue(
subtask,
'Closed',
comment=comment,
assignee={'name': user}
)
if str(issue.fields.status) != 'Closed':
self.__jira.transition_issue(
issue,
'Closed',
comment=comment,
assignee={'name': user}
)
def display_issues(self, message):
attachments = []
issues = self.__jira_regex.findall(message.body['text'])
def filter_predicate(x):
return self.__cache.IsInCache(self.__get_cachekey(x, message))
for issue in filterfalse(filter_predicate, issues):
self.__cache.AddToCache(self.__get_cachekey(issue, message))
issue_message = self.get_issue_message(issue)
if issue_message is None:
issue_message = self.__get_issuenotfound_message(issue)
attachments.append(issue_message)
if attachments:
send_message(message, '', json.dumps(attachments))
def get_issue_message(self, key):
try:
issue = self.__jira.issue(key, fields='summary,issuetype')
icon = converturl_proxyurl(
self.__imageproxy_server['host'],
issue.fields.issuetype.iconUrl)
summary = issue.fields.summary.encode('utf8')
return {
'fallback': '{key} - {summary}\n{url}'.format(
key=issue.key,
summary=summary.decode(),
url=issue.permalink()
),
'author_name': issue.key,
'author_link': issue.permalink(),
'author_icon': icon,
'text': summary.decode(),
'color': '#59afe1'
}
except JIRAError as ex:
return self.__get_error_message(ex)
def __get_issuenotfound_message(self, key):
return {
'fallback': 'Issue {key} not found'.format(key=key),
'author_name': key,
'text': ':exclamation: Issue not found',
'color': 'warning'
}
def __get_error_message(self, exception):
if (exception.status_code == 401):
return {
'fallback': 'Jira authentication error',
'text': ':exclamation: Jira authentication error',
'color': 'danger'
}
def __get_cachekey(self, issue, message):
return issue + message.body['channel']
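# Illustrative sketch (assumption, not part of the plugin): what the issue-key
# pattern built by JiraBot.get_pattern() extracts from free text. The 'ABC'/'XYZ'
# prefixes are hypothetical stand-ins for settings.plugins.jirabot.prefixes;
# guarded so nothing runs at import time.
if __name__ == '__main__':
    demo_pattern = r'(?:^|\s|[\W]+)(?<!CLEAN\s)((?:{})-[\d]+)(?:$|\s|[\W]+)'\
        .format('ABC|XYZ')
    demo_text = 'Please look at ABC-12 and XYZ-7, but skip CLEAN ABC-99.'
    # Keys prefixed by "CLEAN " are deliberately excluded by the lookbehind.
    print(re.findall(demo_pattern, demo_text, re.IGNORECASE))  # ['ABC-12', 'XYZ-7']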
class JiraNotifierBot(NotifierBot):
def __init__(self,
jira_server,
imageproxy_server,
config,
slackclient=None):
super().__init__(slackclient)
self.__jira_server = jira_server
self.__imageproxy_server = imageproxy_server
self._jobs = list(self.submit_jobs(config))
def submit_jobs(self, config):
for notifier_settings in config['notifiers']:
logger.info('registered JiraNotifierBot for query \'%s\' '
'on channel \'#%s\'',
notifier_settings['query'],
notifier_settings['channel'])
job = JiraNotifierJob(
self.__jira,
self.__imageproxy_server,
notifier_settings,
config['polling_interval'])
self.submit(job)
yield job
@lazy
def __jira(self):
return get_Jira_instance(self.__jira_server)
class JiraNotifierJob(NotifierJob):
def __init__(self, jira, imageproxy, config, polling_interval):
super().__init__(config['channel'], polling_interval)
self.__jira = jira
self.__imageproxy = imageproxy
self.__config = config
def init(self):
# First query to retrieve last matching task
query = '{} AND status = Closed ORDER BY updated DESC'\
.format(self.__config['query'])
results = self.__jira.search_issues(query, maxResults=1)
if len(results) == 0:
logger.error('No initial issue found')
return
self.__last_result = results[0]
def run(self):
# Convert last issue update date
# to a compatible timestamp for Jira
date = arrow.get(self.__last_result.fields.updated)
last_update = (date.timestamp + 1) * 1000
query = '{} AND status CHANGED TO Closed DURING({}, NOW()) '\
'ORDER BY updated DESC'\
.format(self.__config['query'], last_update)
fields = 'summary,customfield_10012,updated,issuetype,assignee'
results = self.__jira.search_issues(
query,
fields=fields,
expand='changelog')
if len(results) > 0:
self.__last_result = results[0]
attachments = []
for issue in results[::-1]:
summary = issue.fields.summary.encode('utf8')
icon = converturl_proxyurl(
self.__imageproxy['host'],
issue.fields.issuetype.iconUrl)
sps = self.__get_storypoints(issue)
sps = self.__formatvalue(sps)
author = self.__get_author(issue)
author = self.__formatvalue(author)
status = self.__get_status(issue)
status = self.__formatvalue(status)
attachments.append({
'fallback': '{key} - {summary}\n{url}'.format(
key=issue.key,
summary=summary.decode(),
url=issue.permalink()
),
'author_name': issue.key,
'author_link': issue.permalink(),
'author_icon': icon,
'text': summary.decode(),
'color': '#14892c',
'mrkdwn_in': ['fields'],
'fields': [
{
'title': '{} by'.format(status),
'value': author,
'short': True
},
{
'title': 'Story points',
'value': sps,
'short': True
}
],
})
self.send_message(attachments)
def __get_storypoints(self, issue):
if hasattr(issue.fields, 'customfield_10012') and \
issue.fields.customfield_10012 is not None:
return issue.fields.customfield_10012
def __get_author(self, issue):
if len(issue.changelog.histories) > 0:
event = issue.changelog.histories[-1]
# Search the history to see if we have a transition to Closed
# with a change of the assignee
# => it's probably a transition done by the bot
res = next((x for x in event.items if x.field == 'assignee'), None)
if res is not None:
author = res.to
else:
author = issue.fields.assignee.name
user_id = self._slackclient.find_user_by_name(author)
return '<@{}>'.format(user_id) if user_id else '@{}'.format(author)
def __get_status(self, issue):
if len(issue.changelog.histories) > 0:
event = issue.changelog.histories[-1]
for item in event.items:
if item.field == 'status':
return item.toString
def __formatvalue(self, value):
return value if value else 'N/A'
def __get_channel(self, channelname):
for id, channel in list(self.slackclient.channels.items()):
if channel.get('name', None) == channelname:
return id
if (settings.plugins.jiranotifier.enabled):
JiraNotifierBot(
settings.servers.jira,
settings.servers.imageproxy,
settings.plugins.jiranotifier)
instance = JiraBot(
MessagesCache(),
settings.servers.jira,
settings.servers.imageproxy,
settings.plugins.jirabot.prefixes)
if (settings.plugins.jirabot.enabled):
@listen_to(instance.get_pattern(), re.IGNORECASE)
@respond_to(instance.get_pattern(), re.IGNORECASE)
def jirabot(message, _):
instance.display_issues(message)
|
|
import wx
class AddNewSourcePanelView ( wx.Panel ):
def __init__( self, parent ):
wx.Panel.__init__ ( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.Size( 402,528 ), style = wx.TAB_TRAVERSAL )
self.SetMinSize( wx.Size( 402,528 ) )
bSizer1 = wx.BoxSizer( wx.VERTICAL )
sbSizer1 = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Required Fields:" ), wx.VERTICAL )
bSizer18 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText12 = wx.StaticText( sbSizer1.GetStaticBox(), wx.ID_ANY, u"Organization", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText12.Wrap( -1 )
bSizer18.Add( self.m_staticText12, 0, wx.ALL, 5 )
bSizer18.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
self.m_textCtrl3 = wx.TextCtrl( sbSizer1.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_textCtrl3.SetMinSize( wx.Size( 280,-1 ) )
bSizer18.Add( self.m_textCtrl3, 0, wx.ALL, 5 )
sbSizer1.Add( bSizer18, 1, wx.EXPAND, 5 )
bSizer19 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText14 = wx.StaticText( sbSizer1.GetStaticBox(), wx.ID_ANY, u"Description", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText14.Wrap( -1 )
bSizer19.Add( self.m_staticText14, 0, wx.ALL, 5 )
bSizer19.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
self.m_textCtrl31 = wx.TextCtrl( sbSizer1.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_textCtrl31.SetMinSize( wx.Size( 280,-1 ) )
bSizer19.Add( self.m_textCtrl31, 0, wx.ALL, 5 )
sbSizer1.Add( bSizer19, 1, wx.EXPAND, 5 )
bSizer20 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText15 = wx.StaticText( sbSizer1.GetStaticBox(), wx.ID_ANY, u"Citation", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText15.Wrap( -1 )
bSizer20.Add( self.m_staticText15, 0, wx.ALL, 5 )
bSizer20.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
self.m_textCtrl32 = wx.TextCtrl( sbSizer1.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_textCtrl32.SetMinSize( wx.Size( 280,-1 ) )
bSizer20.Add( self.m_textCtrl32, 0, wx.ALL, 5 )
sbSizer1.Add( bSizer20, 1, wx.EXPAND, 5 )
bSizer21 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText16 = wx.StaticText( sbSizer1.GetStaticBox(), wx.ID_ANY, u"Contact Name", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText16.Wrap( -1 )
bSizer21.Add( self.m_staticText16, 0, wx.ALL, 5 )
bSizer21.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
self.m_textCtrl33 = wx.TextCtrl( sbSizer1.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_textCtrl33.SetMinSize( wx.Size( 280,-1 ) )
bSizer21.Add( self.m_textCtrl33, 0, wx.ALL, 5 )
sbSizer1.Add( bSizer21, 1, wx.EXPAND, 5 )
bSizer22 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText17 = wx.StaticText( sbSizer1.GetStaticBox(), wx.ID_ANY, u"Contact Address", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText17.Wrap( -1 )
bSizer22.Add( self.m_staticText17, 0, wx.ALL, 5 )
bSizer22.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
self.m_textCtrl34 = wx.TextCtrl( sbSizer1.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_textCtrl34.SetMinSize( wx.Size( 280,-1 ) )
bSizer22.Add( self.m_textCtrl34, 0, wx.ALL, 5 )
sbSizer1.Add( bSizer22, 1, wx.EXPAND, 5 )
bSizer23 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText18 = wx.StaticText( sbSizer1.GetStaticBox(), wx.ID_ANY, u"Contact Phone", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText18.Wrap( -1 )
bSizer23.Add( self.m_staticText18, 0, wx.ALL, 5 )
bSizer23.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
self.m_textCtrl35 = wx.TextCtrl( sbSizer1.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_textCtrl35.SetMinSize( wx.Size( 280,-1 ) )
bSizer23.Add( self.m_textCtrl35, 0, wx.ALL, 5 )
sbSizer1.Add( bSizer23, 1, wx.EXPAND, 5 )
bSizer24 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText19 = wx.StaticText( sbSizer1.GetStaticBox(), wx.ID_ANY, u"Contact Email", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText19.Wrap( -1 )
bSizer24.Add( self.m_staticText19, 0, wx.ALL, 5 )
bSizer24.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
self.m_textCtrl36 = wx.TextCtrl( sbSizer1.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_textCtrl36.SetMinSize( wx.Size( 280,-1 ) )
bSizer24.Add( self.m_textCtrl36, 0, wx.ALL, 5 )
sbSizer1.Add( bSizer24, 1, wx.EXPAND, 5 )
bSizer25 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText20 = wx.StaticText( sbSizer1.GetStaticBox(), wx.ID_ANY, u"State", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText20.Wrap( -1 )
bSizer25.Add( self.m_staticText20, 0, wx.ALL, 5 )
bSizer25.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
self.m_textCtrl37 = wx.TextCtrl( sbSizer1.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_textCtrl37.SetMinSize( wx.Size( 280,-1 ) )
bSizer25.Add( self.m_textCtrl37, 0, wx.ALL, 5 )
sbSizer1.Add( bSizer25, 1, wx.EXPAND, 5 )
bSizer28 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText21 = wx.StaticText( sbSizer1.GetStaticBox(), wx.ID_ANY, u"City", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText21.Wrap( -1 )
bSizer28.Add( self.m_staticText21, 0, wx.ALL, 5 )
bSizer28.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
self.m_textCtrl20 = wx.TextCtrl( sbSizer1.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_textCtrl20.SetMinSize( wx.Size( 142,-1 ) )
bSizer28.Add( self.m_textCtrl20, 0, wx.ALL, 5 )
self.m_staticText22 = wx.StaticText( sbSizer1.GetStaticBox(), wx.ID_ANY, u"Zip", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText22.Wrap( -1 )
bSizer28.Add( self.m_staticText22, 0, wx.ALL, 5 )
self.m_textCtrl21 = wx.TextCtrl( sbSizer1.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_textCtrl21.SetMinSize( wx.Size( 100,-1 ) )
bSizer28.Add( self.m_textCtrl21, 0, wx.ALL, 5 )
sbSizer1.Add( bSizer28, 1, wx.EXPAND, 5 )
bSizer8 = wx.BoxSizer( wx.HORIZONTAL )
bSizer8.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
m_comboBox1Choices = []
self.m_comboBox1 = wx.ComboBox( sbSizer1.GetStaticBox(), wx.ID_ANY, u"ISO Metadata", wx.DefaultPosition, wx.DefaultSize, m_comboBox1Choices, 0 )
self.m_comboBox1.SetMinSize( wx.Size( 230,-1 ) )
bSizer8.Add( self.m_comboBox1, 0, wx.ALL, 5 )
self.m_button2 = wx.Button( sbSizer1.GetStaticBox(), wx.ID_ANY, u"+", wx.DefaultPosition, wx.Size( 40,27 ), 0 )
self.m_button2.SetFont( wx.Font( 15, 70, 90, 92, False, wx.EmptyString ) )
bSizer8.Add( self.m_button2, 0, wx.ALL, 5 )
sbSizer1.Add( bSizer8, 1, wx.EXPAND, 5 )
bSizer1.Add( sbSizer1, 1, 0, 5 )
sbSizer2 = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Optional Fields:" ), wx.VERTICAL )
bSizer26 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText23 = wx.StaticText( sbSizer2.GetStaticBox(), wx.ID_ANY, u"Link", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText23.Wrap( -1 )
bSizer26.Add( self.m_staticText23, 0, wx.ALL, 5 )
bSizer26.AddSpacer( ( 70, 0), 1, wx.EXPAND, 5 )
self.m_textCtrl22 = wx.TextCtrl( sbSizer2.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_textCtrl22.SetMinSize( wx.Size( 280,-1 ) )
bSizer26.Add( self.m_textCtrl22, 0, wx.ALL, 5 )
sbSizer2.Add( bSizer26, 1, wx.EXPAND, 5 )
bSizer1.Add( sbSizer2, 1, 0, 5 )
m_sdbSizer1 = wx.StdDialogButtonSizer()
self.m_sdbSizer1OK = wx.Button( self, wx.ID_OK )
m_sdbSizer1.AddButton( self.m_sdbSizer1OK )
self.m_sdbSizer1Cancel = wx.Button( self, wx.ID_CANCEL )
m_sdbSizer1.AddButton( self.m_sdbSizer1Cancel )
m_sdbSizer1.Realize()
bSizer1.Add( m_sdbSizer1, 1, wx.EXPAND, 5 )
self.SetSizer( bSizer1 )
self.Layout()
def __del__( self ):
pass
|
|
import json
import string
import uuid
from copy import copy
from datetime import datetime
from django.apps import apps
from django.conf import settings
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext
import jinja2
import six
import olympia.core.logger
from olympia import amo, constants
from olympia.access.models import Group
from olympia.addons.models import Addon
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import ManagerBase, ModelBase
from olympia.bandwagon.models import Collection
from olympia.files.models import File
from olympia.ratings.models import Rating
from olympia.tags.models import Tag
from olympia.users.models import UserProfile
from olympia.users.templatetags.jinja_helpers import user_link
from olympia.versions.models import Version
log = olympia.core.logger.getLogger('z.amo.activity')
# Number of times a token can be used.
MAX_TOKEN_USE_COUNT = 100
class ActivityLogToken(ModelBase):
id = PositiveAutoField(primary_key=True)
version = models.ForeignKey(
Version, related_name='token', on_delete=models.CASCADE)
user = models.ForeignKey(
'users.UserProfile', related_name='activity_log_tokens',
on_delete=models.CASCADE)
uuid = models.UUIDField(default=uuid.uuid4, unique=True)
use_count = models.IntegerField(
default=0,
help_text='Stores the number of times the token has been used')
class Meta:
db_table = 'log_activity_tokens'
unique_together = ('version', 'user')
def is_expired(self):
return self.use_count >= MAX_TOKEN_USE_COUNT
def is_valid(self):
return (not self.is_expired() and
self.version == self.version.addon.find_latest_version(
channel=self.version.channel, exclude=()))
def expire(self):
self.update(use_count=MAX_TOKEN_USE_COUNT)
def increment_use(self):
self.__class__.objects.filter(pk=self.pk).update(
use_count=models.expressions.F('use_count') + 1)
self.use_count = self.use_count + 1
class ActivityLogEmails(ModelBase):
"""A log of message ids of incoming emails so we don't duplicate process
them."""
messageid = models.CharField(max_length=255, unique=True)
class Meta:
db_table = 'log_activity_emails'
class AddonLog(ModelBase):
"""
This table is for indexing the activity log by addon.
"""
addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
class Meta:
db_table = 'log_activity_addon'
ordering = ('-created',)
def transfer(self, new_addon):
try:
# arguments is a structure:
# ``arguments = [{'addons.addon':12}, {'addons.addon':1}, ... ]``
arguments = json.loads(self.activity_log._arguments)
except Exception:
log.debug('unserializing data from addon_log failed: %s' %
self.activity_log.id)
return None
new_arguments = []
for item in arguments:
if item.get('addons.addon', 0) == self.addon.id:
new_arguments.append({'addons.addon': new_addon.id})
else:
new_arguments.append(item)
self.activity_log.update(_arguments=json.dumps(new_arguments))
self.update(addon=new_addon)
class CommentLog(ModelBase):
"""
This table is for indexing the activity log by comment.
"""
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
comments = models.TextField()
class Meta:
db_table = 'log_activity_comment'
ordering = ('-created',)
class VersionLog(ModelBase):
"""
This table is for indexing the activity log by version.
"""
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
version = models.ForeignKey(Version, on_delete=models.CASCADE)
class Meta:
db_table = 'log_activity_version'
ordering = ('-created',)
class UserLog(ModelBase):
"""
This table is for indexing the activity log by user.
Note: This includes activity performed unto the user.
"""
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
class Meta:
db_table = 'log_activity_user'
ordering = ('-created',)
class GroupLog(ModelBase):
"""
This table is for indexing the activity log by access group.
"""
id = PositiveAutoField(primary_key=True)
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
group = models.ForeignKey(Group, on_delete=models.CASCADE)
class Meta:
db_table = 'log_activity_group'
ordering = ('-created',)
class DraftComment(ModelBase):
"""A model that allows us to draft comments for reviews before we have
an ActivityLog instance ready.
This is being used by the commenting API by the code-manager.
"""
id = PositiveAutoField(primary_key=True)
comments = models.TextField()
version = models.ForeignKey(Version, on_delete=models.CASCADE)
user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
class Meta:
db_table = 'log_activity_comment_draft'
class ActivityLogManager(ManagerBase):
def for_addons(self, addons):
if isinstance(addons, Addon):
addons = (addons,)
vals = (AddonLog.objects.filter(addon__in=addons)
.values_list('activity_log', flat=True))
if vals:
return self.filter(pk__in=list(vals))
else:
return self.none()
def for_version(self, version):
vals = (VersionLog.objects.filter(version=version)
.values_list('activity_log', flat=True))
return self.filter(pk__in=list(vals))
def for_groups(self, groups):
if isinstance(groups, Group):
groups = (groups,)
return self.filter(grouplog__group__in=groups)
def for_user(self, user):
vals = (UserLog.objects.filter(user=user)
.values_list('activity_log', flat=True))
return self.filter(pk__in=list(vals))
def for_developer(self):
return self.exclude(action__in=constants.activity.LOG_ADMINS +
constants.activity.LOG_HIDE_DEVELOPER)
def admin_events(self):
return self.filter(action__in=constants.activity.LOG_ADMINS)
def moderation_events(self):
return self.filter(action__in=constants.activity.LOG_RATING_MODERATION)
def review_queue(self):
qs = self._by_type()
return (qs.filter(action__in=constants.activity.LOG_REVIEW_QUEUE)
.exclude(user__id=settings.TASK_USER_ID))
def review_log(self):
qs = self._by_type()
return (
qs.filter(action__in=constants.activity.LOG_REVIEWER_REVIEW_ACTION)
.exclude(user__id=settings.TASK_USER_ID))
def total_ratings(self, theme=False):
"""Return the top users, and their # of reviews."""
qs = self._by_type()
action_ids = ([amo.LOG.THEME_REVIEW.id] if theme
else constants.activity.LOG_REVIEWER_REVIEW_ACTION)
return (qs.values('user', 'user__display_name', 'user__username')
.filter(action__in=action_ids)
.exclude(user__id=settings.TASK_USER_ID)
.annotate(approval_count=models.Count('id'))
.order_by('-approval_count'))
def monthly_reviews(self, theme=False):
"""Return the top users for the month, and their # of reviews."""
qs = self._by_type()
now = datetime.now()
created_date = datetime(now.year, now.month, 1)
actions = ([constants.activity.LOG.THEME_REVIEW.id] if theme
else constants.activity.LOG_REVIEWER_REVIEW_ACTION)
return (qs.values('user', 'user__display_name', 'user__username')
.filter(created__gte=created_date,
action__in=actions)
.exclude(user__id=settings.TASK_USER_ID)
.annotate(approval_count=models.Count('id'))
.order_by('-approval_count'))
def user_approve_reviews(self, user):
qs = self._by_type()
return qs.filter(
action__in=constants.activity.LOG_REVIEWER_REVIEW_ACTION,
user__id=user.id)
def current_month_user_approve_reviews(self, user):
now = datetime.now()
ago = datetime(now.year, now.month, 1)
return self.user_approve_reviews(user).filter(created__gte=ago)
def user_position(self, values_qs, user):
try:
return next(i for (i, d) in enumerate(list(values_qs))
if d.get('user') == user.id) + 1
except StopIteration:
return None
def total_ratings_user_position(self, user, theme=False):
return self.user_position(self.total_ratings(theme), user)
def monthly_reviews_user_position(self, user, theme=False):
return self.user_position(self.monthly_reviews(theme), user)
def _by_type(self):
qs = super(ActivityLogManager, self).get_queryset()
table = 'log_activity_addon'
return qs.extra(
tables=[table],
where=['%s.activity_log_id=%s.id'
% (table, 'log_activity')])
class SafeFormatter(string.Formatter):
"""A replacement for str.format that escapes interpolated values."""
def get_field(self, *args, **kw):
# obj is the value getting interpolated into the string.
obj, used_key = super(SafeFormatter, self).get_field(*args, **kw)
return jinja2.escape(obj), used_key
@python_2_unicode_compatible
class ActivityLog(ModelBase):
TYPES = sorted(
[(value.id, key)
for key, value in constants.activity.LOG_BY_ID.items()])
user = models.ForeignKey(
'users.UserProfile', null=True, on_delete=models.SET_NULL)
action = models.SmallIntegerField(choices=TYPES, db_index=True)
_arguments = models.TextField(blank=True, db_column='arguments')
_details = models.TextField(blank=True, db_column='details')
objects = ActivityLogManager()
formatter = SafeFormatter()
class Meta:
db_table = 'log_activity'
ordering = ('-created',)
def f(self, *args, **kw):
"""Calls SafeFormatter.format and returns a Markup string."""
# SafeFormatter escapes everything so this is safe.
return jinja2.Markup(self.formatter.format(*args, **kw))
@property
def arguments(self):
try:
# d is a structure:
# ``d = [{'addons.addon':12}, {'addons.addon':1}, ... ]``
d = json.loads(self._arguments)
except Exception as e:
log.debug('unserializing data from addon_log failed: %s' % self.id)
log.debug(e)
return None
objs = []
for item in d:
# item has only one element.
model_name, pk = list(item.items())[0]
if model_name in ('str', 'int', 'null'):
objs.append(pk)
else:
# Cope with renames of key models:
if model_name == 'reviews.review':
model_name = 'ratings.rating'
(app_label, model_name) = model_name.split('.')
model = apps.get_model(app_label, model_name)
# Cope with soft deleted models and unlisted addons.
objs.extend(model.get_unfiltered_manager().filter(pk=pk))
return objs
@arguments.setter
def arguments(self, args=None):
"""
Takes an object or a tuple of objects, serializes them, and stores the
result in the db as a JSON string.
"""
if args is None:
args = []
if not isinstance(args, (list, tuple)):
args = (args,)
serialize_me = []
for arg in args:
if isinstance(arg, six.string_types):
serialize_me.append({'str': arg})
elif isinstance(arg, six.integer_types):
serialize_me.append({'int': arg})
elif isinstance(arg, tuple):
# Instead of passing an addon instance you can pass a tuple:
# (Addon, 3) for Addon with pk=3
serialize_me.append(
dict(((six.text_type(arg[0]._meta), arg[1]),)))
else:
serialize_me.append(
dict(((six.text_type(arg._meta), arg.pk),)))
self._arguments = json.dumps(serialize_me)
@property
def details(self):
if self._details:
return json.loads(self._details)
@details.setter
def details(self, data):
self._details = json.dumps(data)
@property
def log(self):
return constants.activity.LOG_BY_ID[self.action]
def to_string(self, type_=None):
log_type = constants.activity.LOG_BY_ID[self.action]
if type_ and hasattr(log_type, '%s_format' % type_):
format = getattr(log_type, '%s_format' % type_)
else:
format = log_type.format
# We need to copy arguments so we can remove elements from it
# while we loop over self.arguments.
arguments = copy(self.arguments)
addon = None
rating = None
version = None
collection = None
tag = None
group = None
file_ = None
status = None
for arg in self.arguments:
if isinstance(arg, Addon) and not addon:
if arg.has_listed_versions():
addon = self.f(u'<a href="{0}">{1}</a>',
arg.get_url_path(), arg.name)
else:
addon = self.f(u'{0}', arg.name)
arguments.remove(arg)
if isinstance(arg, Rating) and not rating:
rating = self.f(u'<a href="{0}">{1}</a>',
arg.get_url_path(), ugettext('Review'))
arguments.remove(arg)
if isinstance(arg, Version) and not version:
text = ugettext('Version {0}')
if arg.channel == amo.RELEASE_CHANNEL_LISTED:
version = self.f(u'<a href="{1}">%s</a>' % text,
arg.version, arg.get_url_path())
else:
version = self.f(text, arg.version)
arguments.remove(arg)
if isinstance(arg, Collection) and not collection:
collection = self.f(u'<a href="{0}">{1}</a>',
arg.get_url_path(), arg.name)
arguments.remove(arg)
if isinstance(arg, Tag) and not tag:
if arg.can_reverse():
tag = self.f(u'<a href="{0}">{1}</a>',
arg.get_url_path(), arg.tag_text)
else:
tag = self.f('{0}', arg.tag_text)
if isinstance(arg, Group) and not group:
group = arg.name
arguments.remove(arg)
if isinstance(arg, File) and not file_:
validation = 'passed'
if self.action in (
amo.LOG.UNLISTED_SIGNED.id,
amo.LOG.UNLISTED_SIGNED_VALIDATION_FAILED.id):
validation = 'ignored'
file_ = self.f(u'<a href="{0}">{1}</a> (validation {2})',
reverse('files.list', args=[arg.pk]),
arg.filename,
validation)
arguments.remove(arg)
if (self.action == amo.LOG.CHANGE_STATUS.id and
not isinstance(arg, Addon)):
# Unfortunately, this action has been abused in the past and
# the non-addon argument could be a string or an int. If it's
# an int, we want to retrieve the string and translate it.
# Note that we use STATUS_CHOICES_PERSONA because it's a
# superset of STATUS_CHOICES_ADDON, and we need to handle all
# statuses.
if isinstance(arg, int) and arg in amo.STATUS_CHOICES_PERSONA:
status = ugettext(amo.STATUS_CHOICES_PERSONA[arg])
else:
# It's not an int or not one of the choices, so assume it's
# a string or an unknown int we want to display as-is.
status = arg
arguments.remove(arg)
user = user_link(self.user)
try:
kw = {
'addon': addon,
'rating': rating,
'version': version,
'collection': collection,
'tag': tag,
'user': user,
'group': group,
'file': file_,
'status': status,
}
return self.f(six.text_type(format), *arguments, **kw)
except (AttributeError, KeyError, IndexError):
log.warning('%d contains garbage data' % (self.id or 0))
return 'Something magical happened.'
def __str__(self):
return self.to_string()
def __html__(self):
return self
@property
def author_name(self):
"""Name of the user that triggered the activity.
If it's a reviewer action that will be shown to developers, the
`reviewer_name` property is used if present, otherwise `name` is
used."""
if self.action in constants.activity.LOG_REVIEW_QUEUE_DEVELOPER:
return self.user.reviewer_name or self.user.name
return self.user.name
@classmethod
def create(cls, action, *args, **kw):
"""
e.g. ActivityLog.create(amo.LOG.CREATE_ADDON, addon),
ActivityLog.create(amo.LOG.ADD_FILE_TO_VERSION, file, version)
In case of circular import you can use `olympia.activity.log_create()`
"""
from olympia import core
user = kw.get('user', core.get_user())
if not user:
log.warning('Activity log called with no user: %s' % action.id)
return
# We make sure that we take the timestamp if provided, instead of
# creating a new one, especially useful for log entries created
# in a loop.
al = ActivityLog(
user=user, action=action.id,
created=kw.get('created', timezone.now()))
al.arguments = args
if 'details' in kw:
al.details = kw['details']
al.save()
if 'details' in kw and 'comments' in al.details:
CommentLog.objects.create(
comments=al.details['comments'], activity_log=al,
created=kw.get('created', timezone.now()))
for arg in args:
if isinstance(arg, tuple):
class_ = arg[0]
id_ = arg[1]
else:
class_ = arg.__class__
id_ = arg.id if isinstance(arg, ModelBase) else None
if class_ == Addon:
AddonLog.objects.create(
addon_id=id_, activity_log=al,
created=kw.get('created', timezone.now()))
elif class_ == Version:
VersionLog.objects.create(
version_id=id_, activity_log=al,
created=kw.get('created', timezone.now()))
elif class_ == UserProfile:
UserLog.objects.create(
user_id=id_, activity_log=al,
created=kw.get('created', timezone.now()))
elif class_ == Group:
GroupLog.objects.create(
group_id=id_, activity_log=al,
created=kw.get('created', timezone.now()))
# Index by every user
UserLog.objects.create(
activity_log=al, user=user,
created=kw.get('created', timezone.now()))
return al
|
|
# standard libraries
import os
import functools
import collections
import collections.abc
import datetime
import json
import multiprocessing
import threading
import traceback
import time
# third party libraries
pass
# first party libraries
pass
__where__ = os.path.dirname(os.path.abspath(__file__))
class repeat(threading.Thread):
def __init__(self, function, period, how='thread', on_error=None,
args=None, kwargs=None):
super(repeat, self).__init__(daemon=True)
if args is None:
args = ()
if kwargs is None:
kwargs = {}
if on_error is None:
def on_error(exception):
traceback.print_exc()
print()
def wrapped():
try:
function(*args, **kwargs)
except Exception as exception:
on_error(exception)
self.function = wrapped
self.period = period
if how == 'thread':
self.How = threading.Thread
elif how == 'process':
self.How = multiprocessing.Process
self.terminated = False
self.start()
def run(self):
while not self.terminated:
try:
start = time.time()
runner = self.How(target=self.function)
runner.start()
runner.join()
duration = time.time() - start
if duration < self.period:
time.sleep(self.period - duration)
except Exception:
continue
def terminate(self):
self.terminated = True
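# Illustrative sketch (assumption, not part of the original module): running a
# small function every 2 seconds on a background thread with ``repeat``; guarded
# so it only executes when this file is run directly.
if __name__ == '__main__':
    def _heartbeat():
        print('tick', time.time())

    ticker = repeat(_heartbeat, period=2, how='thread')
    time.sleep(5)       # let it fire a couple of times
    ticker.terminate()  # stop the loop; the daemon thread exits with the process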
def cached_getter(allow_get=True, allow_set=True, allow_delete=True):
class Wrapper:
__slots__ = ('getter', 'name', 'cached_name', )
def __init__(self, getter):
self.getter = getter
self.name = getter.__name__
self.cached_name = '_cached_{}'.format(self.name)
def __get__(self, instance, owner=None):
if self.allow_get == False:
raise AttributeError
try:
return getattr(instance, self.cached_name)
except AttributeError:
value = self.getter(instance)
setattr(instance, self.cached_name, value)
return value
def __set__(self, instance, value):
if self.allow_set == False:
raise AttributeError
setattr(instance, self.cached_name, value)
def __delete__(self, instance):
if self.allow_delete == False:
raise AttributeError
delattr(instance, self.cached_name)
Wrapper.allow_get = allow_get
Wrapper.allow_set = allow_set
Wrapper.allow_delete = allow_delete
return Wrapper
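# Illustrative sketch (assumption): how cached_getter memoizes a property-like
# attribute per instance. The getter runs once, and later reads return the value
# cached on the instance as ``_cached_<name>``.
if __name__ == '__main__':
    class _Report:
        @cached_getter()
        def data(self):
            print('computing...')
            return [1, 2, 3]

    _r = _Report()
    print(_r.data)   # prints 'computing...' then [1, 2, 3]
    print(_r.data)   # served from the cache, no recomputation
    del _r.data      # clears the cached value; the next access recomputes it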
"""
def cached_setter(allow_get=True, set_once=False, allow_delete=True):
class Wrapper:
__slots__ = ('name', 'setter', 'was_set', 'value', )
def __init__(self, setter):
self.setter = setter
self.name = setter.__name__
self.was_set = False
def __get__(self, obj, type=None):
if self.allow_get == False:
raise AttributeError
return self.value
def __set__(self, obj, value):
if self.was_set and self.set_once:
raise AttributeError
self.value = self.setter(obj, value)
def __delete__(self, obj):
if self.allow_delete == False:
raise AttributeError
delattr(obj, self.name)
Wrapper.allow_get = allow_get
Wrapper.allow_delete = allow_delete
Wrapper.set_once = set_once
return Wrapper
"""
def once(f):
@functools.wraps(f)
def decorator(*args, **kwargs):
try:
return f._result
except AttributeError:
result = f._result = f(*args, **kwargs)
return result
return decorator
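# Illustrative sketch (assumption): ``once`` stores the first return value on
# the wrapped function itself, so repeated calls skip the body entirely.
if __name__ == '__main__':
    @once
    def _load_config():
        print('loading...')
        return {'debug': True}

    _load_config()   # prints 'loading...' and returns the dict
    _load_config()   # returns the same dict without re-running the body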
class LRUCache(collections.abc.MutableMapping):
def __init__(self, size=None):
# ``size=None`` means unbounded; only validate when a limit is given.
if size is not None and not isinstance(size, (int, float)):
raise TypeError()
if size is not None and size < 0:
raise ValueError()
self._size = size
self._cache = collections.OrderedDict()
def __getitem__(self, key):
return self.touch(key)
def flush(self):
self._cache = collections.OrderedDict()
@property
def overflowing(self):
return len(self) > self.size
def touch(self, key):
value = self._cache.pop(key)
self._cache[key] = value
return value
def __setitem__(self, key, value):
self._cache[key] = value
if self.size is not None:
self.squeeze()
@property
def size(self):
return self._size
@size.setter
def size(self, size):
self._size = size
self.squeeze()
def squeeze(self):
while self.overflowing:
self._cache.popitem(last=False)
def __delitem__(self, key):
del self._cache[key]
def __iter__(self):
return iter(self._cache)
def __len__(self):
return len(self._cache)
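# Illustrative sketch (assumption): LRUCache keeps at most ``size`` entries and
# evicts the least recently used one; reading a key via __getitem__ refreshes it.
if __name__ == '__main__':
    _cache = LRUCache(size=2)
    _cache['a'] = 1
    _cache['b'] = 2
    _cache['a']           # touch 'a' so it becomes the most recently used
    _cache['c'] = 3       # over capacity: 'b' (least recently used) is evicted
    print(list(_cache))   # ['a', 'c']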
class When:
@staticmethod
def timestamp(when=None, format='%Y-%m-%dT%H:%M:%SZ'):
if when is None:
when = datetime.datetime.utcnow()
return when.strftime(format)
@staticmethod
def iso_timestamp(when=None):
return When.timestamp(when, format='%Y-%m-%dT%H:%M:%SZ')
@staticmethod
def unix_timestamp(when=None):
if when is None:
when = datetime.datetime.utcnow()
return when.timestamp()
@staticmethod
def http_timestamp(when=None):
return When.timestamp(when, format='%a, %d-%b-%Y %H:%M:%S GMT')
"""
@staticmethod
def from_timestamp(timestamp, format='YYYY-MM-DD'):
pass
"""
class JsonEncoder(json.JSONEncoder):
def __init__(self, indent=None, serializers=None):
super(JsonEncoder, self).__init__(indent=indent)
if serializers is None:
serializers = {}
self.serializers = serializers
def default(self, obj):
try:
serializer = self.serializers[obj.__class__]
return serializer(obj)
except KeyError:
return super(JsonEncoder, self).default(obj)
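# Illustrative sketch (assumption): plugging a per-type serializer into
# JsonEncoder so datetimes become ISO-style strings instead of raising TypeError.
if __name__ == '__main__':
    _encoder = JsonEncoder(serializers={datetime.datetime: When.iso_timestamp})
    _payload = {'created': datetime.datetime(2017, 1, 2, 3, 4, 5)}
    print(_encoder.encode(_payload))  # {"created": "2017-01-02T03:04:05Z"}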
|
|
# coding: utf-8
# # quick recap
#
# You now have both the `while` loop and the `for` loop in your toolset.
# Let's look quickly at yesterday's last tutorial task.
# However, I will also upload general solution notebook files later today.
# In[ ]:
for fIndex, y in enumerate(range(2, 5)):
countdown = y
yFactorial = 1
wIndex = 0
while countdown > 1:
yFactorial *= countdown
##### CHECKPOINT! #####
countdown -= 1
wIndex += 1
print("RESULT: %d! = %d" % (y, yFactorial))
# In[ ]:
# Question 3
print("%s %s %s %s %s" % ("fIndex", "y", "wIndex", "countdown", "yFactorial"))
for fIndex, y in enumerate(range(2, 5)):
countdown = y
yFactorial = 1
wIndex = 0
while countdown > 1:
yFactorial *= countdown
print("%-6i %1i %6i %9i %10i" % (fIndex, y, wIndex, countdown, yFactorial))
countdown -= 1
wIndex += 1
#print "RESULT: %d! = %d" % (y, yFactorial)
# # Today
#
# * Sublists
# * nested lists
# * Functions (the most fun object in Python in my mind)
# * global vs local variables
# * docstrings
# ## Extracting Sublists
#
# Sometimes we want to operate on only parts of lists.
# The syntax for this is particularly simple:
# In[ ]:
# create our favorite massRatios:
massRatios = list(range(10))
massRatios
# In[ ]:
massRatios[2:7]
# This is called `slicing` and the 2 parameters required are separated by a colon `:`.
#
# Similar to the parameters for the `range()` function, the starting number is `inclusive` while the ending number is `exclusive`.
#
# When the 1st parameter is left out, the slice starts at the beginning of the list, when the last parameter is left out it goes until the end:
# In[ ]:
print(massRatios[:4])
print(massRatios[4:])
# Note how in the first case, the length returned is the same as the value of the index you provide, thanks to 0-based indexing.
#
# Note, also, that thanks to the asymmetry of `inclusivity` for the start parameter vs `exclusivity` for the end parameter, you can use the same number twice to get both ends of a list, which also makes the code easier to read.
# In[ ]:
i = 5
print(massRatios[:i])
print(massRatios[i:])
# ## Nested lists
#
# Python allows for nesting lists. This allows for finer substructure of data storage.
# For example, storing vectors in a list:
# In[ ]:
v1 = [0,1,2]
v2 = [7,8,9]
# In[ ]:
vectors = [v1, v2]
vectors
# When accessing elements, you can also just nest the indexing:
# In[ ]:
vectors[0][1]
# In[ ]:
vectors[1][-1]
# ## Functions
# Functions are useful because they allow us to perform operations many times without
# repeating code snippets, keeping programs shorter, more manageable, and more organized.
# We will start with the Planck function,
# $B_{\lambda}(T) = \frac{2 h c^2}{\lambda^5 \left[\exp\left(\frac{h c}{\lambda k T}\right) - 1 \right]}$
# where $h$ is Planck's constant, $c$ is the speed of light,
# $k$ is Boltzmann's constant, $T$ is the blackbody temperature, and
# $\lambda$ is the wavelength.
# In[ ]:
# First, define the physical constants:
h = 6.626e-34 # J s, Planck's constant
k = 1.38e-23 # J K^-1, Boltzmann constant
c = 3.00e8 # m s^-1, speed of light
# Conversion between angstroms and meters
angPerM = 1e10
# The Planck function is a function of two variables;
# for now, we'll set T = 5,800 K, the photospheric temperature of the Sun
# and allow the wavelength to vary.
temp = 5800.0
from math import exp
# Define the function using def:
def intensity1(waveAng): # Function header
waveM = waveAng / angPerM # Will convert Angstroms to meters
B = 2 * h * c**2 / (waveM**5 * (exp(h * c / (waveM * k * temp)) - 1))
return B
# Units will be W / m^2 / m / ster
# The above statements comprise the function body and the return to the main program.
# In[ ]:
print('%e' % intensity1(5000.0)) # note the %e formatting string for exponent notation
# Basic structure:
#     def function_name(argument):
#         <do some stuff>
#         ...
#         <return stuff>
# Notice especially: def, colon, indent.
# Optional: argument (or "input": you'll hear people say both), return statement.
# NOTE: Without a return statement, the function will still return None. More on None to come!
# In[ ]:
def funcNoReturn(x):
print("Answer:", x + 5)
return x+5
y = funcNoReturn(6)
print("y =", y)
# Next we'll create a list of wavelengths at which to evaluate the Planck function:
# In[ ]:
waveList = [3000 + 100 * i for i in range(41)]
# #### Q. What did the above command do?
# In[ ]:
waveList
# Now, we'll create an intensity list using another list comprehension:
# In[ ]:
intensityList = [intensity1(wave) for wave in waveList]
intensityList
# #### Q. What should the output of "intensityList" be?
# For a nice print-out, make use of a for loop and the range function:
# In[ ]:
for index in range(len(waveList)):
print('wavelength (Angstrom) = {} Intensity (W m^-3 ster^-1) = {:.2e}' .format(waveList[index], intensityList[index]))
# #### Q. What will the output look like?
# ### Local and Global variables
# When I define a function in the following manner,
# In[ ]:
def intensity1(waveAng): # Function header
waveM = waveAng / angPerM # Will convert Angstroms to meters
B = 2 * h * c**2 / (waveM**5 * (exp(h * c / (waveM * k * temp)) - 1))
return B
# B is a local variable -- it only exists inside the function intensity1. So this fails:
# In[ ]:
B
# So does this:
# In[ ]:
waveM
# In contrast, h, k, c, and temp are global variables (defined above) and can
# be accessed anywhere in the program or notebook.
# Notes on global and local variables:
# * Avoid local and global variables with the same name. (Works, but can be confusing.)
# * When there are variables of the same name, Python first looks for a local variable,
#   then a global variable, then a built-in function of that name.
# * Avoid changing global variables in functions, although Python has a keyword
#   for doing so: the global statement.
# #### Q. What will this print?
# In[ ]:
g = 10
def f(x):
g = 11
return x + g
f(5), g
# But:
# In[ ]:
g = 10
def f(x):
global g # Now "g" inside the function references the global variable
g = 11 # Give that variable a new value
return x + g
f(5), g
Use of "global" is generally frowned upon (dangerous), but here it is for completeness.
# ### Functions with multiple arguments
# The Planck function is a function of wavelength AND temperature.
# In[ ]:
def intensity2(waveAng, temp): # 2nd version of function Intensity
waveM = waveAng / angPerM
B = 2 * h * c**2 / (waveM**5 * (exp(h * c / (waveM * k * temp)) - 1))
return B
# In[ ]:
intensity2(5000.0, 5800.0)
"call sequence" simple, nothing fancy! Just comma-separated values to supply multiple arguments.But, you could also call the function with names for arguments:
# In[ ]:
intensity2(temp=5800.0, waveAng=5000.0)
# or
# In[ ]:
intensity2(waveAng=5000.0, temp=5800.0)
# #### Q. Will this work (produce the same result)?
# In[ ]:
intensity2(5800.0, 5000.0)
# Functions are useful beyond just evaluating equations.
# Here's another example (albeit another mathematical one).
# We needed a wavelength list for the previous example with the Planck function;
# let's write a function to make that for us.
# In[ ]:
def waveListGen(minWave, maxWave, delta):
waveList = []
wave = minWave
while wave <= maxWave:
waveList.append(wave)
wave += delta
return waveList
# #### Q. What will this do?
# In[ ]:
waveList = waveListGen(3000, 5000, 200)
waveList
# Note that the waveListGen function we just wrote is more flexible than our previous approach,
#     wavelengthList = [3000.0 + 100.0 * i for i in range(41)]
# since we can easily modify the start, stop, and wavelength spacing.
# On the other hand, we could just use range:
# In[ ]:
list(range(3000, 5001, 200))
# ### Functions with multiple return values
# Given a wavelength, we can return both the wavelength and the
# value of the Planck function at that wavelength:
# In[ ]:
# (Defined h, c, k above and imported math)
def intensity3(waveAng, temp): # 3rd version of function Intensity
waveM = waveAng / angPerM
B = 2 * h * c**2 / (waveM**5 * (exp(h * c / (waveM * k * temp)) - 1))
return (waveAng, B)
# In[ ]:
temp = 10000.0 # Hot A star or cool B star; brighter than a G star
waveAng, intensity = intensity3(6000.0, temp=temp)
waveAng, intensity # notice the automatic packing of Python again
# There must be two variables on the left-hand side of the assignment operator since
# the function will return two variables, or else if there is only one variable it
# will contain both values as a tuple (see cell below).
# This is yet another instance of "unpacking," which we saw while using the "enumerate"
# function, and when working with tuples.
# In[ ]:
result = intensity3(6000.0, 10000.0)
print(result)
type(result)
# We've already seen how to make nice table outputs, so let's do it here:
# In[ ]:
for wave in waveListGen(3e3, 4e3, 100):
print('Wavelength (Angstroms) = %-10i Intensity (W m^-3 ster^-1) = %.2e' % intensity3(wave, 1e4))
# The %-10i prints integers in a field ten characters wide, left justified.
# By default (i.e., no minus sign), columns are right justified.
# Notice how compact that for loop is!
# We initialized the list of wavelengths right in the for loop, then passed the
# results of the calculation (using the function intensity3) directly to string formatting.
# This is possible because intensity3 returns a tuple!
# # Doc Strings:
# Doc strings are used to document functions. They generally include:
#
# * A description of the functionality of the function
#
# * A list of arguments
#
# * A description of outputs (returned values)
#
# And, they serve as the help documentation!
#
# They go right after the function header and are enclosed within triple quotes.
# In[ ]:
def force(mass1, mass2, radius):
"""
Compute the gravitational force between two bodies.
Parameters
----------
mass1 : int, float
Mass of the first body, in kilograms.
mass2 : int, float
Mass of the second body, in kilograms.
radius : int, float
Separation of the bodies, in meters.
Example
-------
To compute force between Earth and the Sun:
>>> F = force(5.97e24, 1.99e30, 1.5e11)
Returns
-------
Force in Newtons : float
"""
G = 6.67e-11
return G * mass1 * mass2 / radius**2
# In[ ]:
result = force(5.97e24, 2.00e30, 1.5e11)
result
# In[ ]:
# To see the documentation for a function, use help:
help(force)
# or with the subwindow:
# In[ ]:
get_ipython().magic('pinfo force')
# ## Some important functionality review
# In[ ]:
# a = [] initialize an empty list
# a = [1., 2] initialize a list
# a.append(elem) add the elem object to the end of the list
# a + [5, 4] concatenate (join) two lists
# a.insert(i, e) insert element e at index i
# a[5] acess the value of the element at index 5
# a[-1] get the last list element value
# a[4:7] slice list a
# del a[i] delete list element with index i
# a.remove(e) remove list element with value e (not index e)
# a.index('test') find the index where the element has the value 'test'
# 4 in a find out whether 4 is in a
# a.count(4) count how many times 4 is in a
# len(a) return the number of elements in a
# min(a) return the smallest element in a
# max(a) return the largest element in a
# sum(a) add all the elements in a
# sorted(a) return a sorted version of list a
# reversed(a) return a reversed version of list a
# a[1][0][4] nested list indexing (3 dimensional list in this case)
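# In[ ]:
# A quick, illustrative demo of a few of the list operations reviewed above
# (the values are arbitrary):
a = [3, 1, 4, 1, 5]
a.append(9)                            # a is now [3, 1, 4, 1, 5, 9]
print(a[2:4])                          # slice -> [4, 1]
print(a.count(1))                      # 1 appears twice -> 2
print(sorted(a))                       # [1, 1, 3, 4, 5, 9]
print(len(a), min(a), max(a), sum(a))  # 6 1 9 23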
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for remote procedure calls using queue
"""
import ddt
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_db import exception as db_exc
from cinder.common import constants
from cinder import context
from cinder import db
from cinder import exception
from cinder import manager
from cinder import objects
from cinder.objects import fields
from cinder import rpc
from cinder import service
from cinder import test
test_service_opts = [
cfg.StrOpt("fake_manager",
default="cinder.tests.unit.test_service.FakeManager",
help="Manager for testing"),
cfg.StrOpt("test_service_listen",
help="Host to bind test service to"),
cfg.IntOpt("test_service_listen_port",
default=0,
help="Port number to bind test service to"), ]
CONF = cfg.CONF
CONF.register_opts(test_service_opts)
class FakeManager(manager.Manager):
"""Fake manager for tests."""
def __init__(self, host=None,
db_driver=None, service_name=None, cluster=None):
super(FakeManager, self).__init__(host=host,
db_driver=db_driver,
cluster=cluster)
def test_method(self):
return 'manager'
class ExtendedService(service.Service):
def test_method(self):
return 'service'
class ServiceManagerTestCase(test.TestCase):
"""Test cases for Services."""
def test_message_gets_to_manager(self):
serv = service.Service('test',
'test',
'test',
'cinder.tests.unit.test_service.FakeManager')
serv.start()
self.assertEqual('manager', serv.test_method())
def test_override_manager_method(self):
serv = ExtendedService('test',
'test',
'test',
'cinder.tests.unit.test_service.FakeManager')
serv.start()
self.assertEqual('service', serv.test_method())
@mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'test': '1.5'})
@mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'test': '1.3'})
def test_reset(self):
serv = service.Service('test',
'test',
'test',
'cinder.tests.unit.test_service.FakeManager')
serv.start()
serv.reset()
self.assertEqual({}, rpc.LAST_OBJ_VERSIONS)
self.assertEqual({}, rpc.LAST_RPC_VERSIONS)
class ServiceFlagsTestCase(test.TestCase):
def test_service_enabled_on_create_based_on_flag(self):
ctxt = context.get_admin_context()
self.flags(enable_new_services=True)
host = 'foo'
binary = 'cinder-fake'
cluster = 'cluster'
app = service.Service.create(host=host, binary=binary, cluster=cluster)
ref = db.service_get(ctxt, app.service_id)
db.service_destroy(ctxt, app.service_id)
self.assertFalse(ref.disabled)
# Check that the cluster is also enabled
db_cluster = objects.ClusterList.get_all(ctxt)[0]
self.assertFalse(db_cluster.disabled)
db.cluster_destroy(ctxt, db_cluster.id)
def test_service_disabled_on_create_based_on_flag(self):
ctxt = context.get_admin_context()
self.flags(enable_new_services=False)
host = 'foo'
binary = 'cinder-fake'
cluster = 'cluster'
app = service.Service.create(host=host, binary=binary, cluster=cluster)
ref = db.service_get(ctxt, app.service_id)
db.service_destroy(ctxt, app.service_id)
self.assertTrue(ref.disabled)
# Check that the cluster is also disabled
db_cluster = objects.ClusterList.get_all(ctxt)[0]
self.assertTrue(db_cluster.disabled)
db.cluster_destroy(ctxt, db_cluster.id)
@ddt.ddt
class ServiceTestCase(test.TestCase):
"""Test cases for Services."""
def setUp(self):
super(ServiceTestCase, self).setUp()
self.host = 'foo'
self.binary = 'cinder-fake'
self.topic = 'fake'
self.service_ref = {'host': self.host,
'binary': self.binary,
'topic': self.topic,
'report_count': 0,
'availability_zone': 'nova',
'id': 1,
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}
self.ctxt = context.get_admin_context()
def _check_app(self, app, cluster=None, cluster_exists=None,
svc_id=None, added_to_cluster=True):
"""Check that Service instance and DB service and cluster are ok."""
self.assertIsNotNone(app)
# Check that we have the service ID
self.assertTrue(hasattr(app, 'service_id'))
if svc_id:
self.assertEqual(svc_id, app.service_id)
# Check that cluster has been properly set
self.assertEqual(cluster, app.cluster)
# Check that the entry has been really created in the DB
svc = objects.Service.get_by_id(self.ctxt, app.service_id)
cluster_name = cluster if cluster_exists is not False else None
# Check that cluster name matches
self.assertEqual(cluster_name, svc.cluster_name)
clusters = objects.ClusterList.get_all(self.ctxt)
if cluster_name:
# Make sure we have created the cluster in the DB
self.assertEqual(1, len(clusters))
cluster = clusters[0]
self.assertEqual(cluster_name, cluster.name)
self.assertEqual(self.binary, cluster.binary)
else:
# Make sure we haven't created any cluster in the DB
self.assertListEqual([], clusters.objects)
self.assertEqual(added_to_cluster, app.added_to_cluster)
def test_create_with_cluster_not_upgrading(self):
"""Test DB cluster creation when service is created."""
cluster_name = 'cluster'
app = service.Service.create(host=self.host, binary=self.binary,
cluster=cluster_name, topic=self.topic)
self._check_app(app, cluster_name)
def test_create_svc_exists_upgrade_cluster(self):
"""Test that we update cluster_name field when cfg has changed."""
# Create the service in the DB
db_svc = db.service_create(context.get_admin_context(),
{'host': self.host, 'binary': self.binary,
'topic': self.topic,
'cluster_name': None})
cluster_name = 'cluster'
app = service.Service.create(host=self.host, binary=self.binary,
cluster=cluster_name, topic=self.topic)
self._check_app(app, cluster_name, svc_id=db_svc.id,
added_to_cluster=cluster_name)
@mock.patch.object(objects.service.Service, 'get_by_args')
@mock.patch.object(objects.service.Service, 'get_by_id')
def test_report_state_newly_disconnected(self, get_by_id, get_by_args):
get_by_args.side_effect = exception.NotFound()
get_by_id.side_effect = db_exc.DBConnectionError()
with mock.patch.object(objects.service, 'db') as mock_db:
mock_db.service_create.return_value = self.service_ref
serv = service.Service(
self.host,
self.binary,
self.topic,
'cinder.tests.unit.test_service.FakeManager'
)
serv.start()
serv.report_state()
self.assertTrue(serv.model_disconnected)
self.assertFalse(mock_db.service_update.called)
@mock.patch.object(objects.service.Service, 'get_by_args')
@mock.patch.object(objects.service.Service, 'get_by_id')
def test_report_state_disconnected_DBError(self, get_by_id, get_by_args):
get_by_args.side_effect = exception.NotFound()
get_by_id.side_effect = db_exc.DBError()
with mock.patch.object(objects.service, 'db') as mock_db:
mock_db.service_create.return_value = self.service_ref
serv = service.Service(
self.host,
self.binary,
self.topic,
'cinder.tests.unit.test_service.FakeManager'
)
serv.start()
serv.report_state()
self.assertTrue(serv.model_disconnected)
self.assertFalse(mock_db.service_update.called)
@mock.patch('cinder.db.sqlalchemy.api.service_update')
@mock.patch('cinder.db.sqlalchemy.api.service_get')
def test_report_state_newly_connected(self, get_by_id, service_update):
get_by_id.return_value = self.service_ref
serv = service.Service(
self.host,
self.binary,
self.topic,
'cinder.tests.unit.test_service.FakeManager'
)
serv.start()
serv.model_disconnected = True
serv.report_state()
self.assertFalse(serv.model_disconnected)
self.assertTrue(service_update.called)
def test_report_state_manager_not_working(self):
with mock.patch('cinder.db') as mock_db:
mock_db.service_get.return_value = self.service_ref
serv = service.Service(
self.host,
self.binary,
self.topic,
'cinder.tests.unit.test_service.FakeManager'
)
serv.manager.is_working = mock.Mock(return_value=False)
serv.start()
serv.report_state()
serv.manager.is_working.assert_called_once_with()
self.assertFalse(mock_db.service_update.called)
def test_service_with_long_report_interval(self):
self.override_config('service_down_time', 10)
self.override_config('report_interval', 10)
service.Service.create(
binary="test_service",
manager="cinder.tests.unit.test_service.FakeManager")
self.assertEqual(25, CONF.service_down_time)
@mock.patch.object(rpc, 'get_server')
@mock.patch('cinder.db')
def test_service_stop_waits_for_rpcserver(self, mock_db, mock_rpc):
serv = service.Service(
self.host,
self.binary,
self.topic,
'cinder.tests.unit.test_service.FakeManager'
)
serv.start()
serv.stop()
serv.wait()
serv.rpcserver.start.assert_called_once_with()
serv.rpcserver.stop.assert_called_once_with()
serv.rpcserver.wait.assert_called_once_with()
@mock.patch('cinder.service.Service.report_state')
@mock.patch('cinder.service.Service.periodic_tasks')
@mock.patch.object(service.loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(rpc, 'get_server')
@mock.patch('cinder.db')
def test_service_stop_waits_for_timers(self, mock_db, mock_rpc,
mock_loopcall, mock_periodic,
mock_report):
"""Test that we wait for loopcalls only if stop succeeds."""
serv = service.Service(
self.host,
self.binary,
self.topic,
'cinder.tests.unit.test_service.FakeManager',
report_interval=5,
periodic_interval=10,
)
# One of the loopcalls will raise an exception on stop
mock_loopcall.side_effect = (
mock.Mock(**{'stop.side_effect': Exception}),
mock.Mock())
serv.start()
serv.stop()
serv.wait()
serv.rpcserver.start.assert_called_once_with()
serv.rpcserver.stop.assert_called_once_with()
serv.rpcserver.wait.assert_called_once_with()
# The first loopcall will have failed on the stop call, so we will not
# have waited for it to stop
self.assertEqual(1, serv.timers[0].start.call_count)
self.assertEqual(1, serv.timers[0].stop.call_count)
self.assertFalse(serv.timers[0].wait.called)
# We will wait for the second loopcall
self.assertEqual(1, serv.timers[1].start.call_count)
self.assertEqual(1, serv.timers[1].stop.call_count)
self.assertEqual(1, serv.timers[1].wait.call_count)
@mock.patch('cinder.manager.Manager.init_host')
@mock.patch.object(service.loopingcall, 'FixedIntervalLoopingCall')
@mock.patch('oslo_messaging.Target')
@mock.patch.object(rpc, 'get_server')
def _check_rpc_servers_and_init_host(self, app, added_to_cluster, cluster,
rpc_mock, target_mock, loop_mock,
init_host_mock):
app.start()
# Since we have created the service entry we call init_host with
# added_to_cluster=True
init_host_mock.assert_called_once_with(
added_to_cluster=added_to_cluster,
service_id=self.service_ref['id'])
expected_target_calls = [mock.call(topic=self.topic, server=self.host)]
expected_rpc_calls = [mock.call(target_mock.return_value, mock.ANY,
mock.ANY),
mock.call().start()]
if cluster and added_to_cluster:
self.assertIsNotNone(app.cluster_rpcserver)
expected_target_calls.append(mock.call(
topic=self.topic + '.' + cluster,
server=cluster.split('@')[0]))
expected_rpc_calls.extend(expected_rpc_calls[:])
# Check that we create message targets for host and cluster
target_mock.assert_has_calls(expected_target_calls)
# Check we get and start rpc services for host and cluster
rpc_mock.assert_has_calls(expected_rpc_calls)
self.assertIsNotNone(app.rpcserver)
app.stop()
@mock.patch('cinder.objects.Service.get_minimum_obj_version',
return_value='1.6')
def test_start_rpc_and_init_host_no_cluster(self, is_upgrading_mock):
"""Test that without cluster we don't create rpc service."""
app = service.Service.create(host=self.host,
binary=constants.VOLUME_BINARY,
cluster=None, topic=self.topic)
self._check_rpc_servers_and_init_host(app, False, None)
@mock.patch('cinder.objects.Service.get_minimum_obj_version')
def test_start_rpc_and_init_host_cluster(self, get_min_obj_mock):
"""Test that with cluster we create the rpc service."""
get_min_obj_mock.return_value = '1.7'
cluster = 'cluster@backend#pool'
self.host = 'host@backend#pool'
app = service.Service.create(host=self.host,
binary=constants.VOLUME_BINARY,
cluster=cluster, topic=self.topic)
self._check_rpc_servers_and_init_host(app, True, cluster)
@mock.patch('cinder.objects.Cluster.get_by_id')
def test_ensure_cluster_exists_no_cluster(self, get_mock):
app = service.Service.create(host=self.host,
binary=self.binary,
topic=self.topic)
svc = objects.Service.get_by_id(self.ctxt, app.service_id)
app._ensure_cluster_exists(self.ctxt, svc)
get_mock.assert_not_called()
self.assertEqual({}, svc.cinder_obj_get_changes())
@mock.patch('cinder.objects.Cluster.get_by_id')
    def test_ensure_cluster_exists_cluster_exists_non_replicated(self,
                                                                  get_mock):
cluster = objects.Cluster(
name='cluster_name', active_backend_id=None, frozen=False,
replication_status=fields.ReplicationStatus.NOT_CAPABLE)
get_mock.return_value = cluster
app = service.Service.create(host=self.host,
binary=self.binary,
topic=self.topic)
svc = objects.Service.get_by_id(self.ctxt, app.service_id)
app.cluster = cluster.name
app._ensure_cluster_exists(self.ctxt, svc)
get_mock.assert_called_once_with(self.ctxt, None, name=cluster.name,
binary=app.binary)
self.assertEqual({}, svc.cinder_obj_get_changes())
@mock.patch('cinder.objects.Cluster.get_by_id')
def test_ensure_cluster_exists_cluster_change(self, get_mock):
"""We copy replication fields from the cluster to the service."""
changes = dict(replication_status=fields.ReplicationStatus.FAILED_OVER,
active_backend_id='secondary',
frozen=True)
cluster = objects.Cluster(name='cluster_name', **changes)
get_mock.return_value = cluster
app = service.Service.create(host=self.host,
binary=self.binary,
topic=self.topic)
svc = objects.Service.get_by_id(self.ctxt, app.service_id)
app.cluster = cluster.name
app._ensure_cluster_exists(self.ctxt, svc)
get_mock.assert_called_once_with(self.ctxt, None, name=cluster.name,
binary=app.binary)
self.assertEqual(changes, svc.cinder_obj_get_changes())
@mock.patch('cinder.objects.Cluster.get_by_id')
def test_ensure_cluster_exists_cluster_no_change(self, get_mock):
"""Don't copy replication fields from cluster if replication error."""
changes = dict(replication_status=fields.ReplicationStatus.FAILED_OVER,
active_backend_id='secondary',
frozen=True)
cluster = objects.Cluster(name='cluster_name', **changes)
get_mock.return_value = cluster
app = service.Service.create(host=self.host,
binary=self.binary,
topic=self.topic)
svc = objects.Service.get_by_id(self.ctxt, app.service_id)
svc.replication_status = fields.ReplicationStatus.ERROR
svc.obj_reset_changes()
app.cluster = cluster.name
app._ensure_cluster_exists(self.ctxt, svc)
get_mock.assert_called_once_with(self.ctxt, None, name=cluster.name,
binary=app.binary)
self.assertEqual({}, svc.cinder_obj_get_changes())
def test_ensure_cluster_exists_cluster_create_replicated_and_non(self):
"""We use service replication fields to create the cluster."""
changes = dict(replication_status=fields.ReplicationStatus.FAILED_OVER,
active_backend_id='secondary',
frozen=True)
app = service.Service.create(host=self.host,
binary=self.binary,
topic=self.topic)
svc = objects.Service.get_by_id(self.ctxt, app.service_id)
for key, value in changes.items():
setattr(svc, key, value)
app.cluster = 'cluster_name'
app._ensure_cluster_exists(self.ctxt, svc)
cluster = objects.Cluster.get_by_id(self.ctxt, None, name=app.cluster)
for key, value in changes.items():
self.assertEqual(value, getattr(cluster, key))
class TestWSGIService(test.TestCase):
@mock.patch('oslo_service.wsgi.Loader')
def test_service_random_port(self, mock_loader):
test_service = service.WSGIService("test_service")
self.assertEqual(0, test_service.port)
test_service.start()
self.assertNotEqual(0, test_service.port)
test_service.stop()
self.assertTrue(mock_loader.called)
@mock.patch('oslo_service.wsgi.Loader')
def test_reset_pool_size_to_default(self, mock_loader):
test_service = service.WSGIService("test_service")
test_service.start()
# Stopping the service, which in turn sets pool size to 0
test_service.stop()
self.assertEqual(0, test_service.server._pool.size)
# Resetting pool size to default
test_service.reset()
test_service.start()
self.assertEqual(cfg.CONF.wsgi_default_pool_size,
test_service.server._pool.size)
self.assertTrue(mock_loader.called)
@mock.patch('oslo_service.wsgi.Loader')
def test_workers_set_default(self, mock_loader):
self.override_config('osapi_volume_listen_port',
CONF.test_service_listen_port)
test_service = service.WSGIService("osapi_volume")
self.assertEqual(processutils.get_worker_count(),
test_service.workers)
self.assertTrue(mock_loader.called)
@mock.patch('oslo_service.wsgi.Loader')
def test_workers_set_good_user_setting(self, mock_loader):
self.override_config('osapi_volume_listen_port',
CONF.test_service_listen_port)
self.override_config('osapi_volume_workers', 8)
test_service = service.WSGIService("osapi_volume")
self.assertEqual(8, test_service.workers)
self.assertTrue(mock_loader.called)
@mock.patch('oslo_service.wsgi.Loader')
def test_workers_set_zero_user_setting(self, mock_loader):
self.override_config('osapi_volume_listen_port',
CONF.test_service_listen_port)
self.override_config('osapi_volume_workers', 0)
test_service = service.WSGIService("osapi_volume")
# If a value less than 1 is used, defaults to number of procs
# available
self.assertEqual(processutils.get_worker_count(),
test_service.workers)
self.assertTrue(mock_loader.called)
@mock.patch('oslo_service.wsgi.Loader')
def test_workers_set_negative_user_setting(self, mock_loader):
self.override_config('osapi_volume_workers', -1)
self.assertRaises(exception.InvalidInput,
service.WSGIService, "osapi_volume")
self.assertTrue(mock_loader.called)
@mock.patch('oslo_service.wsgi.Server')
@mock.patch('oslo_service.wsgi.Loader')
def test_ssl_enabled(self, mock_loader, mock_server):
self.override_config('osapi_volume_use_ssl', True)
service.WSGIService("osapi_volume")
mock_server.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
port=mock.ANY, host=mock.ANY,
use_ssl=True)
self.assertTrue(mock_loader.called)
class OSCompatibilityTestCase(test.TestCase):
def _test_service_launcher(self, fake_os):
# Note(lpetrut): The cinder-volume service needs to be spawned
# differently on Windows due to an eventlet bug. For this reason,
# we must check the process launcher used.
fake_process_launcher = mock.MagicMock()
with mock.patch('os.name', fake_os):
with mock.patch('cinder.service.process_launcher',
fake_process_launcher):
launcher = service.get_launcher()
if fake_os == 'nt':
self.assertEqual(service.Launcher, type(launcher))
else:
self.assertEqual(fake_process_launcher(), launcher)
def test_process_launcher_on_windows(self):
self._test_service_launcher('nt')
def test_process_launcher_on_linux(self):
self._test_service_launcher('posix')
class WindowsProcessLauncherTestCase(test.TestCase):
@mock.patch.object(service, 'os_win_utilsfactory', create=True)
@mock.patch('oslo_service.service.SignalHandler')
def setUp(self, mock_signal_handler_cls, mock_utilsfactory):
super(WindowsProcessLauncherTestCase, self).setUp()
self._signal_handler = mock_signal_handler_cls.return_value
self._processutils = mock_utilsfactory.get_processutils.return_value
self._launcher = service.WindowsProcessLauncher()
def test_setup_signal_handlers(self):
exp_signal_map = {'SIGINT': self._launcher._terminate,
'SIGTERM': self._launcher._terminate}
self._signal_handler.add_handler.assert_has_calls(
[mock.call(signal, handler)
for signal, handler in exp_signal_map.items()],
any_order=True)
@mock.patch('sys.exit')
def test_terminate_handler(self, mock_exit):
self._launcher._terminate(mock.sentinel.signum, mock.sentinel.frame)
mock_exit.assert_called_once_with(1)
@mock.patch('subprocess.Popen')
def test_launch(self, mock_popen):
mock_workers = [mock.Mock(), mock.Mock(), mock.Mock()]
mock_popen.side_effect = mock_workers
self._processutils.kill_process_on_job_close.side_effect = [
exception.CinderException, None, None]
# We expect the first process to be cleaned up after failing
# to setup a job object.
self.assertRaises(exception.CinderException,
self._launcher.add_process,
mock.sentinel.cmd1)
mock_workers[0].kill.assert_called_once_with()
self._launcher.add_process(mock.sentinel.cmd2)
self._launcher.add_process(mock.sentinel.cmd3)
mock_popen.assert_has_calls(
[mock.call(cmd)
for cmd in [mock.sentinel.cmd1,
mock.sentinel.cmd2,
mock.sentinel.cmd3]])
self._processutils.kill_process_on_job_close.assert_has_calls(
[mock.call(worker.pid) for worker in mock_workers[1:]])
self._launcher.wait()
wait_processes = self._processutils.wait_for_multiple_processes
wait_processes.assert_called_once_with(
[worker.pid for worker in mock_workers[1:]],
wait_all=True)
|
|
import random
class Move(object):
LEFT = 'left'
UP = 'up'
RIGHT = 'right'
DOWN = 'down'
class Board(object):
def __init__(self, n_init_tiles):
self.BOARD_SIZE = 4
self.board_data = self.initialize_board(n_init_tiles)
self.__score = 0
def generate_board(self):
'''Generate a None board'''
        return [[None] * self.BOARD_SIZE for _ in range(self.BOARD_SIZE)]
def generate_line(self):
'''Generate a line in board'''
return [None] * self.BOARD_SIZE
def initialize_board(self, n):
'''Generate a board with n tiles whose value is not None'''
base_num = self.generate_board()
index_list = [[i, j]
for i in range(0, self.BOARD_SIZE)
for j in range(0, self.BOARD_SIZE)]
for i in range(0, n):
r_num = random.randint(0, len(index_list) - 1)
temp = index_list[r_num]
base_num[temp[0]][temp[1]] = 2
index_list.remove(temp)
return base_num
def rotate(self, board):
'''Rotate board 90 degree clockwise'''
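        # The cell at (i, j) of the rotated board comes from
        # (BOARD_SIZE - 1 - j, i) of the original board.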
new_board = self.generate_board()
for i in range(0, self.BOARD_SIZE):
for j in range(0, self.BOARD_SIZE):
new_board[i][j] = board[self.BOARD_SIZE - 1 - j][i]
return new_board
def get_through(self, line, a, b):
        '''Return True if both line[a] and line[b] hold values and every tile
        strictly between them is None (i.e. the two tiles can reach each other)'''
gap = abs(a - b)
if line[a] == None or line[b] == None:
return False
else:
if gap == 0:
return False
elif gap == 1:
return True
else:
for x in range(1, gap):
if line[min(a, b) + x] != None:
return False
return True
def can_merge(self, line):
'''return True if two occupied tiles can be merged into one'''
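        # e.g. can_merge([2, 2, None, None]) is True, while
        # can_merge([2, 4, 2, None]) is False (no equal pair can reach each other).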
for i in range(0, self.BOARD_SIZE):
for j in range(0, self.BOARD_SIZE):
if self.get_through(line, i, j) and line[i] == line[j]:
return True
return False
def can_move_line(self, line):
        '''Return True if at least one tile in this line can slide or merge
        when the line is pushed towards its beginning (index 0)'''
count = 0
for i in line: # How many tiles are occupied in a line
if i != None:
count = count + 1
if count == 0:
return False
elif count == 1:
if line[0] != None:
return False
return True
else:
if self.can_merge(line):
return True
else:
for i in range(count, self.BOARD_SIZE):
if line[i] != None:
return True
return False
def can_move_left(self):
for line in self.board_data:
if self.can_move_line(line):
return True
return False
def can_move_right(self):
temp_board = self.generate_board()
temp_board = self.rotate(self.rotate(self.board_data))
for line in temp_board:
if self.can_move_line(line):
return True
return False
def can_move_up(self):
temp_board = self.generate_board()
temp_board = self.rotate(self.rotate(self.rotate(self.board_data)))
for line in temp_board:
if self.can_move_line(line):
return True
return False
def can_move_down(self):
temp_board = self.generate_board()
temp_board = self.rotate(self.board_data)
for line in temp_board:
if self.can_move_line(line):
return True
return False
def can_move(self):
return self.can_move_left() or \
self.can_move_right() or \
self.can_move_up() or \
self.can_move_down()
def move_to_beginning(self, line):
'''Move all the tiles with value to the beginning of the line'''
count = 0
line1 = self.generate_line()
for i in line:
if i != None:
line1[count] = i
count = count + 1
return line1
def left_move(self, board):
        '''Slide the tiles to the left, merging pairs of equal tiles that can
        reach each other and adding each merged value to the score'''
count = 0
for line in board:
occupied_tile = 0
for i in line:
if i != None:
occupied_tile = occupied_tile + 1
if occupied_tile == 1 and line[0] == None:
board[count] = self.move_to_beginning(line)
if occupied_tile > 1:
if not self.can_merge(line):
board[count] = self.move_to_beginning(line)
else:
status = [False] * self.BOARD_SIZE
for i in range(0, self.BOARD_SIZE - 1):
for j in range(i + 1, self.BOARD_SIZE):
if self.get_through(line, i, j) and line[i] == line[j] and status[i] == False and status[j] == False:
line[i] = line[i] + line[j]
line[j] = None
self.__score = self.__score + line[i]
status[i] = True
status[j] = True
board[count] = self.move_to_beginning(line)
count = count + 1
return board
def up_move(self, board):
temp_board = self.generate_board()
temp_board = self.rotate(self.rotate(self.rotate(board)))
temp_board = self.left_move(temp_board)
new_board = self.generate_board()
new_board = self.rotate(temp_board)
return new_board
def right_move(self, board):
temp_board = self.generate_board()
temp_board = self.rotate(self.rotate(board))
temp_board = self.left_move(temp_board)
new_board = self.generate_board()
new_board = self.rotate(self.rotate(temp_board))
return new_board
def down_move(self, board):
temp_board = self.generate_board()
temp_board = self.rotate(board)
temp_board = self.left_move(temp_board)
new_board = self.generate_board()
new_board = self.rotate(self.rotate(self.rotate(temp_board)))
return new_board
KEY_MOVE_MAP = {
Move.LEFT : left_move,
Move.RIGHT : right_move,
Move.UP : up_move,
Move.DOWN : down_move,
}
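    # KEY_MOVE_MAP above is the dispatch table used by move(): its values are
    # the plain move functions defined earlier in the class body, invoked
    # explicitly as KEY_MOVE_MAP[action](self, board).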
def generate_tile(self, board):
index_list = []
for i in range(0, self.BOARD_SIZE):
for j in range(0, self.BOARD_SIZE):
if board[i][j] == None:
temp = [i, j]
index_list.append(temp)
r_num = random.randint(0, len(index_list) - 1)
pos = index_list[r_num]
board[pos[0]][pos[1]] = 2
return board
def idle_tile(self, board):
count = 0
for i in range(0, self.BOARD_SIZE):
for j in range(0, self.BOARD_SIZE):
if board[i][j] == None:
count = count + 1
return count
def output(self):
'''print board'''
for i in self.board_data:
s = []
for j in i:
if j == None:
s.append('_'.center(4))
else:
s.append(str(j).center(4))
            print(' '.join(s))
def get_tiles(self):
tile_list = []
for i in self.board_data:
tile_list.append(i)
return tile_list
def copy_board(self, board):
new_board = self.generate_board()
for i in range(0, self.BOARD_SIZE):
for j in range(0, self.BOARD_SIZE):
new_board[i][j] = board[i][j]
return new_board
def is_equal(self, lhs, rhs):
for i in range(0, self.BOARD_SIZE):
for j in range(0, self.BOARD_SIZE):
if lhs[i][j] != rhs[i][j]:
return False
return True
def move(self, move_action):
        '''Move the board in the direction specified by move_action;
        move_action should be one of the Move constants (Move.LEFT, etc.)'''
old_board = self.copy_board(self.board_data)
self.board_data = self.KEY_MOVE_MAP[move_action](self, self.board_data)
if not self.is_equal(old_board, self.board_data):
self.board_data = self.generate_tile(self.board_data)
def get_score(self):
return self.__score
def set_score(self, score):
self.__score = score
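# ---------------------------------------------------------------------------
# Minimal manual test (a sketch, not part of the original game logic): build a
# board with two starting tiles, apply a few random moves while moves remain
# possible, and print the board and score after each one.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    demo_board = Board(2)
    demo_board.output()
    for _ in range(5):
        if not demo_board.can_move():
            break
        demo_board.move(random.choice([Move.LEFT, Move.UP,
                                       Move.RIGHT, Move.DOWN]))
        print('')
        demo_board.output()
    print('score: %d' % demo_board.get_score())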
|
|
"""Functions for generating interesting polynomials, e.g. for benchmarking. """
from sympy.core import Add, Mul, Symbol, sympify, Dummy, symbols
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.core.singleton import S
from sympy.polys.polytools import Poly, PurePoly
from sympy.polys.polyutils import _analyze_gens
from sympy.polys.polyclasses import DMP
from sympy.polys.densebasic import (
dmp_zero, dmp_one, dmp_ground, dmp_normal,
dup_from_raw_dict, dmp_raise, dup_random
)
from sympy.polys.densearith import (
dmp_add_term, dmp_neg, dmp_mul, dmp_sqr
)
from sympy.polys.factortools import (
dup_zz_cyclotomic_poly
)
from sympy.polys.domains import ZZ
from sympy.ntheory import nextprime
from sympy.utilities import cythonized, subsets
@cythonized("n,i")
def swinnerton_dyer_poly(n, x=None, **args):
"""Generates n-th Swinnerton-Dyer polynomial in `x`. """
if n <= 0:
raise ValueError("can't generate Swinnerton-Dyer polynomial of order %s" % n)
if x is not None:
x, cls = sympify(x), Poly
else:
x, cls = Dummy('x'), PurePoly
p, elts = 2, [[x, -sqrt(2)],
[x, sqrt(2)]]
for i in xrange(2, n+1):
p, _elts = nextprime(p), []
neg_sqrt = -sqrt(p)
pos_sqrt = +sqrt(p)
for elt in elts:
_elts.append(elt + [neg_sqrt])
_elts.append(elt + [pos_sqrt])
elts = _elts
poly = []
for elt in elts:
poly.append(Add(*elt))
if not args.get('polys', False):
return Mul(*poly).expand()
else:
return PurePoly(Mul(*poly), x)
def cyclotomic_poly(n, x=None, **args):
"""Generates cyclotomic polynomial of order `n` in `x`. """
if n <= 0:
raise ValueError("can't generate cyclotomic polynomial of order %s" % n)
poly = DMP(dup_zz_cyclotomic_poly(int(n), ZZ), ZZ)
if x is not None:
poly = Poly.new(poly, x)
else:
poly = PurePoly.new(poly, Dummy('x'))
if not args.get('polys', False):
return poly.as_expr()
else:
return poly
def symmetric_poly(n, *gens, **args):
"""Generates symmetric polynomial of order `n`. """
gens = _analyze_gens(gens)
if n < 0 or n > len(gens) or not gens:
raise ValueError("can't generate symmetric polynomial of order %s for %s" % (n, gens))
elif not n:
poly = S.One
else:
poly = Add(*[ Mul(*s) for s in subsets(gens, int(n)) ])
if not args.get('polys', False):
return poly
else:
return Poly(poly, *gens)
def random_poly(x, n, inf, sup, domain=ZZ, polys=False):
"""Return a polynomial of degree ``n`` with coefficients in ``[inf, sup]``. """
poly = Poly(dup_random(n, inf, sup, domain), x, domain=domain)
if not polys:
return poly.as_expr()
else:
return poly
@cythonized("n,i,j")
def interpolating_poly(n, x, X='x', Y='y'):
"""Construct Lagrange interpolating polynomial for ``n`` data points. """
if isinstance(X, str):
X = symbols("%s:%s" % (X, n))
if isinstance(Y, str):
Y = symbols("%s:%s" % (Y, n))
coeffs = []
for i in xrange(0, n):
numer = []
denom = []
for j in xrange(0, n):
if i == j:
continue
numer.append(x - X[j])
denom.append(X[i] - X[j])
numer = Mul(*numer)
denom = Mul(*denom)
coeffs.append(numer/denom)
return Add(*[ coeff*y for coeff, y in zip(coeffs, Y) ])
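# Note (illustrative, not from the original source): with the default symbol
# names this builds the classic Lagrange form
#     sum_i y_i * prod_{j != i} (x - x_j) / (x_i - x_j),
# e.g. interpolating_poly(2, x) gives
#     y0*(x - x1)/(x0 - x1) + y1*(x - x0)/(x1 - x0).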
@cythonized("n,i")
def fateman_poly_F_1(n):
"""Fateman's GCD benchmark: trivial GCD """
Y = [ Symbol('y_' + str(i)) for i in xrange(0, n+1) ]
y_0, y_1 = Y[0], Y[1]
u = y_0 + Add(*[ y for y in Y[1:] ])
v = y_0**2 + Add(*[ y**2 for y in Y[1:] ])
F = ((u + 1)*(u + 2)).as_poly(*Y)
G = ((v + 1)*(-3*y_1*y_0**2 + y_1**2 - 1)).as_poly(*Y)
H = Poly(1, *Y)
return F, G, H
@cythonized("n,m,i")
def dmp_fateman_poly_F_1(n, K):
"""Fateman's GCD benchmark: trivial GCD """
u = [K(1), K(0)]
for i in xrange(0, n):
u = [dmp_one(i, K), u]
v = [K(1), K(0), K(0)]
for i in xrange(0, n):
v = [dmp_one(i, K), dmp_zero(i), v]
m = n-1
U = dmp_add_term(u, dmp_ground(K(1), m), 0, n, K)
V = dmp_add_term(u, dmp_ground(K(2), m), 0, n, K)
f = [[-K(3), K(0)], [], [K(1), K(0), -K(1)]]
W = dmp_add_term(v, dmp_ground(K(1), m), 0, n, K)
Y = dmp_raise(f, m, 1, K)
F = dmp_mul(U, V, n, K)
G = dmp_mul(W, Y, n, K)
H = dmp_one(n, K)
return F, G, H
@cythonized("n,i")
def fateman_poly_F_2(n):
"""Fateman's GCD benchmark: linearly dense quartic inputs """
Y = [ Symbol('y_' + str(i)) for i in xrange(0, n+1) ]
y_0 = Y[0]
u = Add(*[ y for y in Y[1:] ])
H = Poly((y_0 + u + 1)**2, *Y)
F = Poly((y_0 - u - 2)**2, *Y)
G = Poly((y_0 + u + 2)**2, *Y)
return H*F, H*G, H
@cythonized("n,m,i")
def dmp_fateman_poly_F_2(n, K):
"""Fateman's GCD benchmark: linearly dense quartic inputs """
u = [K(1), K(0)]
for i in xrange(0, n-1):
u = [dmp_one(i, K), u]
m = n-1
v = dmp_add_term(u, dmp_ground(K(2), m-1), 0, n, K)
f = dmp_sqr([dmp_one(m, K), dmp_neg(v, m, K)], n, K)
g = dmp_sqr([dmp_one(m, K), v], n, K)
v = dmp_add_term(u, dmp_one(m-1, K), 0, n, K)
h = dmp_sqr([dmp_one(m, K), v], n, K)
return dmp_mul(f, h, n, K), dmp_mul(g, h, n, K), h
@cythonized("n,i")
def fateman_poly_F_3(n):
"""Fateman's GCD benchmark: sparse inputs (deg f ~ vars f) """
Y = [ Symbol('y_' + str(i)) for i in xrange(0, n+1) ]
y_0 = Y[0]
u = Add(*[ y**(n+1) for y in Y[1:] ])
H = Poly((y_0**(n+1) + u + 1)**2, *Y)
F = Poly((y_0**(n+1) - u - 2)**2, *Y)
G = Poly((y_0**(n+1) + u + 2)**2, *Y)
return H*F, H*G, H
@cythonized("n,i")
def dmp_fateman_poly_F_3(n, K):
"""Fateman's GCD benchmark: sparse inputs (deg f ~ vars f) """
u = dup_from_raw_dict({n+1: K.one}, K)
for i in xrange(0, n-1):
u = dmp_add_term([u], dmp_one(i, K), n+1, i+1, K)
v = dmp_add_term(u, dmp_ground(K(2), n-2), 0, n, K)
f = dmp_sqr(dmp_add_term([dmp_neg(v, n-1, K)], dmp_one(n-1, K), n+1, n, K), n, K)
g = dmp_sqr(dmp_add_term([v], dmp_one(n-1, K), n+1, n, K), n, K)
v = dmp_add_term(u, dmp_one(n-2, K), 0, n-1, K)
h = dmp_sqr(dmp_add_term([v], dmp_one(n-1, K), n+1, n, K), n, K)
return dmp_mul(f, h, n, K), dmp_mul(g, h, n, K), h
# A few useful polynomials from Wang's paper ('78).
f_0 = dmp_normal([
[[1,2,3], [2]],
[[3]],
[[4,5,6], [1,2,1], [1]]
], 2, ZZ)
f_1 = dmp_normal([
[[1, 0], []],
[[1, 0, 1], [20, 30], [1, 10, 0]],
[[1, 0], [30, 20], [1, 10, 1, 610], [20, 230, 300]],
[[1, 10, 0], [30, 320, 200], [600, 6000]]
], 2, ZZ)
f_2 = dmp_normal([
[[1], [1, 0], [1, 0, 0], [1, 0, 0, 0]],
[[]],
[[1], [1, 90], [90, 0]],
[[1, -11], [], [1, -11, 0, 0]],
[[]],
[[1, -11], [90, -990]]
], 2, ZZ)
f_3 = dmp_normal([
[[1], [], []],
[[1, 0, 0, 0, 1]],
[[1, 0], [], [], [1, 0]],
[[1], [1, 0, 0, 0], [], [1, 0, 0, 0, 1, 0], []],
[[1, 0, 0, 0, 1], [1, 0, 0, 0, 1, 1, 0, 0], []],
[[1, 0], [1, 0, 0, 0, 0], []]
], 2, ZZ)
f_4 = dmp_normal([
[[-1, 0], [], [], [], [], [], [], [], []],
[[-1, 0, 0, 0], [], [], [], [], []],
[[-1, 0, 0], [], [], [], [-5], [], [], [], [], [], [], [], []],
[[-1, 0, 0, 0, 0], [], [1, 0, 3, 0], [], [-5, 0, 0], [-1, 0, 0, 0], [], [], [], []],
[[1, 0, 3, 0, 0, 0], [], [], [-1, 0, 0, 0, 0, 0], []],
[[1, 0, 3, 0, 0], [], [], [-1, 0, 0, 0, 0], [5, 0, 15], [], [], [-5, 0, 0], [], [], [], []],
[[1, 0, 3, 0, 0, 0, 0], [], [], [-1, 0, 0, 0, 0, 0, 0], [5, 0, 15, 0, 0], [1, 0, 3, 0, 0, 0], [], [-5, 0, 0, 0, 0], []],
[[1, 0, 3, 0, 0, 0, 0, 0]],
[[1, 0, 3, 0, 0, 0, 0], [], [], [], [5, 0, 15, 0, 0], [], [], []],
[[1, 0, 3, 0, 0, 0, 0, 0, 0], [], [], [], [5, 0, 15, 0, 0, 0, 0]]
], 2, ZZ)
f_5 = dmp_normal([
[[-1]],
[[-3], [3, 0]],
[[-3], [6, 0], [-3, 0, 0]],
[[-1], [3, 0], [-3, 0, 0], [1, 0, 0, 0]]
], 2, ZZ)
f_6 = dmp_normal([
[[[2115]], [[]]],
[[[45, 0, 0], [], [], [-45, 0, 0]]],
[[[]]],
[[[-423]], [[-47]], [[]], [[141], [], [94, 0], []], [[]]],
[[[-9, 0, 0], [], [], [9, 0, 0]],
[[-1, 0, 0], [], [], [1, 0, 0]],
[[]],
[[3, 0, 0], [], [2, 0, 0, 0], [-3, 0, 0], [], [-2, 0, 0, 0], []]
]
], 3, ZZ)
w_1 = dmp_normal([
[[4, 0, 0], [4, 0, 0, 0], [-4, 0, 0, 0, 0], [-4, 0, 0, 0, 0, 0], []],
[[1, 0, 0, 0], [12, 0], [-1, 0, 0, 12, 0, 0], [-12, 0, 0, 0], [-12, 0, 0, 0, 0]],
[[8], [6, 8, 0], [-4, 4, -8, 0, 0], [-4, -2, -8, 0, 0, 0], []],
[[2, 0], [1, 0, 0, 0], [-1, 0, -2 , 0, 9, 0], [-12, 12, 0, 0], [-12, 3, 0, 0, 0]],
[[6], [-6, 8, 0], [-2, -8, 2, 0, 0], []],
[[2, 0], [-2, 0, 0, 0], [-3, 0], [3, 0, 0, 0]],
[[-2], [2, 0, 0], []]
], 2, ZZ)
w_2 = dmp_normal([
[24, 48, 0, 0],
[24, 0, 0, -72, 0, 0],
[25, 2, 0, 4, 8],
[1, 0, 0, 1, 0, 0, -12],
[1, -1, -2, 292, 0, 0],
[-1, 0, 0, 3, 0, 0, 0],
[-1, 0, 12, 0, 0, 48],
[],
[-12, 0, 0, 0]
], 1, ZZ)
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Libcloud Python 2.x and 3.x compatibility layer
# Some methods below are taken from the Django PY3K port, which is licensed
# under the 3-clause BSD license
# https://bitbucket.org/loewis/django-3k
# pylint: disable=import-error
from __future__ import absolute_import
import sys
import types
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY2_pre_25 = PY2 and sys.version_info < (2, 5)
PY2_pre_26 = PY2 and sys.version_info < (2, 6)
PY2_pre_27 = PY2 and sys.version_info < (2, 7)
PY2_pre_279 = PY2 and sys.version_info < (2, 7, 9)
PY3_pre_32 = PY3 and sys.version_info < (3, 2)
PY2 = False
PY25 = False
PY26 = False
PY27 = False
PY3 = False
PY32 = False
if sys.version_info >= (2, 0) and sys.version_info < (3, 0):
PY2 = True
if sys.version_info >= (2, 5) and sys.version_info < (2, 6):
PY25 = True
if sys.version_info >= (2, 6) and sys.version_info < (2, 7):
PY26 = True
if sys.version_info >= (2, 7) and sys.version_info < (2, 8):
PY27 = True
if sys.version_info >= (3, 0):
PY3 = True
if sys.version_info >= (3, 2) and sys.version_info < (3, 3):
PY32 = True
if PY2_pre_279 or PY3_pre_32:
try:
from backports.ssl_match_hostname import match_hostname, CertificateError # NOQA
except ImportError:
import warnings
warnings.warn("Missing backports.ssl_match_hostname package")
else:
# ssl module in Python >= 3.2 includes match hostname function
from ssl import match_hostname, CertificateError # NOQA
if PY3:
import http.client as httplib
from io import StringIO
import urllib
import urllib as urllib2
# pylint: disable=no-name-in-module
import urllib.parse as urlparse
import xmlrpc.client as xmlrpclib
from urllib.parse import quote as urlquote
from urllib.parse import unquote as urlunquote
from urllib.parse import urlencode as urlencode
from os.path import relpath
from imp import reload
from builtins import bytes
from builtins import next
parse_qs = urlparse.parse_qs
parse_qsl = urlparse.parse_qsl
basestring = str
def method_type(callable, instance, klass):
return types.MethodType(callable, instance or klass())
def b(s):
if isinstance(s, str):
return s.encode('utf-8')
elif isinstance(s, bytes):
return s
elif isinstance(s, int):
return bytes([s])
else:
raise TypeError("Invalid argument %r for b()" % (s,))
def ensure_string(s):
if isinstance(s, str):
return s
elif isinstance(s, bytes):
return s.decode('utf-8')
else:
raise TypeError("Invalid argument %r for ensure_string()" % (s,))
def byte(n):
# assume n is a Latin-1 string of length 1
return ord(n)
_real_unicode = str
u = str
def bchr(s):
"""Take an integer and make a 1-character byte string."""
return bytes([s])
def dictvalues(d):
return list(d.values())
def tostring(node):
return ET.tostring(node, encoding='unicode')
def hexadigits(s):
# s needs to be a byte string.
return [format(x, "x") for x in s]
else:
import httplib # NOQA
from StringIO import StringIO # NOQA
import urllib # NOQA
import urllib2 # NOQA
import urlparse # NOQA
import xmlrpclib # NOQA
from urllib import quote as _urlquote # NOQA
from urllib import unquote as urlunquote # NOQA
from urllib import urlencode as urlencode # NOQA
from __builtin__ import reload # NOQA
if PY25:
import cgi
parse_qs = cgi.parse_qs
parse_qsl = cgi.parse_qsl
else:
parse_qs = urlparse.parse_qs
parse_qsl = urlparse.parse_qsl
if not PY25:
from os.path import relpath # NOQA
# Save the real value of unicode because urlquote needs it to tell the
# difference between a unicode string and a byte string.
_real_unicode = unicode
basestring = unicode = str
method_type = types.MethodType
b = bytes = ensure_string = str
def byte(n):
return n
u = unicode
def bchr(s):
"""Take an integer and make a 1-character byte string."""
return chr(s)
_default_value_next = object()
def next(iterator, default=_default_value_next):
try:
return iterator.next()
except StopIteration:
if default is _default_value_next:
raise
return default
def dictvalues(d):
return d.values()
tostring = ET.tostring
def urlquote(s, safe='/'):
if isinstance(s, _real_unicode):
# Pretend to be py3 by encoding the URI automatically.
s = s.encode('utf8')
return _urlquote(s, safe)
def hexadigits(s):
# s needs to be a string.
return [x.encode("hex") for x in s]
if PY25:
import posixpath
# Taken from http://jimmyg.org/work/code/barenecessities/index.html
# (MIT license)
# pylint: disable=function-redefined
def relpath(path, start=posixpath.curdir): # NOQA
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = posixpath.abspath(start).split(posixpath.sep)
path_list = posixpath.abspath(path).split(posixpath.sep)
# Work out how much of the filepath is shared by start and path.
i = len(posixpath.commonprefix([start_list, path_list]))
rel_list = [posixpath.pardir] * (len(start_list) - i) + path_list[i:]
if not rel_list:
return posixpath.curdir
return posixpath.join(*rel_list)
if PY27 or PY3:
unittest2_required = False
else:
unittest2_required = True
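# Usage sketch (illustrative; assumes this module is importable as
# libcloud.utils.py3, as in upstream libcloud): callers import the
# version-neutral names defined above instead of branching on
# sys.version_info themselves, e.g.
#
#     from libcloud.utils.py3 import b, httplib, urlparse
#
#     conn = httplib.HTTPConnection('example.com')
#     path = urlparse.urlparse('http://example.com/x?y=1').path
#     payload = b('some text')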
|
|
# -*- encoding: utf-8 -*-
#
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ImageCache class and helper functions."""
import os
import tempfile
import time
import uuid
import mock
from oslo_utils import uuidutils
from ironic.common import exception
from ironic.common import image_service
from ironic.common import images
from ironic.common import utils
from ironic.drivers.modules import image_cache
from ironic.tests import base
def touch(filename):
open(filename, 'w').close()
@mock.patch.object(image_cache, '_fetch')
class TestImageCacheFetch(base.TestCase):
def setUp(self):
super(TestImageCacheFetch, self).setUp()
self.master_dir = tempfile.mkdtemp()
self.cache = image_cache.ImageCache(self.master_dir, None, None)
self.dest_dir = tempfile.mkdtemp()
self.dest_path = os.path.join(self.dest_dir, 'dest')
self.uuid = uuidutils.generate_uuid()
self.master_path = os.path.join(self.master_dir, self.uuid)
@mock.patch.object(image_cache.ImageCache, 'clean_up')
@mock.patch.object(image_cache.ImageCache, '_download_image')
def test_fetch_image_no_master_dir(self, mock_download, mock_clean_up,
mock_fetch):
self.cache.master_dir = None
self.cache.fetch_image(self.uuid, self.dest_path)
self.assertFalse(mock_download.called)
mock_fetch.assert_called_once_with(
None, self.uuid, self.dest_path, None, True)
self.assertFalse(mock_clean_up.called)
@mock.patch.object(os, 'unlink')
@mock.patch.object(image_cache.ImageCache, 'clean_up')
@mock.patch.object(image_cache.ImageCache, '_download_image')
def test_fetch_image_dest_and_master_exist_uptodate(self, mock_download,
mock_clean_up, mock_unlink, mock_fetch):
touch(self.master_path)
os.link(self.master_path, self.dest_path)
self.cache.fetch_image(self.uuid, self.dest_path)
self.assertFalse(mock_unlink.called)
self.assertFalse(mock_download.called)
self.assertFalse(mock_fetch.called)
self.assertFalse(mock_clean_up.called)
@mock.patch.object(image_cache.ImageCache, 'clean_up')
@mock.patch.object(image_cache.ImageCache, '_download_image')
def test_fetch_image_dest_and_master_exist_outdated(self, mock_download,
mock_clean_up, mock_fetch):
touch(self.master_path)
touch(self.dest_path)
self.assertNotEqual(os.stat(self.dest_path).st_ino,
os.stat(self.master_path).st_ino)
self.cache.fetch_image(self.uuid, self.dest_path)
self.assertFalse(mock_download.called)
self.assertFalse(mock_fetch.called)
self.assertTrue(os.path.isfile(self.dest_path))
self.assertEqual(os.stat(self.dest_path).st_ino,
os.stat(self.master_path).st_ino)
self.assertFalse(mock_clean_up.called)
@mock.patch.object(os, 'unlink')
@mock.patch.object(image_cache.ImageCache, 'clean_up')
@mock.patch.object(image_cache.ImageCache, '_download_image')
def test_fetch_image_only_dest_exists(self, mock_download,
mock_clean_up, mock_unlink, mock_fetch):
touch(self.dest_path)
self.cache.fetch_image(self.uuid, self.dest_path)
mock_unlink.assert_called_once_with(self.dest_path)
self.assertFalse(mock_fetch.called)
mock_download.assert_called_once_with(
self.uuid, self.master_path, self.dest_path,
ctx=None, force_raw=True)
self.assertTrue(mock_clean_up.called)
@mock.patch.object(image_cache.ImageCache, 'clean_up')
@mock.patch.object(image_cache.ImageCache, '_download_image')
def test_fetch_image_master_exists(self, mock_download, mock_clean_up,
mock_fetch):
touch(self.master_path)
self.cache.fetch_image(self.uuid, self.dest_path)
self.assertFalse(mock_download.called)
self.assertFalse(mock_fetch.called)
self.assertTrue(os.path.isfile(self.dest_path))
self.assertEqual(os.stat(self.dest_path).st_ino,
os.stat(self.master_path).st_ino)
self.assertFalse(mock_clean_up.called)
@mock.patch.object(image_cache.ImageCache, 'clean_up')
@mock.patch.object(image_cache.ImageCache, '_download_image')
def test_fetch_image(self, mock_download, mock_clean_up,
mock_fetch):
self.cache.fetch_image(self.uuid, self.dest_path)
self.assertFalse(mock_fetch.called)
mock_download.assert_called_once_with(
self.uuid, self.master_path, self.dest_path,
ctx=None, force_raw=True)
self.assertTrue(mock_clean_up.called)
@mock.patch.object(image_cache.ImageCache, 'clean_up')
@mock.patch.object(image_cache.ImageCache, '_download_image')
def test_fetch_image_not_uuid(self, mock_download, mock_clean_up,
mock_fetch):
href = u'http://abc.com/ubuntu.qcow2'
href_converted = str(uuid.uuid5(uuid.NAMESPACE_URL,
href.encode('utf-8')))
master_path = os.path.join(self.master_dir, href_converted)
self.cache.fetch_image(href, self.dest_path)
self.assertFalse(mock_fetch.called)
mock_download.assert_called_once_with(
href, master_path, self.dest_path,
ctx=None, force_raw=True)
self.assertTrue(mock_clean_up.called)
def test__download_image(self, mock_fetch):
def _fake_fetch(ctx, uuid, tmp_path, *args):
self.assertEqual(self.uuid, uuid)
self.assertNotEqual(self.dest_path, tmp_path)
self.assertNotEqual(os.path.dirname(tmp_path), self.master_dir)
with open(tmp_path, 'w') as fp:
fp.write("TEST")
mock_fetch.side_effect = _fake_fetch
self.cache._download_image(self.uuid, self.master_path, self.dest_path)
self.assertTrue(os.path.isfile(self.dest_path))
self.assertTrue(os.path.isfile(self.master_path))
self.assertEqual(os.stat(self.dest_path).st_ino,
os.stat(self.master_path).st_ino)
with open(self.dest_path) as fp:
self.assertEqual("TEST", fp.read())
class TestImageCacheCleanUp(base.TestCase):
def setUp(self):
super(TestImageCacheCleanUp, self).setUp()
self.master_dir = tempfile.mkdtemp()
self.cache = image_cache.ImageCache(self.master_dir,
cache_size=10,
cache_ttl=600)
@mock.patch.object(image_cache.ImageCache, '_clean_up_ensure_cache_size')
def test_clean_up_old_deleted(self, mock_clean_size):
mock_clean_size.return_value = None
files = [os.path.join(self.master_dir, str(i))
for i in range(2)]
for filename in files:
touch(filename)
# NOTE(dtantsur): Can't alter ctime, have to set mtime to the future
new_current_time = time.time() + 900
os.utime(files[0], (new_current_time - 100, new_current_time - 100))
with mock.patch.object(time, 'time', lambda: new_current_time):
self.cache.clean_up()
mock_clean_size.assert_called_once_with(mock.ANY, None)
survived = mock_clean_size.call_args[0][0]
self.assertEqual(1, len(survived))
self.assertEqual(files[0], survived[0][0])
# NOTE(dtantsur): do not compare milliseconds
self.assertEqual(int(new_current_time - 100), int(survived[0][1]))
self.assertEqual(int(new_current_time - 100),
int(survived[0][2].st_mtime))
@mock.patch.object(image_cache.ImageCache, '_clean_up_ensure_cache_size')
def test_clean_up_old_with_amount(self, mock_clean_size):
files = [os.path.join(self.master_dir, str(i))
for i in range(2)]
for filename in files:
            with open(filename, 'wb') as fp:
                fp.write(b'X')
new_current_time = time.time() + 900
with mock.patch.object(time, 'time', lambda: new_current_time):
self.cache.clean_up(amount=1)
self.assertFalse(mock_clean_size.called)
# Exactly one file is expected to be deleted
self.assertTrue(any(os.path.exists(f) for f in files))
self.assertFalse(all(os.path.exists(f) for f in files))
@mock.patch.object(image_cache.ImageCache, '_clean_up_ensure_cache_size')
def test_clean_up_files_with_links_untouched(self, mock_clean_size):
mock_clean_size.return_value = None
files = [os.path.join(self.master_dir, str(i))
for i in range(2)]
for filename in files:
touch(filename)
os.link(filename, filename + 'copy')
new_current_time = time.time() + 900
with mock.patch.object(time, 'time', lambda: new_current_time):
self.cache.clean_up()
for filename in files:
self.assertTrue(os.path.exists(filename))
mock_clean_size.assert_called_once_with([], None)
@mock.patch.object(image_cache.ImageCache, '_clean_up_too_old')
def test_clean_up_ensure_cache_size(self, mock_clean_ttl):
mock_clean_ttl.side_effect = lambda *xx: xx
# NOTE(dtantsur): Cache size in test is 10 bytes, we create 6 files
# with 3 bytes each and expect 3 to be deleted
files = [os.path.join(self.master_dir, str(i))
for i in range(6)]
for filename in files:
with open(filename, 'w') as fp:
fp.write('123')
# NOTE(dtantsur): Make 3 files 'newer' to check that
# old ones are deleted first
new_current_time = time.time() + 100
for filename in files[:3]:
os.utime(filename, (new_current_time, new_current_time))
with mock.patch.object(time, 'time', lambda: new_current_time):
self.cache.clean_up()
for filename in files[:3]:
self.assertTrue(os.path.exists(filename))
for filename in files[3:]:
self.assertFalse(os.path.exists(filename))
mock_clean_ttl.assert_called_once_with(mock.ANY, None)
@mock.patch.object(image_cache.ImageCache, '_clean_up_too_old')
def test_clean_up_ensure_cache_size_with_amount(self, mock_clean_ttl):
mock_clean_ttl.side_effect = lambda *xx: xx
# NOTE(dtantsur): Cache size in test is 10 bytes, we create 6 files
        # with 3 bytes each and set amount to 15, so 5 files should be deleted
files = [os.path.join(self.master_dir, str(i))
for i in range(6)]
for filename in files:
with open(filename, 'w') as fp:
fp.write('123')
# NOTE(dtantsur): Make 1 file 'newer' to check that
# old ones are deleted first
new_current_time = time.time() + 100
os.utime(files[0], (new_current_time, new_current_time))
with mock.patch.object(time, 'time', lambda: new_current_time):
self.cache.clean_up(amount=15)
self.assertTrue(os.path.exists(files[0]))
for filename in files[5:]:
self.assertFalse(os.path.exists(filename))
mock_clean_ttl.assert_called_once_with(mock.ANY, 15)
@mock.patch.object(image_cache.LOG, 'info')
@mock.patch.object(image_cache.ImageCache, '_clean_up_too_old')
def test_clean_up_cache_still_large(self, mock_clean_ttl, mock_log):
mock_clean_ttl.side_effect = lambda *xx: xx
# NOTE(dtantsur): Cache size in test is 10 bytes, we create 2 files
        # that cannot be deleted and expect this to be logged
files = [os.path.join(self.master_dir, str(i))
for i in range(2)]
for filename in files:
with open(filename, 'w') as fp:
fp.write('123')
os.link(filename, filename + 'copy')
self.cache.clean_up()
for filename in files:
self.assertTrue(os.path.exists(filename))
self.assertTrue(mock_log.called)
mock_clean_ttl.assert_called_once_with(mock.ANY, None)
@mock.patch.object(utils, 'rmtree_without_raise')
@mock.patch.object(image_cache, '_fetch')
def test_temp_images_not_cleaned(self, mock_fetch, mock_rmtree):
def _fake_fetch(ctx, uuid, tmp_path, *args):
with open(tmp_path, 'w') as fp:
fp.write("TEST" * 10)
# assume cleanup from another thread at this moment
self.cache.clean_up()
self.assertTrue(os.path.exists(tmp_path))
mock_fetch.side_effect = _fake_fetch
master_path = os.path.join(self.master_dir, 'uuid')
dest_path = os.path.join(tempfile.mkdtemp(), 'dest')
self.cache._download_image('uuid', master_path, dest_path)
self.assertTrue(mock_rmtree.called)
@mock.patch.object(utils, 'rmtree_without_raise')
@mock.patch.object(image_cache, '_fetch')
def test_temp_dir_exception(self, mock_fetch, mock_rmtree):
mock_fetch.side_effect = exception.IronicException
self.assertRaises(exception.IronicException,
self.cache._download_image,
'uuid', 'fake', 'fake')
self.assertTrue(mock_rmtree.called)
@mock.patch.object(image_cache.LOG, 'warn')
@mock.patch.object(image_cache.ImageCache, '_clean_up_too_old')
@mock.patch.object(image_cache.ImageCache, '_clean_up_ensure_cache_size')
def test_clean_up_amount_not_satisfied(self, mock_clean_size,
mock_clean_ttl, mock_log):
mock_clean_ttl.side_effect = lambda *xx: xx
mock_clean_size.side_effect = lambda listing, amount: amount
self.cache.clean_up(amount=15)
self.assertTrue(mock_log.called)
def test_cleanup_ordering(self):
class ParentCache(image_cache.ImageCache):
def __init__(self):
super(ParentCache, self).__init__('a', 1, 1, None)
@image_cache.cleanup(priority=10000)
class Cache1(ParentCache):
pass
@image_cache.cleanup(priority=20000)
class Cache2(ParentCache):
pass
@image_cache.cleanup(priority=10000)
class Cache3(ParentCache):
pass
self.assertEqual(image_cache._cache_cleanup_list[0][1], Cache2)
        # The order of caches with the same priority is not deterministic.
item_possibilities = [Cache1, Cache3]
second_item_actual = image_cache._cache_cleanup_list[1][1]
self.assertIn(second_item_actual, item_possibilities)
item_possibilities.remove(second_item_actual)
third_item_actual = image_cache._cache_cleanup_list[2][1]
self.assertEqual(item_possibilities[0], third_item_actual)
@mock.patch.object(image_cache, '_cache_cleanup_list')
@mock.patch.object(os, 'statvfs')
@mock.patch.object(image_service, 'get_image_service')
class CleanupImageCacheTestCase(base.TestCase):
def setUp(self):
super(CleanupImageCacheTestCase, self).setUp()
self.mock_first_cache = mock.MagicMock()
self.mock_second_cache = mock.MagicMock()
self.cache_cleanup_list = [(50, self.mock_first_cache),
(20, self.mock_second_cache)]
self.mock_first_cache.return_value.master_dir = 'first_cache_dir'
self.mock_second_cache.return_value.master_dir = 'second_cache_dir'
def test_no_clean_up(self, mock_image_service, mock_statvfs,
cache_cleanup_list_mock):
# Enough space found - no clean up
mock_show = mock_image_service.return_value.show
mock_show.return_value = dict(size=42)
mock_statvfs.return_value = mock.Mock(f_frsize=1, f_bavail=1024)
cache_cleanup_list_mock.__iter__.return_value = self.cache_cleanup_list
image_cache.clean_up_caches(None, 'master_dir', [('uuid', 'path')])
mock_show.assert_called_once_with('uuid')
mock_statvfs.assert_called_once_with('master_dir')
self.assertFalse(self.mock_first_cache.return_value.clean_up.called)
self.assertFalse(self.mock_second_cache.return_value.clean_up.called)
mock_statvfs.assert_called_once_with('master_dir')
@mock.patch.object(os, 'stat')
def test_one_clean_up(self, mock_stat, mock_image_service, mock_statvfs,
cache_cleanup_list_mock):
# Not enough space, first cache clean up is enough
mock_stat.return_value.st_dev = 1
mock_show = mock_image_service.return_value.show
mock_show.return_value = dict(size=42)
mock_statvfs.side_effect = [
mock.Mock(f_frsize=1, f_bavail=1),
mock.Mock(f_frsize=1, f_bavail=1024)
]
cache_cleanup_list_mock.__iter__.return_value = self.cache_cleanup_list
image_cache.clean_up_caches(None, 'master_dir', [('uuid', 'path')])
mock_show.assert_called_once_with('uuid')
mock_statvfs.assert_called_with('master_dir')
self.assertEqual(2, mock_statvfs.call_count)
self.mock_first_cache.return_value.clean_up.assert_called_once_with(
amount=(42 - 1))
self.assertFalse(self.mock_second_cache.return_value.clean_up.called)
        # Since clean_up_caches uses a generator expression, stat on the
        # second cache is not called if cleaning up the first cache freed
        # enough space.
mock_stat_calls_expected = [mock.call('master_dir'),
mock.call('first_cache_dir')]
mock_statvfs_calls_expected = [mock.call('master_dir'),
mock.call('master_dir')]
self.assertEqual(mock_stat_calls_expected, mock_stat.mock_calls)
self.assertEqual(mock_statvfs_calls_expected, mock_statvfs.mock_calls)
@mock.patch.object(os, 'stat')
def test_clean_up_another_fs(self, mock_stat, mock_image_service,
mock_statvfs, cache_cleanup_list_mock):
# Not enough space, need to cleanup second cache
mock_stat.side_effect = [mock.Mock(st_dev=1),
mock.Mock(st_dev=2),
mock.Mock(st_dev=1)]
mock_show = mock_image_service.return_value.show
mock_show.return_value = dict(size=42)
mock_statvfs.side_effect = [
mock.Mock(f_frsize=1, f_bavail=1),
mock.Mock(f_frsize=1, f_bavail=1024)
]
cache_cleanup_list_mock.__iter__.return_value = self.cache_cleanup_list
image_cache.clean_up_caches(None, 'master_dir', [('uuid', 'path')])
mock_show.assert_called_once_with('uuid')
mock_statvfs.assert_called_with('master_dir')
self.assertEqual(2, mock_statvfs.call_count)
self.mock_second_cache.return_value.clean_up.assert_called_once_with(
amount=(42 - 1))
self.assertFalse(self.mock_first_cache.return_value.clean_up.called)
        # Since the first cache is on a different partition, it is not
        # considered for cleanup.
mock_stat_calls_expected = [mock.call('master_dir'),
mock.call('first_cache_dir'),
mock.call('second_cache_dir')]
mock_statvfs_calls_expected = [mock.call('master_dir'),
mock.call('master_dir')]
self.assertEqual(mock_stat_calls_expected, mock_stat.mock_calls)
self.assertEqual(mock_statvfs_calls_expected, mock_statvfs.mock_calls)
@mock.patch.object(os, 'stat')
def test_both_clean_up(self, mock_stat, mock_image_service, mock_statvfs,
cache_cleanup_list_mock):
# Not enough space, clean up of both caches required
mock_stat.return_value.st_dev = 1
mock_show = mock_image_service.return_value.show
mock_show.return_value = dict(size=42)
mock_statvfs.side_effect = [
mock.Mock(f_frsize=1, f_bavail=1),
mock.Mock(f_frsize=1, f_bavail=2),
mock.Mock(f_frsize=1, f_bavail=1024)
]
cache_cleanup_list_mock.__iter__.return_value = self.cache_cleanup_list
image_cache.clean_up_caches(None, 'master_dir', [('uuid', 'path')])
mock_show.assert_called_once_with('uuid')
mock_statvfs.assert_called_with('master_dir')
self.assertEqual(3, mock_statvfs.call_count)
self.mock_first_cache.return_value.clean_up.assert_called_once_with(
amount=(42 - 1))
self.mock_second_cache.return_value.clean_up.assert_called_once_with(
amount=(42 - 2))
mock_stat_calls_expected = [mock.call('master_dir'),
mock.call('first_cache_dir'),
mock.call('second_cache_dir')]
mock_statvfs_calls_expected = [mock.call('master_dir'),
mock.call('master_dir'),
mock.call('master_dir')]
self.assertEqual(mock_stat_calls_expected, mock_stat.mock_calls)
self.assertEqual(mock_statvfs_calls_expected, mock_statvfs.mock_calls)
@mock.patch.object(os, 'stat')
def test_clean_up_fail(self, mock_stat, mock_image_service, mock_statvfs,
cache_cleanup_list_mock):
# Not enough space even after cleaning both caches - failure
mock_stat.return_value.st_dev = 1
mock_show = mock_image_service.return_value.show
mock_show.return_value = dict(size=42)
mock_statvfs.return_value = mock.Mock(f_frsize=1, f_bavail=1)
cache_cleanup_list_mock.__iter__.return_value = self.cache_cleanup_list
self.assertRaises(exception.InsufficientDiskSpace,
image_cache.clean_up_caches,
None, 'master_dir', [('uuid', 'path')])
mock_show.assert_called_once_with('uuid')
mock_statvfs.assert_called_with('master_dir')
self.assertEqual(3, mock_statvfs.call_count)
self.mock_first_cache.return_value.clean_up.assert_called_once_with(
amount=(42 - 1))
self.mock_second_cache.return_value.clean_up.assert_called_once_with(
amount=(42 - 1))
mock_stat_calls_expected = [mock.call('master_dir'),
mock.call('first_cache_dir'),
mock.call('second_cache_dir')]
mock_statvfs_calls_expected = [mock.call('master_dir'),
mock.call('master_dir'),
mock.call('master_dir')]
self.assertEqual(mock_stat_calls_expected, mock_stat.mock_calls)
self.assertEqual(mock_statvfs_calls_expected, mock_statvfs.mock_calls)
class TestFetchCleanup(base.TestCase):
@mock.patch.object(images, 'converted_size')
@mock.patch.object(images, 'fetch')
@mock.patch.object(images, 'image_to_raw')
@mock.patch.object(image_cache, '_clean_up_caches')
def test__fetch(self, mock_clean, mock_raw, mock_fetch, mock_size):
mock_size.return_value = 100
image_cache._fetch('fake', 'fake-uuid', '/foo/bar', force_raw=True)
mock_fetch.assert_called_once_with('fake', 'fake-uuid',
'/foo/bar.part', None,
force_raw=False)
mock_clean.assert_called_once_with('/foo', 100)
mock_raw.assert_called_once_with('fake-uuid', '/foo/bar',
'/foo/bar.part')
|
|
""" Cisco_IOS_XE_types
Cisco XE Native Common Type Definitions
Copyright (c) 2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class AccessListInOutTypeEnum(Enum):
"""
AccessListInOutTypeEnum
.. data:: in_ = 0
.. data:: out = 1
"""
in_ = 0
out = 1
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_types as meta
return meta._meta_table['AccessListInOutTypeEnum']
class AclTcpPortTypeEnum(Enum):
"""
AclTcpPortTypeEnum
.. data:: bgp = 0
.. data:: chargen = 1
.. data:: cmd = 2
.. data:: connectedapps_plain = 3
.. data:: connectedapps_tls = 4
.. data:: daytime = 5
.. data:: discard = 6
.. data:: domain = 7
.. data:: echo = 8
.. data:: exec_ = 9
.. data:: finger = 10
.. data:: ftp = 11
.. data:: ftp_data = 12
.. data:: gopher = 13
.. data:: hostname = 14
.. data:: ident = 15
.. data:: irc = 16
.. data:: klogin = 17
.. data:: kshell = 18
.. data:: login = 19
.. data:: lpd = 20
.. data:: msrpc = 21
.. data:: nntp = 22
.. data:: pim_auto_rp = 23
.. data:: pop2 = 24
.. data:: pop3 = 25
.. data:: smtp = 26
.. data:: sunrpc = 27
.. data:: syslog = 28
.. data:: tacacs = 29
.. data:: talk = 30
.. data:: telnet = 31
.. data:: time = 32
.. data:: uucp = 33
.. data:: whois = 34
.. data:: www = 35
"""
bgp = 0
chargen = 1
cmd = 2
connectedapps_plain = 3
connectedapps_tls = 4
daytime = 5
discard = 6
domain = 7
echo = 8
exec_ = 9
finger = 10
ftp = 11
ftp_data = 12
gopher = 13
hostname = 14
ident = 15
irc = 16
klogin = 17
kshell = 18
login = 19
lpd = 20
msrpc = 21
nntp = 22
pim_auto_rp = 23
pop2 = 24
pop3 = 25
smtp = 26
sunrpc = 27
syslog = 28
tacacs = 29
talk = 30
telnet = 31
time = 32
uucp = 33
whois = 34
www = 35
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_types as meta
return meta._meta_table['AclTcpPortTypeEnum']
class AclUdpPortTypeEnum(Enum):
"""
AclUdpPortTypeEnum
.. data:: biff = 0
.. data:: bootpc = 1
.. data:: bootps = 2
.. data:: discard = 3
.. data:: dnsix = 4
.. data:: domain = 5
.. data:: echo = 6
.. data:: isakmp = 7
.. data:: mobile_ip = 8
.. data:: nameserver = 9
.. data:: netbios_dgm = 10
.. data:: netbios_ns = 11
.. data:: netbios_ss = 12
.. data:: non500_isakmp = 13
.. data:: ntp = 14
.. data:: pim_auto_rp = 15
.. data:: rip = 16
.. data:: ripv6 = 17
.. data:: snmp = 18
.. data:: snmptrap = 19
.. data:: sunrpc = 20
.. data:: syslog = 21
.. data:: tacacs = 22
.. data:: talk = 23
.. data:: tftp = 24
.. data:: time = 25
.. data:: who = 26
.. data:: xdmcp = 27
"""
biff = 0
bootpc = 1
bootps = 2
discard = 3
dnsix = 4
domain = 5
echo = 6
isakmp = 7
mobile_ip = 8
nameserver = 9
netbios_dgm = 10
netbios_ns = 11
netbios_ss = 12
non500_isakmp = 13
ntp = 14
pim_auto_rp = 15
rip = 16
ripv6 = 17
snmp = 18
snmptrap = 19
sunrpc = 20
syslog = 21
tacacs = 22
talk = 23
tftp = 24
time = 25
who = 26
xdmcp = 27
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_types as meta
return meta._meta_table['AclUdpPortTypeEnum']
class Bgp_Ipv4_Af_TypeEnum(Enum):
"""
Bgp\_Ipv4\_Af\_TypeEnum
.. data:: unicast = 0
.. data:: multicast = 1
.. data:: mdt = 2
.. data:: tunnel = 3
.. data:: labeled_unicast = 4
.. data:: flowspec = 5
.. data:: mvpn = 6
"""
unicast = 0
multicast = 1
mdt = 2
tunnel = 3
labeled_unicast = 4
flowspec = 5
mvpn = 6
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_types as meta
return meta._meta_table['Bgp_Ipv4_Af_TypeEnum']
class Bgp_Ipv6_Af_TypeEnum(Enum):
"""
Bgp\_Ipv6\_Af\_TypeEnum
.. data:: unicast = 0
.. data:: multicast = 1
.. data:: mdt = 2
.. data:: flowspec = 3
.. data:: mvpn = 4
"""
unicast = 0
multicast = 1
mdt = 2
flowspec = 3
mvpn = 4
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_types as meta
return meta._meta_table['Bgp_Ipv6_Af_TypeEnum']
class CommunityWellKnownTypeEnum(Enum):
"""
CommunityWellKnownTypeEnum
.. data:: gshut = 0
.. data:: internet = 1
.. data:: local_AS = 2
.. data:: no_advertise = 3
.. data:: no_export = 4
"""
gshut = 0
internet = 1
local_AS = 2
no_advertise = 3
no_export = 4
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_types as meta
return meta._meta_table['CommunityWellKnownTypeEnum']
class Cos_ValueTypeEnum(Enum):
"""
Cos\_ValueTypeEnum
.. data:: cos = 0
.. data:: dscp = 1
.. data:: exp = 2
.. data:: precedence = 3
"""
cos = 0
dscp = 1
exp = 2
precedence = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_types as meta
return meta._meta_table['Cos_ValueTypeEnum']
class DscpTypeEnum(Enum):
"""
DscpTypeEnum
.. data:: af11 = 10
.. data:: af12 = 12
.. data:: af13 = 14
.. data:: af21 = 18
.. data:: af22 = 20
.. data:: af23 = 22
.. data:: af31 = 26
.. data:: af32 = 28
.. data:: af33 = 30
.. data:: af41 = 34
.. data:: af42 = 36
.. data:: af43 = 38
.. data:: cs1 = 8
.. data:: cs2 = 16
.. data:: cs3 = 24
.. data:: cs4 = 32
.. data:: cs5 = 40
.. data:: cs6 = 48
.. data:: cs7 = 56
.. data:: default = 0
.. data:: dscp = 57
.. data:: ef = 46
.. data:: precedence = 58
"""
af11 = 10
af12 = 12
af13 = 14
af21 = 18
af22 = 20
af23 = 22
af31 = 26
af32 = 28
af33 = 30
af41 = 34
af42 = 36
af43 = 38
cs1 = 8
cs2 = 16
cs3 = 24
cs4 = 32
cs5 = 40
cs6 = 48
cs7 = 56
default = 0
dscp = 57
ef = 46
precedence = 58
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_types as meta
return meta._meta_table['DscpTypeEnum']
class Exp_ValueTypeEnum(Enum):
"""
Exp\_ValueTypeEnum
.. data:: cos = 0
.. data:: dscp = 1
.. data:: exp = 2
.. data:: precedence = 3
"""
cos = 0
dscp = 1
exp = 2
precedence = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_types as meta
return meta._meta_table['Exp_ValueTypeEnum']
class InterfaceTypeEnum(Enum):
"""
InterfaceTypeEnum
.. data:: BDI = 0
.. data:: FastEthernet = 1
.. data:: GigabitEthernet = 2
.. data:: Loopback = 3
.. data:: Port_channel = 4
.. data:: Serial = 5
.. data:: TenGigabitEthernet = 6
.. data:: Vlan = 7
"""
BDI = 0
FastEthernet = 1
GigabitEthernet = 2
Loopback = 3
Port_channel = 4
Serial = 5
TenGigabitEthernet = 6
Vlan = 7
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_types as meta
return meta._meta_table['InterfaceTypeEnum']
class LimitDcNonDcTypeEnum(Enum):
"""
LimitDcNonDcTypeEnum
.. data:: disable = 0
"""
disable = 0
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_types as meta
return meta._meta_table['LimitDcNonDcTypeEnum']
class MobilityTypeEnum(Enum):
"""
MobilityTypeEnum
.. data:: bind_acknowledgement = 0
.. data:: bind_error = 1
.. data:: bind_refresh = 2
.. data:: bind_update = 3
.. data:: cot = 4
.. data:: coti = 5
.. data:: hot = 6
.. data:: hoti = 7
"""
bind_acknowledgement = 0
bind_error = 1
bind_refresh = 2
bind_update = 3
cot = 4
coti = 5
hot = 6
hoti = 7
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_types as meta
return meta._meta_table['MobilityTypeEnum']
class MonthTypeEnum(Enum):
"""
MonthTypeEnum
.. data:: Jan = 0
.. data:: Feb = 1
.. data:: Mar = 2
.. data:: Apr = 3
.. data:: May = 4
.. data:: Jun = 5
.. data:: Jul = 6
.. data:: Aug = 7
.. data:: Sep = 8
.. data:: Oct = 9
.. data:: Nov = 10
.. data:: Dec = 11
"""
Jan = 0
Feb = 1
Mar = 2
Apr = 3
May = 4
Jun = 5
Jul = 6
Aug = 7
Sep = 8
Oct = 9
Nov = 10
Dec = 11
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_types as meta
return meta._meta_table['MonthTypeEnum']
class Prec_ValueTypeEnum(Enum):
"""
Prec\_ValueTypeEnum
.. data:: cos = 0
.. data:: dscp = 1
.. data:: exp = 2
.. data:: precedence = 3
"""
cos = 0
dscp = 1
exp = 2
precedence = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_types as meta
return meta._meta_table['Prec_ValueTypeEnum']
class PrecedenceTypeEnum(Enum):
"""
PrecedenceTypeEnum
.. data:: critical = 0
.. data:: flash = 1
.. data:: flash_override = 2
.. data:: immediate = 3
.. data:: internet = 4
.. data:: network = 5
.. data:: priority = 6
.. data:: routine = 7
"""
critical = 0
flash = 1
flash_override = 2
immediate = 3
internet = 4
network = 5
priority = 6
routine = 7
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_types as meta
return meta._meta_table['PrecedenceTypeEnum']
class Qos_ValueTypeEnum(Enum):
"""
Qos\_ValueTypeEnum
.. data:: cos = 0
.. data:: dscp = 1
.. data:: exp = 2
.. data:: precedence = 3
"""
cos = 0
dscp = 1
exp = 2
precedence = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_types as meta
return meta._meta_table['Qos_ValueTypeEnum']
class RedistOspfExternalTypeEnum(Enum):
"""
RedistOspfExternalTypeEnum
.. data:: Y_1 = 0
.. data:: Y_2 = 1
"""
Y_1 = 0
Y_2 = 1
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_types as meta
return meta._meta_table['RedistOspfExternalTypeEnum']
class WeekdayTypeEnum(Enum):
"""
WeekdayTypeEnum
.. data:: Mon = 0
.. data:: Tue = 1
.. data:: Wed = 2
.. data:: Thu = 3
.. data:: Fri = 4
.. data:: Sat = 5
.. data:: Sun = 6
"""
Mon = 0
Tue = 1
Wed = 2
Thu = 3
Fri = 4
Sat = 5
Sun = 6
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_types as meta
return meta._meta_table['WeekdayTypeEnum']
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from Bunch import Bunch
from OrderedDict import OrderedDict
from Property import Property
# ----------------------------------------------------------------------
# Ordered Bunch
# ----------------------------------------------------------------------
class OrderedBunch(Bunch,OrderedDict):
""" An ordered dictionary that provides attribute-style access.
"""
_root = Property('_root')
_map = Property('_map')
def __new__(klass,*args,**kwarg):
self = Bunch.__new__(klass)
#if len(args) > 1:
#raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self._root
except:
root = [] # sentinel node
root[:] = [root, root, None]
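            # Each link is a list of the form [PREV, NEXT, KEY]; the sentinel root points
            # at itself, giving an empty circular doubly-linked list that records insertion order.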
dict.__setitem__(self,'_root',root)
dict.__setitem__(self,'_map' ,{})
return self
def __setattr__(self, key, value):
"""od.__setitem__(i, y) <==> od[i]=y"""
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
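        # Keys already present on the instance, or defined on the class (such as the
        # _root/_map Property descriptors and the methods), do not get a new link.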
if not hasattr(self,key) and not hasattr(self.__class__,key):
root = dict.__getitem__(self,'_root')
last = root[0]
map = dict.__getitem__(self,'_map')
last[1] = root[0] = map[key] = [last, root, key]
Bunch.__setattr__(self,key, value)
def __delattr__(self, key):
"""od.__delitem__(y) <==> del od[y]"""
# Deleting an existing item uses self._map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
Bunch.__delattr__(self,key)
link_prev, link_next, key = self._map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __setitem__(self,k,v):
self.__setattr__(k,v)
def __delitem__(self,k):
self.__delattr__(k)
def __iter__(self):
"""od.__iter__() <==> iter(od)"""
root = self._root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
"""od.__reversed__() <==> reversed(od)"""
root = self._root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
"""od.clear() -> None. Remove all items from od."""
try:
for node in self._map.itervalues():
del node[:]
root = self._root
root[:] = [root, root, None]
self._map.clear()
except AttributeError:
pass
Bunch.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self._root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self._map[key]
value = Bunch.pop(self,key)
return key, value
def __reduce__(self):
"""Return state information for pickling"""
items = [( k, OrderedBunch.__getitem__(self,k) ) for k in OrderedBunch.iterkeys(self)]
inst_dict = vars(self).copy()
for k in vars(OrderedBunch()):
inst_dict.pop(k, None)
return (_reconstructor, (self.__class__,items,), inst_dict)
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def copy(self):
"""od.copy() -> a shallow copy of od"""
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, (OrderedBunch,OrderedDict)):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# allow override of iterators
__iter = __iter__
def keys(self):
"""OrderedDict.keys() -> list of keys in the dictionary"""
return list(self.__iter())
def values(self):
"""OrderedDict.values() -> list of values in the dictionary"""
return [self[key] for key in self.__iter()]
def items(self):
"""OrderedDict.items() -> list of (key, value) pairs in the dictionary"""
return [(key, self[key]) for key in self.__iter()]
def iterkeys(self):
"""OrderedDict.iterkeys() -> an iterator over the keys in the dictionary"""
return self.__iter()
def itervalues(self):
"""OrderedDict.itervalues -> an iterator over the values in the dictionary"""
for k in self.__iter():
yield self[k]
def iteritems(self):
"""od.iteritems -> an iterator over the (key, value) items in the dictionary"""
for k in self.__iter():
yield (k, self[k])
# for rebuilding dictionaries with attributes
def _reconstructor(klass,items):
self = OrderedBunch.__new__(klass)
OrderedBunch.__init__(self,items)
return self
# ----------------------------------------------------------------------
# Module Tests
# ----------------------------------------------------------------------
if __name__ == '__main__':
o = OrderedBunch()
print 'should be zero:' , len(o)
o['x'] = 'hello'
o.y = 1
o['z'] = [3,4,5]
o.t = OrderedBunch()
o.t['h'] = 20
o.t.i = (1,2,3)
print o
import pickle
d = pickle.dumps(o)
p = pickle.loads(d)
print ''
print p
o.t['h'] = 'changed'
p.update(o)
print ''
print p
class TestClass(OrderedBunch):
a = Property('a')
def __init__(self):
self.a = 'hidden!'
self.b = 'hello!'
c = TestClass()
print c
"""
Agent smoketest code.
This python script is meant to be invoked within a docker image in which the proper python version is activated (e.g.
via pyenv). In this way, the agent can be validated against different python versions.
Concept:
This code serves as a common code-base for different types of smoketest "processes" (i.e. same code runs in
different modes). Examples of modes are (uploader, verifier).
Uploader (a.k.a Producer):
Waits for Scalyr agent to be up and running (by querying scalyr backend).
Produces 1000 lines of dummy data very quickly, then produces one additional line of data every second.
If the agent is working correctly, this data will be correctly ingested and uploaded to Scalyr (by the agent)
and can subsequently be verified (by the Verifier).
Verifier:
Waits for Scalyr agent to be up and running.
Keeps polling until max_wait for the expected uploader data.
Usage:
smoketest.py ${process_name} ${max_wait} \
--mode verifier \
--scalyr_server ${SCALYR_SERVER} \
--read_api_key ${READ_API_KEY} \
--agent_hostname ${agent_hostname} \
--uploader_hostname ${uploader_hostname} \
--debug true"
where:
process_name: A means by which the invoker script can inform this script what the current process name is.
The process_name is important as it is parsed/modified to construct verifying queries.
E.g. process_name is used to construct a logfile to be queried such as "/docker/<process_name>-uploader.log".
Moreover, any given CI build should not conflict with other builds and therefore should have a unique
process name (e.g. /docker/ci-agent-docker-json-5986-uploader.log where "ci-agent-docker-json-5986" is a unique
        identifier specific to a CI build).
Additionally, the process name determines which class to instantiate (see
CONTAINER_PREFIX_2_VERIFIER_CLASS). The invoker can choose different implementations (e.g. for LogStash)
        by using one of the prefixes defined in CONTAINER_PREFIX_2_VERIFIER_CLASS. An object of that class is then
instantiated and begins running in the specified mode (either as an Uploader or Verifier).
max_wait: Maximum time to run until exiting with failure.
mode: Operational mode which determines what this process does. Must be one of (uploader, verifier, agent).
scalyr_server: Scalyr backend server to connect to (typically qatesting.scalyr.com when testing)
monitored_logfile: Absolute path of the data file to write which the agent then ingests. Logstash producers also
        write to this file, which is then configured as an input into the Logstash aggregator.
python_version: Python version that the agent is running on (becomes part of the Uploader data)
read_api_key: Read API key to use when querying the Scalyr backend to verify expected data has been uploaded.
agent_hostname: Uploaders and Verifiers need to know the agent_hostname of the agent process in order to construct
a proper verifying query (because they query for a log line uploaded by the agent in order to know when it has
successfully started. This agent_hostname is typically passed in by the invoker script that starts the Uploader
or Verifier.
uploader_hostname: Similar to agent_hostname, Verifiers need to wait for Uploaders to finish uploading before
performing their verifying queries. The uploader_hostname is a necessary piece of information typically passed
in by the invoker script that starts the Uploader and Verifier.
debug: true|false . If true, prints out all Scalyr api queries (useful for debugging)
Note:
    This test code requires Python 3 with specific packages installed (i.e. requests).
"""
from __future__ import print_function
from __future__ import absolute_import
__author__ = "[email protected]"
import argparse
import os
import json
import time
import requests
import socket
import sys
import threading
import shlex
from io import open
from copy import deepcopy
try:
from urllib.parse import urlencode, quote_plus, unquote_plus
except ImportError:
from urllib import urlencode, quote_plus, unquote_plus
NAME_SUFFIX_UPLOADER = "uploader"
NAME_SUFFIX_VERIFIER = "verifier"
# no actual Actor will run under this name, but the constant is needed for logic that checks on the Agent container
NAME_SUFFIX_AGENT = "agent"
NAME_SUFFIXES = [NAME_SUFFIX_UPLOADER, NAME_SUFFIX_VERIFIER, NAME_SUFFIX_AGENT]
def _pretty_print(header="", message="", file=sys.stdout):
if header:
print("", file=file)
print("=" * 79, file=file)
print(header, file=file)
print("=" * 79, file=file)
if len(message) > 0: # message can be spaces
print(message, file=file)
def _exit(code, show_agent_status=True, header="", message=""):
"""Prints agent status before exiting"""
file = sys.stdout if code == 0 else sys.stderr
if show_agent_status:
_pretty_print(header="BEGIN AGENT STATUS")
agent_exec = "/usr/share/scalyr-agent-2/bin/scalyr-agent-2"
if os.path.isfile(agent_exec):
os.system("{} status -v".format(shlex.quote(agent_exec)))
_pretty_print(header="END AGENT STATUS")
_pretty_print(message=" ")
_pretty_print(header, message, file=file)
# exit even if other threads are running
os._exit(code)
class SmokeTestActor(object):
"""
Abstract base class for all verifiers.
Some objects may only upload.
Others may only verify.
Some may do both, in which case we may need a barrier
"""
DEFAULT_POLL_INTERVAL_SEC = 10
def __init__(self, **kwargs):
self._process_name = kwargs.get("process_name")
self._scalyr_server = kwargs.get("scalyr_server")
self._read_api_key = kwargs.get("read_api_key")
self._max_wait = float(kwargs.get("max_wait"))
self._localhostname = socket.gethostname()
self._barrier = None
self._barrier_lock = threading.Lock()
self._lines_to_upload = 1000
self.__init_time = time.time()
self._agent_started_lock = threading.Lock()
self._agent_started = False
self._debug = (kwargs.get("debug") or "").lower() in (
"true",
"y",
"yes",
"t",
"1",
)
def _get_uploader_output_streams(self):
"""Returns list of streams to write log data"""
raise NotImplementedError
def _get_uploader_stream_names(self):
"""Returns list of streams to write log data"""
raise NotImplementedError
def _get_stream_name_from_stream(self, stream):
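        # e.g. sys.stdout.name is "<stdout>"; stripping the first and last characters yields "stdout"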
return stream.name[1:-1]
def get_hard_kill_time(self):
"""Returns time in epoch seconds for when this process must terminate"""
return self.__init_time + self._max_wait
def verifier_type(self):
raise NotImplementedError
def is_verifier(self):
raise NotImplementedError
def is_uploader(self):
raise NotImplementedError
def _get_barrier(self, parties=2):
"""Lazy-instantiate a barrier"""
with self._barrier_lock:
if not self._barrier:
self._barrier = threading.Barrier(parties, timeout=self._max_wait)
return self._barrier
def __wait_at_barrier(self):
"""
For coordinating processes.
Currently only used to prevent uploader OR verifier from proceeding until agent is verified up and running.
Note: uploader and verifier do not block each other, regardless of whether they
run within same process or in different processes.
"""
barrier = self._get_barrier()
if barrier:
print("... Blocking at barrier")
barrier.wait()
print("... Unblocked")
def exit(self, code, **kwargs):
_exit(code, **kwargs)
def verify_logs_uploaded(self):
"""Query scalyr to verify presence of uploaded data"""
raise NotImplementedError
def verify_agent_started_or_die(self):
"""Verify state or processes that should be present or running if agent is running"""
raise NotImplementedError
def wait_for_agent_to_start(self):
"""Both upload or verification should not begin until agent is confirmed started"""
with self._agent_started_lock:
if not self._agent_started:
self.verify_agent_started_or_die()
self._agent_started = True
def verify_or_die(self):
"""
        Query the Scalyr backend in search of what we know we uploaded.
Error out after a certain time limit.
Returns:
Nothing. Exits with status 0 or 1
"""
self.wait_for_agent_to_start()
self.verify_logs_uploaded()
def _make_log_line(self, count, stream):
"""Return a line of text to be written to the log. Don't include trailing newline
Args:
count: line number (concrete class may choose to incorporate into line content for verification)
stream: output stream (concrete class may choose to incorporate into line content for verification)
"""
raise NotImplementedError
def trigger_log_upload(self):
self.wait_for_agent_to_start()
streams = self._get_uploader_output_streams()
count = 0
while time.time() < self.get_hard_kill_time():
for stream in streams:
stream.write(self._make_log_line(count, stream))
stream.write("\n")
stream.flush()
if count >= self._lines_to_upload:
time.sleep(1) # slow down if threshold is reached
# Write to all streams for a given count
count += 1
def _make_query_url(
self,
filter_dict=None,
message="",
override_serverHost=None,
override_log=None,
override_log_regex=None,
):
"""
Make url for querying Scalyr server. Any str filter values will be url-encoded
"""
base_params = sorted(self._get_base_query_params().items())
url = "https://" if not self._scalyr_server.startswith("http") else ""
url += "{0}/api/query?queryType=log&{1}".format(
self._scalyr_server, urlencode(base_params)
)
# NOTE: In theory we could also escape $, but API doesn't require it. It does appear to work
# both ways though.
# Set serverHost/logfile from object state if not overridden
if not filter_dict:
filter_dict = {}
filter_dict["$serverHost"] = override_serverHost or self._process_name
# only if no log regex is provided do we then add an exact logfile match
if not override_log_regex:
filter_dict["$logfile"] = (
override_log or self._logfile # pylint: disable=no-member
)
filter_frags = []
for k, v in sorted(filter_dict.items()):
if type(v) == str:
v = quote_plus('"{0}"'.format(v))
elif type(v) == bool:
v = quote_plus('"{0}"'.format(str(v).lower()))
filter_frags.append("{0}=={1}".format(k, v))
# If log regex is provided, add a regex matches clause
if override_log_regex:
filter_frags.append(
'{0} matches "{1}"'.format("$logfile", override_log_regex)
)
# Add message
if message:
filter_frags.append(
"$message{0}".format(quote_plus(' contains "{0}"'.format(message)))
)
url += "&filter={0}".format("+and+".join(filter_frags))
if self._debug:
print("\nURL quoted: {0}".format(url))
print(" unquoted: {0}".format(unquote_plus(url)))
print(" curl command: curl -v '{0}'".format(url))
return url
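    # Rough shape of a generated URL (placeholder values, using the default base params below):
    #   _make_query_url(message="count=1000", override_serverHost="host-1", override_log="/docker/foo.log")
    # yields approximately
    #   https://<scalyr_server>/api/query?queryType=log&maxCount=1&startTime=10m&token=<read_api_key>
    #       &filter=$logfile==%22%2Fdocker%2Ffoo.log%22+and+$serverHost==%22host-1%22+and+$message+contains+%22count%3D1000%22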
def _get_base_query_params(self):
"""Get base query params (not including filter)"""
params = {
"maxCount": 1,
"startTime": "10m",
"token": self._read_api_key,
}
return params
def poll_until_max_wait(
self,
verify_func,
description,
success_mesg,
fail_mesg,
exit_on_success=False,
exit_on_fail=False,
poll_interval=None,
):
"""
Template design pattern method for polling until a maximum time. Each poll executes the provided verify_func().
fail/success messages are parameterized, as well as whether to exit.
Args:
verify_func: Function to execute for each check. Must return True/False
description: Text to print at beginning of check
success_mesg: Text to print on success
fail_mesg: Text to print on failure
exit_on_success: If success, exit (with code 0)
exit_on_fail: If fail, exit (with code 1)
"""
_pretty_print(description)
verified = False
prev = time.time()
while time.time() < self.get_hard_kill_time():
# Try to verify upload by querying Scalyr server
sys.stdout.write(". ")
sys.stdout.flush()
verified = verify_func()
# query backend to confirm.
if verified:
success_mesg = "\nSUCCESS !!. " + success_mesg
if exit_on_success:
self.exit(0, message=success_mesg)
else:
_pretty_print(message=success_mesg, file=sys.stdout)
break
# Sleep a bit before trying again
time.sleep(poll_interval or SmokeTestActor.DEFAULT_POLL_INTERVAL_SEC)
cur = time.time()
if cur - prev > 10:
print(
"{} seconds remaining".format(int(self.get_hard_kill_time() - cur))
)
prev = cur
else:
fail_mesg = "FAILED. Time limit reached. " + fail_mesg
if exit_on_fail:
self.exit(1, message=fail_mesg)
else:
_pretty_print(message=fail_mesg, file=sys.stderr)
class StandaloneSmokeTestActor(SmokeTestActor):
"""
Standalone agent verifier.
A single process performs both Uploader and Verifier tasks.
Therefore, the logfile that we Upload to is the same file that is verified (filename queried for verification).
Waits for same-host Agent to be up and running (by watching for local agent.pid/log files).
Then writes to a Json file which is picked up by Agent.
    Finally, queries Scalyr backend to confirm Json file was uploaded.
"""
VERIFIER_TYPE = "Standalone"
def __init__(self, **kwargs):
super(StandaloneSmokeTestActor, self).__init__(**kwargs)
self._logfile = kwargs.get("monitored_logfile")
self._python_version = kwargs.get("python_version")
def is_verifier(self):
return True
def is_uploader(self):
return True
def _get_uploader_output_streams(self):
"""Returns stream to write log data into"""
return [open(self._logfile, "w+")]
def _get_uploader_stream_names(self):
"""Returns stream to read log data from"""
return [self._logfile]
def _make_log_line(self, count, stream):
"""Return a line of JSON for data.json (which is uploaded by the Agent)"""
obj = {
"verifier_type": self.VERIFIER_TYPE,
"count": count,
"hostname": self._localhostname,
"python_version": "python{}".format(self._python_version),
"line_stream": stream.name,
}
return json.dumps(obj)
def verify_agent_started_or_die(self):
"""Poll for agent pid and log file"""
def _check_agent_pid_and_log_files():
# If agent is not started, print agent.log if it exists
agent_logfile = "/var/log/scalyr-agent-2/agent.log"
agent_pid_file = "/var/log/scalyr-agent-2/agent.pid"
if not os.path.isfile(agent_pid_file) or not os.path.isfile(agent_logfile):
return False
return True
self.poll_until_max_wait(
_check_agent_pid_and_log_files,
"Checking for agent pid and log files",
"Agent is running.",
"No agent running.",
poll_interval=1,
)
def verify_logs_uploaded(self):
"""
        For standalone agent, confirmation of log upload hinges on a successful poll
of a single matching row as follows:
python_version matches the standalone agent python version
hostname matches the docker container hostname running the standalone agent
"""
def _query_scalyr_for_monitored_log_upload():
# TODO: This should be self._lines_to_upload (i.e. 1000, but it doesn't work
# for logstash where for some reason only 300-600 lines are uploaded most
# of the time. Once that bug is fixed, change this back to self._lines_to_upload
expected_count = 1000
resp = requests.get(
self._make_query_url(
{
"$verifier_type": self.VERIFIER_TYPE,
"$python_version": "python{}".format(self._python_version),
"$hostname": self._localhostname,
"$count": expected_count,
}
)
)
if resp.ok:
data = json.loads(resp.content)
if "matches" not in data:
print('API response doesn\'t contain "matches" attribute')
print("API response: %s" % (str(data)))
return False
matches = data["matches"]
if len(matches) == 0:
print("Found 0 matches")
return False
print("")
print("Sample response for matches[0]")
print(matches[0])
print("")
att = matches[0]["attributes"]
verifier_type = att["verifier_type"]
python_version = att["python_version"]
hostname = att["hostname"]
cnt = att["count"]
if all(
[
verifier_type == self.VERIFIER_TYPE,
python_version == "python{}".format(self._python_version),
hostname == self._localhostname,
cnt == expected_count,
]
):
return True
print("Received non-OK (200) response")
print("Response status code: %s" % (resp.status_code))
print("Response text: %s" % (resp.text))
return False
self.poll_until_max_wait(
_query_scalyr_for_monitored_log_upload,
"Querying server to verify monitored logfile was uploaded.",
"Monitored logfile upload verified",
"Monitored logfile upload not verified",
exit_on_success=True,
exit_on_fail=True,
)
class DockerSmokeTestActor(SmokeTestActor):
"""
Base Docker actor.
Some containers will write logs to Scalyr but only one container will verify.
(The current setup has only one uploader + one verifier)
Because there are multiple processes (containers) running, it is necessary to synchronize them for the Smoketest
to correctly work.
Upload / Verify will not begin until the remote agent is confirmed to be up and running. This is done by querying
Scalyr.
For clarity/maintainability of the Upload/Verifier code, an actor should only upload or verify, not both. (This is
different from the Standalone actor where a single process runs both upload and verify and checks the local agent
via file system).
"""
def __init__(self, **kwargs):
"""
:param max_wait: Max seconds before exiting
        :param mode: One of 'uploader', 'verifier', 'agent' (see NAME_SUFFIXES)
"""
super().__init__(**kwargs)
self.mode = kwargs.get("mode")
self._logfile = "/docker/{}.log".format(self._process_name)
self._agent_hostname = kwargs.get("agent_hostname")
self._uploader_hostname = kwargs.get("uploader_hostname")
_pretty_print('Agent hostname="{}"'.format(self._agent_hostname))
_pretty_print('Uploader hostname="{}"'.format(self._uploader_hostname))
def is_verifier(self):
return self.mode == NAME_SUFFIX_VERIFIER
def is_uploader(self):
return self.mode == NAME_SUFFIX_UPLOADER
def _serialize_row(self, obj):
"""Write a single row of key=value, separated by commas. Standardize by sorting keys"""
keyvals = [(key, obj.get(key)) for key in sorted(obj.keys())]
return ",".join(["{}={}".format(k, v) for k, v in keyvals])
def _make_log_line(self, count, stream):
return self._serialize_row(
{
"verifier_type": self.VERIFIER_TYPE, # pylint: disable=no-member
"count": count,
"line_stream": self._get_stream_name_from_stream(stream),
                # No need for the hostname in the log line. The agent_container_id & remote-container-logfile name uniquely identify the
# correct log.
# "hostname": self._localhostname,
}
)
def _get_process_name_for_suffix(self, suffix):
assert suffix in [
NAME_SUFFIX_AGENT,
NAME_SUFFIX_UPLOADER,
NAME_SUFFIX_VERIFIER,
]
parts = self._process_name.split("-")[:-1]
parts.append(suffix)
return "-".join(parts)
def _get_stream_name_from_stream(self, stream):
return stream.name[1:-1]
def _get_uploader_output_streams(self):
return [sys.stderr, sys.stdout]
def _get_uploader_stream_names(self):
"""Docker and k8s subclasses all verify by querying stream names of 'stderr' and 'stdout'"""
return [stream.name[1:-1] for stream in [sys.stderr, sys.stdout]]
def verify_agent_started_or_die(self):
"""
Docker agent is not running in same container as Verifier.
Verifier must query Scalyr to determine presence of these 2 files:
serverHost=<agent_short_container_id>, logfile=/var/log/scalyr-agent-2/agent.log
serverHost=<agent_short_container_id>, logfile=/var/log/scalyr-agent-2/docker_monitor.log
filter="Starting monitor docker_monitor()"
"""
def _query_scalyr_for_agent_logfile(logfile):
def _func():
resp = requests.get(
self._make_query_url(
override_serverHost=self._agent_hostname, override_log=logfile,
)
)
if resp.ok:
data = json.loads(resp.content)
if "matches" not in data:
return False
matches = data["matches"]
if len(matches) == 0:
return False
return True
print("Received non-OK (200) response")
print("Response status code: %s" % (resp.status_code))
print("Response text: %s" % (resp.text))
return False
return _func
for filename in self._get_expected_agent_logfiles():
self.poll_until_max_wait(
_query_scalyr_for_agent_logfile(filename),
"Check if Agent is running: query scalyr for agent container file: {}".format(
filename
),
"{} found".format(filename),
"Time limit reached. Could not verify liveness of Agent Docker Container.",
exit_on_success=False,
exit_on_fail=True,
)
def _get_expected_agent_logfiles(self):
return [
"/var/log/scalyr-agent-2/agent.log",
"/var/log/scalyr-agent-2/docker_monitor.log",
]
def _get_uploader_override_logfilename_regex(self, process_name):
"""All logfile filters are exact and therefore we return None in the general case"""
return None
def _get_mapped_logfile_prefix(self):
raise NotImplementedError
def _get_extra_query_attributes(self, stream_name, process_name):
"""Dictionary of query field key-vals (besides serverHost, logfile, filters)"""
raise NotImplementedError
def _verify_queried_attributes(self, att, stream_name, process_name):
if att.get("containerName") != process_name:
print(
"containerName attribute doesn't match process name. Expected '%s' got '%s'"
% (process_name, att.get("containerName"))
)
return False
return True
def verify_logs_uploaded(self):
"""
        For docker agent, confirmation requires verification that all uploaders were able to upload.
There are 2 separate types of containers.
1. uploader: uploads data to Scalyr (can easily support multiple but for now, just 1)
2. verifier: verifies data was uploaded by uploader
"""
def _query_scalyr_for_upload_activity(contname_suffix, stream_name):
def _func():
process_name = self._get_process_name_for_suffix(contname_suffix)
resp = requests.get(
self._make_query_url(
self._get_extra_query_attributes(stream_name, process_name),
override_serverHost=self._agent_hostname,
override_log="{}/{}.log".format(
self._get_mapped_logfile_prefix(), process_name
),
override_log_regex=self._get_uploader_override_logfilename_regex(
process_name
),
message=self._serialize_row(
{
"verifier_type": self.VERIFIER_TYPE, # pylint: disable=no-member
"count": self._lines_to_upload,
"line_stream": stream_name,
}
),
)
)
if resp.ok:
data = json.loads(resp.content)
if "matches" not in data:
print('API response doesn\'t contain "matches" attribute')
print("API response: %s" % (str(data)))
return False
matches = data["matches"]
if len(matches) == 0:
print("Found 0 matches")
return False
print("")
print("Sample response for matches[0]")
print(matches[0])
print("")
att = matches[0]["attributes"]
return self._verify_queried_attributes(
att, stream_name, process_name
)
print("Received non-OK (200) response")
print("Response status code: %s" % (resp.status_code))
print("Response text: %s" % (resp.text))
return False # Non-ok response
return _func
suffixes_to_check = [NAME_SUFFIX_UPLOADER]
for count, suffix in enumerate(suffixes_to_check):
for stream_name in self._get_uploader_stream_names():
self.poll_until_max_wait(
_query_scalyr_for_upload_activity(suffix, stream_name),
"Querying server to verify upload: container[stream]='{}[{}].".format(
self._get_process_name_for_suffix(suffix), stream_name
),
"Upload verified for {}[{}].".format(suffix, stream_name),
"Upload not verified for {}[{}].".format(suffix, stream_name),
exit_on_success=count == len(suffixes_to_check),
exit_on_fail=True,
)
class DockerJsonActor(DockerSmokeTestActor):
"""These subclasses capture differences between JSON and Syslog implementations"""
VERIFIER_TYPE = "Docker JSON"
def _get_mapped_logfile_prefix(self):
return "/docker"
def _get_extra_query_attributes(self, stream_name, process_name):
return {"$stream": stream_name}
def _verify_queried_attributes(self, att, stream_name, process_name):
if not super()._verify_queried_attributes(att, stream_name, process_name):
return False
if not all(
[att.get("stream") in stream_name, att.get("monitor") == "agentDocker"]
):
return False
return True
class DockerAPIActor(DockerSmokeTestActor):
"""
    Verifier to be used when Docker monitor utilizes Docker API mode for ingesting logs (aka
docker_raw_logs config option is False).
It verifies both streams - stdout and stderr.
"""
VERIFIER_TYPE = "Docker API (docker_raw_logs: false)"
def __init__(self, *args, **kwargs):
super(DockerAPIActor, self).__init__(*args, **kwargs)
        # Stores the set of matching log lines we've seen so far
self._seen_matching_lines = set()
self._last_seen_timestamp = 0
def _get_base_query_params(self):
# NOTE: We can't really use last timestamp based querying since sometimes data appears to
        # come in out of order, so we would miss messages that way
if self._last_seen_timestamp:
start_time = str(self._last_seen_timestamp)
else:
start_time = "10m"
params = {
"maxCount": 100,
"startTime": start_time,
"token": self._read_api_key,
}
return params
def verify_logs_uploaded(self):
"""
Function which verifies container logs were indeed correctly ingested into Scalyr.
"""
def _query_scalyr_for_monitored_log_upload(contname_suffix, stream_name):
def _func():
process_name = self._get_process_name_for_suffix(contname_suffix)
resp = requests.get(
self._make_query_url(
self._get_extra_query_attributes(stream_name, process_name),
override_serverHost=self._agent_hostname,
override_log="{}/{}.log".format(
self._get_mapped_logfile_prefix(), process_name
),
override_log_regex=self._get_uploader_override_logfilename_regex(
stream_name=stream_name, process_name=process_name
),
message=None,
)
)
if resp.ok:
data = json.loads(resp.content)
if "matches" not in data:
print('API response doesn\'t contain "matches" attribute')
print("API response: %s" % (str(data)))
return False
matches = data["matches"]
if len(matches) == 0:
print("Found 0 matches")
return False
print("")
print("Sample response for matches[0]")
print(matches[0])
print("")
self._last_seen_timestamp = int(matches[0]["timestamp"])
return self._verify_response_matches(
matches=matches,
stream_name=stream_name,
process_name=process_name,
)
print("Received non-OK (200) response")
print("Response status code: %s" % (resp.status_code))
print("Response text: %s" % (resp.text))
return False # Non-ok response
return _func
self.poll_until_max_wait(
_query_scalyr_for_monitored_log_upload("agent", "stdout"),
"Querying server to verify monitored logfile was uploaded.",
"Monitored logfile upload verified",
"Monitored logfile upload not verified",
exit_on_success=True,
exit_on_fail=True,
)
def _get_uploader_stream_names(self):
return ["stdout", "stderr"]
def _get_mapped_logfile_prefix(self):
return "/var/log/scalyr-agent-2"
def _serialize_row(self, obj):
return ""
def _get_uploader_override_logfilename_regex(self, stream_name, process_name):
# $logfile will look something like this:
# "/var/log/scalyr-agent-2/docker-ci-agent-docker-api-56640-agent-stdout.log"
# process name will contain a value similar to this one:
# ci-agent-docker-api-56644-agent
logname_suffix = process_name + "-" + stream_name
return "{}/docker-{}.log".format(
self._get_mapped_logfile_prefix(), logname_suffix
)
def _get_extra_query_attributes(self, stream_name, process_name):
return {}
def _verify_response_matches(self, matches, stream_name, process_name):
for item in matches:
attributes = item["attributes"]
message = item.get("message", "") or ""
self._verify_queried_attributes(
att=attributes,
message=message,
stream_name=stream_name,
process_name=process_name,
)
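        # Five distinct lines are expected: 1 uploader marker line containing the verifier type,
        # 2 "Adding new log file" lines (stdout + stderr), and 2 "doesn't exist on disk" lines
        # (stdout + stderr); see _verify_queried_attributes below.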
success = len(self._seen_matching_lines) == 1 + 2 + 2
if success:
print(
"Found all the required log lines (%s)"
% (str(self._seen_matching_lines))
)
return success
def _verify_queried_attributes(self, att, message, stream_name, process_name):
log_path = self._get_uploader_override_logfilename_regex(
stream_name=stream_name, process_name=process_name
)
if "Docker API (docker_raw_logs: false)" in message:
self._seen_matching_lines.add(message)
return
# Message should look something like this:
# INFO [core] [copying_manager.py:423] Adding new log file
# '/var/log/scalyr-agent-2/docker-ci-agent-docker-api-57068-agent-stdout.log' for monitor
# 'scalyr_agent.builtin_monitors.docker_monitor'
if (
"Adding new log file" in message
and log_path in message
and "-stdout.log" in message
):
self._seen_matching_lines.add(message)
return
if (
"Adding new log file" in message
and log_path in message
and "-stderr.log" in message
):
self._seen_matching_lines.add(message)
return
# Message should look something like this:
# INFO [monitor:docker_monitor] [docker_monitor.py:1308] File
# /var/log/scalyr-agent-2/docker-ci-agent-docker-api-57087-verifier-stdout.log doesn't
# exist on disk. This likely means a new container has been started and no existing logs
# are available for it on disk. Original error: [Errno 2] No such file or directory:
# '/var/log/scalyr-agent-2/docker-ci-agent-docker-api-57087-verifier-stdout.log'
if (
"-stdout.log doesn't exist on disk. This likely means a new container has been started"
in message
):
self._seen_matching_lines.add(message)
return
if (
"-stderr.log doesn't exist on disk. This likely means a new container has been started"
in message
):
self._seen_matching_lines.add(message)
return
class DockerSyslogActor(DockerSmokeTestActor):
VERIFIER_TYPE = "Docker Syslog"
def _get_extra_query_attributes(self, stream_name, process_name):
return {}
def _get_mapped_logfile_prefix(self):
return "/var/log/scalyr-agent-2/containers"
def _verify_queried_attributes(self, att, stream_name, process_name):
if not super()._verify_queried_attributes(att, stream_name, process_name):
return False
expected_monitor = "agentSyslog"
# expected_parser = "agentSyslogDocker"
actual_monitor = att.get("monitor")
# actual_parser = att.get("parser")
# NOTE: "parser" attribute is not returned by the API anymore since early July 2020 so we
# only assert on the monitor name
if actual_monitor != expected_monitor:
print(
"Expected(monitor): '%s', got '%s'" % (expected_monitor, actual_monitor)
)
return False
return True
class K8sActor(DockerSmokeTestActor):
"""
Uploaders write to std output/error
Verifiers query for 'stdout', 'stderr'
"""
VERIFIER_TYPE = "Kubernetes"
def _get_expected_agent_logfiles(self):
return [
"/var/log/scalyr-agent-2/agent.log",
"/var/log/scalyr-agent-2/kubernetes_monitor.log",
]
def _get_mapped_logfile_prefix(self):
return "/docker"
def _get_extra_query_attributes(self, stream_name, process_name):
return {"$stream": stream_name}
def _verify_queried_attributes(self, att, stream_name, process_name):
"""
Here's example JSON response for k8s
"matches": [
{
"severity": 3,
"session": "log_session_5645060384390470634",
"attributes": {
"pod_namespace": "default",
"scalyr-category": "log",
"stream": "stderr",
"pod_uid": "f2d1d738-9a0c-11e9-9b04-080027029126",
"pod-template-hash": "76bcb9cf9",
"run": "ci-agent-k8s-7777-uploader",
"monitor": "agentKubernetes",
"k8s_node": "minikube",
"serverHost": "scalyr-agent-2-z5c8l",
"container_id": "6eb4215ac1589de13089419e90cdfe08c01262e6cfb821f18061a63ab4188a87",
"raw_timestamp": "2019-06-29T03:16:28.058676421Z",
"pod_name": "ci-agent-k8s-7777-uploader-76bcb9cf9-cb96t"
},
"thread": "default",
"message": "count=1000,line_stream=<stderr>,verifier_type=Kubernetes\n",
"timestamp": "1561778193736899060"
}
],
"""
if not all(
[
att.get("stream") in stream_name,
att.get("monitor") == "agentKubernetes",
process_name in att.get("pod_name"),
att.get("container_name") == "scalyr-agent",
]
):
return False
return True
def _get_uploader_override_logfilename_regex(self, process_name):
"""For k8s, return a logfile regex because it too difficult to construct an exact logfile filter.
The regex clause becomes: $logfile+matches+"/docker/k8s_ci-agent-k8s-7777-uploader.*"
"""
return "{}/k8s_{}*".format(self._get_mapped_logfile_prefix(), process_name)
class LogstashActor(DockerSmokeTestActor):
"""
Uploader writes to a common shared logfile that is bind-mounted in a shared volume (not local disk)
    Verifier reads from the common shared logfile
"""
VERIFIER_TYPE = "Logstash"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._monitored_logfile = kwargs.get("monitored_logfile")
def _get_uploader_output_streams(self):
"""Returns stream for Uploader to write log data into"""
return [open(self._monitored_logfile, "w+")]
def _get_uploader_stream_names(self):
"""Returns stream to read log data from"""
return [self._monitored_logfile]
def _get_stream_name_from_stream(self, stream):
return stream.name
def _get_expected_agent_logfiles(self):
return ["scalyr_logstash.log"]
def _get_mapped_logfile_prefix(self):
return "/logstash"
def _get_extra_query_attributes(self, stream_name, process_name):
# {'$stream': stream.name}
# no server-side parser has been defined so cannot filter on $stream
return {}
def _verify_queried_attributes(self, att, stream_name, process_name):
if not all(
[
# att.get('stream') in stream.name, # we haven't setup server-side parser so $stream is not available
# Since the input streams are locally mounted, the event origins are all the same as the agent hostname
att.get("serverHost") == self._agent_hostname,
# the following fields are added on in the logstash pipeline config
# and should appear in every event
att.get("output_attribute1") == "output_value1",
att.get("output_attribute2") == "output_value2",
att.get("output_attribute3") == "output_value3",
# TODO: adjust if these are eventually split into "booleans"a
att.get("tags") == "[tag_t1, tag_t2]",
]
):
return False
return True
def _get_uploader_override_logfilename_regex(self, process_name):
"""For logstash setup, the input is a local file mounted to the logstash container, hence the fields are
host=container_id, path=/tmp/ci-plugin-logstash-7778-uploader.log
host/path are mapped to origin/logfile
"""
return self._monitored_logfile
# Select verifier class based on containers name (prefix)
CONTAINER_PREFIX_2_VERIFIER_CLASS = {
"ci-agent-standalone": StandaloneSmokeTestActor,
"ci-agent-docker-json": DockerJsonActor,
"ci-agent-docker-api": DockerAPIActor,
"ci-agent-docker-syslog": DockerSyslogActor,
"ci-agent-k8s": K8sActor,
"ci-plugin-logstash": LogstashActor,
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"process_name",
type=str,
help="name of process running this instance of test code. Prefix should be a key in "
"CONTAINER_PREFIX_2_VERIFIER_CLASS so that the correct verifier can be chosen.",
)
parser.add_argument(
"max_wait", type=int, help="max seconds this test will run (will force-quit)"
)
# Generic param that can be used by any test as needed
parser.add_argument("--mode", type=str, help="mode switch", choices=NAME_SUFFIXES)
    # For connecting to Scalyr. Note that we need not supply SCALYR_API_KEY as the Agent gets it from its own config
# or the environment.
parser.add_argument(
"--scalyr_server",
type=str,
help="Scalyr backend server (required by Agent or Verifier containers)",
)
parser.add_argument(
"--read_api_key",
type=str,
help="read api key (required all Verifier containers)",
)
# For Standalone testing
parser.add_argument(
"--monitored_logfile",
type=str,
help="absolute path of data file to write to (must match Agent config). "
"Logstash producers also write to this, which are then picked up by the Logstash agent.",
)
parser.add_argument(
"--python_version",
type=str,
help="python version agent is running on (will be added into generated test data)",
)
# For Docker testing
parser.add_argument(
"--agent_hostname",
type=str,
help="hostname of Agent container (required by Docker/k8s Verifier containers",
)
parser.add_argument(
"--uploader_hostname",
type=str,
help="hostname of Uploader container (required by Docker/k8s Verifier containers",
)
parser.add_argument("--debug", type=str, help="turn on debugging")
args = parser.parse_args()
klass = None
for key, val in CONTAINER_PREFIX_2_VERIFIER_CLASS.items():
if args.process_name.startswith(key):
klass = CONTAINER_PREFIX_2_VERIFIER_CLASS.get(key)
break
# Display args to stdout, redacting sensitive keys
_pretty_print("Launching actor", message="Class={}".format(klass))
if not klass:
_exit(
1,
message="Bad test config: process_name must start with one of {}".format(
list(CONTAINER_PREFIX_2_VERIFIER_CLASS.keys())
),
)
args_copy = deepcopy(vars(args))
if "read_api_key" in args_copy:
args_copy["read_api_key"] = args_copy["read_api_key"][:4] + "xxxxxxxxx"
_pretty_print("smoketest.py command line args", str(args_copy))
actor = klass(**vars(args)) # type: ignore
# Optionally start upload in a separate thread. Verifiers should not upload.
uploader_thread = None
if actor.is_uploader():
_pretty_print("START UPLOAD", actor._process_name)
uploader_thread = threading.Thread(target=actor.trigger_log_upload, args=())
uploader_thread.start()
if actor.is_verifier():
_pretty_print("START VERIFIER", actor._process_name)
actor.verify_or_die()
# If verify_or_die hasn't force-killed the program, wait for uploader to finish
if uploader_thread:
uploader_thread.join()
#
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import tempfile
import unittest
import uuid
import mock
import dcm.agent.exceptions as exceptions
import dcm.agent.tests.utils.general as test_utils
import dcm.agent.cloudmetadata as cm
class TestCloudMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
self.conf = mock.Mock()
self.cm_obj = cm.CloudMetaData(self.conf)
def test_base_instance_id(self):
instance_id = self.cm_obj.get_instance_id()
self.assertIsNone(instance_id)
def test_base_is_effective(self):
v = self.cm_obj.is_effective_cloud()
self.assertFalse(v)
def test_base_startup(self):
self.assertRaises(exceptions.AgentNotImplementedException,
self.cm_obj.get_startup_script)
def test_base_get_cloud_type(self):
self.assertRaises(exceptions.AgentNotImplementedException,
self.cm_obj.get_cloud_type)
def test_env_injected_id_no_env(self):
tmp_dir = tempfile.mkdtemp()
try:
self.conf.get_secure_dir.return_value = tmp_dir
injected_id = self.cm_obj.get_injected_id()
self.assertIsNone(injected_id)
finally:
shutil.rmtree(tmp_dir)
def test_env_injected_id_env(self):
tmp_dir = tempfile.mkdtemp()
fake_id = str(uuid.uuid4())
id_file = os.path.join(tmp_dir, "injected_id")
try:
self.conf.get_secure_dir.return_value = tmp_dir
with mock.patch.dict('os.environ',
{cm.ENV_INJECTED_ID_KEY: fake_id}):
injected_id = self.cm_obj.get_injected_id()
self.assertEqual(injected_id, fake_id)
self.assertTrue(os.path.exists(id_file))
with open(id_file, "r") as fptr:
v = fptr.read().strip()
self.assertEqual(v, injected_id)
finally:
shutil.rmtree(tmp_dir)
def test_env_injected_id_env_file_exists(self):
tmp_dir = tempfile.mkdtemp()
fake_id = str(uuid.uuid4())
id_file = os.path.join(tmp_dir, "injected_id")
try:
with open(id_file, "w") as fptr:
fptr.write(fake_id)
self.conf.get_secure_dir.return_value = tmp_dir
injected_id = self.cm_obj.get_injected_id()
self.assertEqual(injected_id, fake_id)
with open(id_file, "r") as fptr:
v = fptr.read().strip()
self.assertEqual(v, injected_id)
finally:
shutil.rmtree(tmp_dir)
def test_ipv4_address(self):
addr = self.cm_obj.get_ipv4_addresses()
self.assertEqual(type(addr), list)
self.assertGreaterEqual(len(addr), 1)
def test_handshake_address(self):
addr = self.cm_obj.get_handshake_ip_address()
self.assertEqual(type(addr), list)
self.assertGreaterEqual(len(addr), 1)
class TestUnknownMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
conf = mock.Mock()
self.cm_obj = cm.UnknownMetaData(conf)
def test_effective_cloud(self):
self.assertTrue(self.cm_obj.is_effective_cloud())
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(), cm.CLOUD_TYPES.UNKNOWN)
class TestAWSMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
self.conf = mock.Mock()
self.cm_obj = cm.AWSMetaData(self.conf)
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(), cm.CLOUD_TYPES.Amazon)
@mock.patch('dcm.agent.cloudmetadata._get_metadata_server_url_data')
def test_base_startup(self, md_server_data):
startup_data = "some date"
md_server_data.return_value = startup_data
sd = self.cm_obj.get_startup_script()
self.assertEqual(startup_data, sd)
@mock.patch('dcm.agent.cloudmetadata._get_metadata_server_url_data')
def test_base_injected_id(self, md_server_data):
fake_id = "somedata"
md_server_data.return_value = fake_id
sd = self.cm_obj.get_injected_id()
self.assertEqual(fake_id, sd)
@mock.patch('dcm.agent.cloudmetadata._get_metadata_server_url_data')
def test_base_injected_id_none(self, md_server_data):
tmp_dir = tempfile.mkdtemp()
try:
self.conf.get_secure_dir.return_value = tmp_dir
fake_id = None
md_server_data.return_value = fake_id
sd = self.cm_obj.get_injected_id()
self.assertIsNone(sd)
finally:
shutil.rmtree(tmp_dir)
class TestCloudStackMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
conf = mock.Mock()
self.cm_obj = cm.CloudStackMetaData(conf)
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(),
cm.CLOUD_TYPES.CloudStack)
class TestJoyentMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
self.conf = mock.Mock()
self.cm_obj = cm.JoyentMetaData(self.conf)
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(),
cm.CLOUD_TYPES.Joyent)
@mock.patch('dcm.agent.utils.run_command')
def test_base_injected_id(self, runcmd):
fakeid = "someid"
runcmd.return_value = (fakeid, "", 0)
x = self.cm_obj.get_injected_id()
self.assertEqual(fakeid, x)
@mock.patch('dcm.agent.utils.run_command')
def test_base_cached_injected_id(self, runcmd):
fakeid = "someid"
runcmd.return_value = (fakeid, "", 0)
x = self.cm_obj.get_injected_id()
self.assertEqual(fakeid, x)
x = self.cm_obj.get_injected_id()
self.assertEqual(fakeid, x)
@mock.patch('dcm.agent.utils.run_command')
def test_base_injected_try_both_locations(self, runcmd):
runcmd.return_value = ("", "error", 1)
tmp_dir = tempfile.mkdtemp()
try:
self.conf.get_secure_dir.return_value = tmp_dir
self.conf.system_sudo = "sudo"
x = self.cm_obj.get_injected_id()
call1 = mock.call(
self.conf,
["sudo", "/usr/sbin/mdata-get", "es:dmcm-launch-id"])
call2 = mock.call(
self.conf,
["sudo", "/lib/smartdc/mdata-get", "es:dmcm-launch-id"])
self.assertEqual(runcmd.call_args_list, [call1, call2])
self.assertEqual(runcmd.call_count, 2)
self.assertIsNone(x)
finally:
shutil.rmtree(tmp_dir)
class TestGCEMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
conf = mock.Mock()
self.cm_obj = cm.GCEMetaData(conf)
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(),
cm.CLOUD_TYPES.Google)
class TestAzureMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
conf = mock.Mock()
self.cm_obj = cm.AzureMetaData(conf)
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(),
cm.CLOUD_TYPES.Azure)
class TestOpenStackMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
conf = mock.Mock()
self.cm_obj = cm.OpenStackMetaData(conf)
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(),
cm.CLOUD_TYPES.OpenStack)
class TestKonamiMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
conf = mock.Mock()
self.cm_obj = cm.KonamiMetaData(conf)
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(),
cm.CLOUD_TYPES._Konami)
class TestDigitalOceanMetaDataBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
conf = mock.Mock()
self.cm_obj = cm.DigitalOceanMetaData(conf)
def test_cloud_type(self):
self.assertEqual(self.cm_obj.get_cloud_type(),
cm.CLOUD_TYPES.DigitalOcean)
@mock.patch('dcm.agent.cloudmetadata._get_metadata_server_url_data')
def test_base_startup(self, md_server_data):
startup_data = "some date"
md_server_data.return_value = startup_data
sd = self.cm_obj.get_startup_script()
self.assertEqual(startup_data, sd)
#! /usr/bin/env python3
# Modules must contain:
# definitions()
# ui_definitions()
# view_definitions()
app_name = "susm"
STATUS = {
0: 'Dead',
1: 'Egg',
2: 'Infertile egg',
3: 'Fertile egg',
4: 'Damaged egg',
5: 'Died off egg',
6: 'Dead in shell',
10: 'Pinkie',
20: 'Fledgling',
30: 'Weaned',
40: 'Moulting',
}
"""
1 (EGG) > 2 (INFERTILE EGG) > 0 (DEAD)
        > 3 (FERTILE EGG) > 4 (DAMAGED EGG) > 0 (DEAD)
> 5 (DIED OFF EGG) > 0 (DEAD)
> 6 (DIS) > 0 (DEAD)
> 10 (PINKIE) > 0 (DEAD)
> 20 (FLEDGLING) > 0 (DEAD)
> 30 (WEANED) > 0 (DEAD)
> 40 (MOULTING) > 0 (DEAD)
> 100 (ADULT PLUMAGE)
Transitions:
0 #
1 > 0,2,3,4
2 > 0,4
3 > 0,4,5,6,10
4 > 0
5 > 0
6 > 0
To:
0 < *
1 (+ = new)
2 < 1
3 < 1
4 < 1,2,3
5 < 2
6 < 2
10 < 2
--- < 50: not yet a budgie
--- 50-100: a sick bird.
--- 100-150: a normal budgie
--- 150-200:
--- 200-250:
"""
def requirements():
return ["base"]
def definitions(db, scope):
return {}
def ui_definitions(db, scope):
return {}
def view_definitions():
name = 'Breeding Management'
flow_name = name.upper()
ref_name = "".join(c.lower() if c.isalnum() else "" for c in name)
# Data queries.
queries = {}
queries["%s.locations" % (ref_name)] = ("result = Location.select(lambda bl: bl.type == Location.BREEDING).order_by(desc(Location.parent), desc(Location.name))", {})
queries["%s.adults_in_location" % (ref_name)] = ("result = Individual.select(lambda adults: location.id == %(location_id)s and status > 50).order_by(desc(Individual.sex))", {'location_id': None})
queries["%s.young_in_location" % (ref_name)] = ("result = Individual.select(lambda adults: location.id == %(location_id)s and 0 < status < 50).order_by(desc(Individual.dob))", {'location_id': None})
# Views
"""
VIEW
[TAB*]
[SECTION*]
[LINE*]
(LABEL? FIELD? BUTTON?)+
Tabs:
# All identical tabs, depending on the data:
name, tabs[query,title], sections
# Specific tabs:
name, tabs[sections]
Sections:
title, lines
Lines:
query
"""
views = {}
views[flow_name] = {
"name": name,
"tabs": {
"query": "%s.locations" % (ref_name),
"title": ".name"
},
"sections": {
0: {
"title": "Parents",
"lines": {
"query": "%s.adults_in_location" % (ref_name),
"elements": {
0: {
'type': 'label',
'value': '.sex'
},
1: {
'type': 'button',
'value': '.code',
'outmessage': "INDIVIDUAL"
},
2: {
'type': 'button',
'value': "X",
'outmessage': "REMOVE"
},
},
1: {
"elements": {
0: {
'type': 'button',
'value': "+",
'outmessage': "FINDPARENT"
}
}
}
}
},
1: {
"title": "Eggs/Chicks",
"lines": {
"query": "%s.young_in_location" % (ref_name),
"elements": {
0: {
'type': 'button',
'value': '.code'
},
1: {
'type': 'label',
'if': '.status == 1',
'value': 'Egg'
},
2: {
'type': 'label',
'if': '.status == 2',
'value': 'Inf.'
},
3: {
'type': 'label',
'if': '.status == 3',
'value': 'Fert.'
},
4: {
'type': 'label',
'if': '.status == 4',
'value': 'Dam.'
},
5: {
'type': 'label',
'if': '.status == 5',
'value': 'Bad'
},
6: {
'type': 'label',
'if': '.status == 6',
'value': 'DIS'
},
7: {
'type': 'label',
'if': '6 < .status < 10',
'value': 'Egg?'
},
8: {
'type': 'label',
'if': '10 <= .status < 20',
'value': 'Pinkie'
},
9: {
'type': 'label',
'if': '20 <= .status < 30',
'value': 'Fledgling'
},
10: {
'type': 'label',
'if': '30 <= .status',
'value': 'Young'
},
21: {
'type': 'button',
'value': 'X'
},
22: {
'type': 'button',
'if': '.status == 1',
'value': 'Inf'
},
23: {
'type': 'button',
'if': '.status == 1',
'value': 'Fer'
},
24: {
'type': 'button',
'if': '.status in [1,2,3]',
'value': 'Dam'
},
25: {
'type': 'button',
'if': '.status == 1',
'value': 'Die'
},
26: {
'type': 'button',
'if': '.status == 1',
'value': 'DIS'
},
27: {
'type': 'button',
'if': '.status == 1',
'value': 'H'
}
},
1: {
'elements': {
0:{
'type': 'button',
'value': "+"
}
}
}
}
}
}
}
flow = {}
return (queries, views, {flow_name: flow})
if __name__ == "__main__":
    # jQuery URLs:
    # /data/<Class>/<primary key>
    # /query/<query name>?param1=value&param2=value
# How to call queries
test_queries = {}
def do_query(name, scope=None, **kwargs):
if scope is None:
scope = {}
query_template, defaults = test_queries[name]
params = defaults.copy()
params.update(kwargs)
query = query_template % params
exec("result = %s" % (query), scope)
return scope['result']
print("")
print("===================")
print("HOW TO CALL QUERIES")
print("===================")
print("")
test_queries = {'test': ('result = [i+%(offset)s for i in range(limit)]', {'offset': 1})}
queryname = 'test'
limit = 5
print("%s with limit %s: %s" % (queryname, limit, do_query(queryname, scope=locals())))
limit = 3
print("%s with limit %s: %s" % (queryname, limit, do_query(queryname, scope=locals(), offset=10)))
print("")
print("========")
print("BREEDING")
print("========")
print("")
(queries, views, flow) = view_definitions()
for name, (query_template, query_defaults) in queries.items():
print("Query %s: %s with %s." % (name, query_template, query_defaults))
print("")
class L():
def __init__(self, id):
self.id = id
@property
def parent(self):
return None
@property
def name(self):
return "BC %s" % self.id
class I():
def __init__(self, id, code=None, sex=None, status=None, location=None):
self.id = id
self.code = code
self.sex = sex
if status is None:
status = 0
self.status = status
self.location = L(1)
test_queries = {
'breedingmanagement.adults_in_location': ('result = [I(1,"GOc",1,100), I(2,"VAYF",2,100)]', {}),
'breedingmanagement.young_in_location': ('result = [%s]' % (",".join('I(%s,"(GOVAYF)%s",None,%s)' % (i+3, i+62, i) for i in [i for i in range(7)] + [(i+1)*10 for i in range(4)])), {}),
'breedingmanagement.locations': ('result = [L(i+1) for i in range(limit)]', {})
}
limit = 3
width = 70
for name, definition in views.items():
print("View %s" % (name))
title = definition.get('name', name)
# VIEW HEAD
print("\t" + "=" * width)
print("\t%s" % (title))
print("\t" + "=" * width)
tabs = definition.get('tabs', {0: {'title': ''}})
tab_titles = []
if 'query' in tabs:
tab_title = tabs.get('title', name)
tab_objects = do_query(tabs['query'], scope=locals())
if tab_title[0] == ".":
tab_titles = [getattr(tab, tab_title[1:]) for tab in tab_objects]
else: # Loop over all integer keys and get out the titles.
tab_titles = [tabs[i]['title'] for i in sorted(tabs.keys(), key=str) if isinstance(i, int)]
tab_header = "|".join(tab_titles)
tab_line_header = "+".join("-" * len(title) for title in tab_titles)
# TAB HEAD
print("\t+%s+" % tab_line_header)
print("\t|%s|" % tab_header)
print("\t|%s+%s+" % (" " * len(tab_titles[0]), "-" * (width - 3 - len(tab_titles[0]))))
# SECTION
sections = tabs.get('sections', definition.get('sections', {0: {'title': ''}}))
for s in sorted(sections.keys(), key=str):
if not isinstance(s, int):
continue
section_title = sections[s].get('title', '')
print("\t| +-%s%s+ |" % (section_title, "-" * (width - 7 - len(section_title))))
# LINES
lines = sections[s].get('lines', tabs.get('lines', definition.get('sections', {0: {'title': ''}})))
line_objects = []
if 'query' in lines:
line_objects = do_query(lines['query'], scope=locals())
for line_object in line_objects:
line_elements = []
if 'elements' in lines:
for e in lines['elements']:
show = True
if 'if' in lines['elements'][e]:
exec("show = %s" % (lines['elements'][e]['if'].replace(".", "line_object.")), locals())
if not show:
continue
value = lines['elements'][e].get('value', '#')
if value[0] == ".":
value = getattr(line_object, value[1:])
if lines['elements'][e].get('type', '') == 'button':
line_elements.append("[%s]" % (value))
else:
line_elements.append(str(value))
if line_elements:
line = " ".join(line_elements)
print("\t| | %s%s | |" % (line, " " * (width -8 - len(line))))
for l in sorted(lines.keys(), key=str):
if isinstance(l, int):
for e in sorted(lines[l]['elements']):
line = lines[l]['elements'][e].get('value', str(l))
if lines[l]['elements'][e].get('type', '') == 'button':
line = "[%s]" % (line)
print("\t| | %s%s | |" % (line, " " * (width - 8 - len(str(line)))))
print("\t| +%s+ |" % ("-" * (width - 6)))
# TAB TAIL
print("\t+%s+" % ("-" * (width - 2)))
# VIEW TAIL
print("\t" + "=" * width)
print("")
for context, subflow in flow.items():
print("Flow %s" % (context))
for item in subflow:
print("\t%s" % (subflow))
print("")
|
|
# coding: utf-8
"""
StaSh - Pythonista Shell
https://github.com/ywangd/stash
"""
__version__ = '0.7.5'
import imp as pyimp # rename to avoid name conflict with objc_util
import logging
import logging.handlers
import os
import platform
import sys
from io import IOBase
import six
from six import BytesIO, StringIO
from six.moves.configparser import ConfigParser
# noinspection PyPep8Naming
from .system.shcommon import (_EXTERNAL_DIRS, _STASH_CONFIG_FILES, _STASH_ROOT, _SYS_STDOUT, IN_PYTHONISTA, ON_IPAD)
from .system.shcommon import Control as ctrl
from .system.shcommon import Escape as esc
from .system.shcommon import Graphics as graphics
from .system.shio import ShIO
from .system.shiowrapper import disable as disable_io_wrapper
from .system.shiowrapper import enable as enable_io_wrapper
from .system.shparsers import ShCompleter, ShExpander, ShParser
from .system.shruntime import ShRuntime
from .system.shscreens import ShSequentialScreen
from .system.shstreams import ShMiniBuffer, ShStream
from .system.shui import get_ui_implementation
from .system.shuseractionproxy import ShUserActionProxy
# Setup logging
LOGGER = logging.getLogger('StaSh')
# Debugging constants
_DEBUG_STREAM = 200
_DEBUG_RENDERER = 201
_DEBUG_MAIN_SCREEN = 202
_DEBUG_MINI_BUFFER = 203
_DEBUG_IO = 204
_DEBUG_UI = 300
_DEBUG_TERMINAL = 301
_DEBUG_TV_DELEGATE = 302
_DEBUG_RUNTIME = 400
_DEBUG_PARSER = 401
_DEBUG_EXPANDER = 402
_DEBUG_COMPLETER = 403
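# Illustrative only: any subset of the constants above can be passed via the
# ``debug`` argument of StaSh(), e.g. StaSh(debug=(_DEBUG_RUNTIME, _DEBUG_PARSER)).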
# Default configuration (can be overridden by external configuration file)
_DEFAULT_CONFIG = """[system]
rcfile=.stashrc
py_traceback=0
py_pdb=0
input_encoding_utf8=1
thread_type=ctypes
[display]
TEXT_FONT_SIZE={font_size}
BUTTON_FONT_SIZE=14
BACKGROUND_COLOR=(0.0, 0.0, 0.0)
TEXT_COLOR=(1.0, 1.0, 1.0)
TINT_COLOR=(0.0, 0.0, 1.0)
INDICATOR_STYLE=white
BUFFER_MAX=150
AUTO_COMPLETION_MAX=50
VK_SYMBOLS=~/.-*|>$'=!&_"\\?`
[style]
enable_styles=1
colored_errors=1
[history]
ipython_style_history_search=1
allow_double_lines=0
hide_whitespace_lines=1
maxsize=50
""".format(
font_size=(14 if ON_IPAD else 12),
)
# create directories outside STASH_ROOT
# we should do this each time StaSh starts because some commands may require
# these directories
for p in _EXTERNAL_DIRS:
if not os.path.exists(p):
try:
os.mkdir(p)
except:
pass
class StaSh(object):
"""
Main application class. It initializes and wires the components and provides
utility interfaces to running scripts.
"""
PY3 = six.PY3
def __init__(self, debug=(), log_setting=None, no_cfgfile=False, no_rcfile=False, no_historyfile=False, command=None):
self.__version__ = __version__
# Intercept IO
enable_io_wrapper()
self.config = self._load_config(no_cfgfile=no_cfgfile)
self.logger = self._config_logging(log_setting)
self.enable_styles = self.config.getboolean("style", "enable_styles")
self.user_action_proxy = ShUserActionProxy(self)
# Tab handler for running scripts
self.external_tab_handler = None
# Wire the components
self.main_screen = ShSequentialScreen(
self,
nlines_max=self.config.getint('display',
'BUFFER_MAX'),
debug=_DEBUG_MAIN_SCREEN in debug
)
self.mini_buffer = ShMiniBuffer(self, self.main_screen, debug=_DEBUG_MINI_BUFFER in debug)
self.stream = ShStream(self, self.main_screen, debug=_DEBUG_STREAM in debug)
self.io = ShIO(self, debug=_DEBUG_IO in debug)
ShUI, ShSequentialRenderer = get_ui_implementation()
self.terminal = None # will be set during UI initialisation
self.ui = ShUI(self, debug=(_DEBUG_UI in debug), debug_terminal=(_DEBUG_TERMINAL in debug))
self.renderer = ShSequentialRenderer(self, self.main_screen, self.terminal, debug=_DEBUG_RENDERER in debug)
parser = ShParser(debug=_DEBUG_PARSER in debug)
expander = ShExpander(self, debug=_DEBUG_EXPANDER in debug)
self.runtime = ShRuntime(self, parser, expander, no_historyfile=no_historyfile, debug=_DEBUG_RUNTIME in debug)
self.completer = ShCompleter(self, debug=_DEBUG_COMPLETER in debug)
# Navigate to the startup folder
if IN_PYTHONISTA:
os.chdir(self.runtime.state.environ_get('HOME2'))
self.runtime.load_rcfile(no_rcfile=no_rcfile)
self.io.write(
self.text_style(
'StaSh v%s on python %s\n' % (
self.__version__,
platform.python_version(),
),
{
'color': 'blue',
'traits': ['bold']
},
always=True,
),
)
# warn on py3
if self.PY3:
self.io.write(
self.text_style(
'Warning: you are running StaSh in python3. Some commands may not work correctly in python3.\n',
{'color': 'red'},
always=True,
),
)
self.io.write(
self.text_style(
'Please help us improve StaSh by reporting bugs on github.\n',
{
'color': 'yellow',
'traits': ['italic']
},
always=True,
),
)
# Load shared libraries
self._load_lib()
# run command (this calls script_will_end)
if command is None:
# show tip of the day
command = '$STASH_ROOT/bin/totd.py'
if command:
# do not run command if command is False (but not None)
if self.runtime.debug:
self.logger.debug("Running command: {!r}".format(command))
self(command, add_to_history=False, persistent_level=0)
def __call__(self, input_, persistent_level=2, *args, **kwargs):
""" This function is to be called by external script for
executing shell commands """
worker = self.runtime.run(input_, persistent_level=persistent_level, *args, **kwargs)
worker.join()
return worker
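# Hedged usage sketch (from an external script; construction arguments omitted):
#   _stash = StaSh()
#   _stash('pwd')   # parses and runs the command, blocking until it finishes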
@staticmethod
def _load_config(no_cfgfile=False):
config = ConfigParser()
config.optionxform = str # make it preserve case
# defaults
if not six.PY3:
config.readfp(BytesIO(_DEFAULT_CONFIG))
else:
config.read_file(StringIO(_DEFAULT_CONFIG))
# update from config file
if not no_cfgfile:
config.read(os.path.join(_STASH_ROOT, f) for f in _STASH_CONFIG_FILES)
return config
@staticmethod
def _config_logging(log_setting):
logger = logging.getLogger('StaSh')
_log_setting = {
'level': 'DEBUG',
'stdout': True,
}
_log_setting.update(log_setting or {})
level = {
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG,
'NOTSET': logging.NOTSET,
}.get(_log_setting['level'],
logging.DEBUG)
logger.setLevel(level)
if not logger.handlers:
if _log_setting['stdout']:
_log_handler = logging.StreamHandler(_SYS_STDOUT)
else:
_log_handler = logging.handlers.RotatingFileHandler('stash.log', mode='w')
_log_handler.setLevel(level)
_log_handler.setFormatter(
logging.Formatter(
'[%(asctime)s] [%(levelname)s] [%(threadName)s] [%(name)s] [%(funcName)s] [%(lineno)d] - %(message)s'
)
)
logger.addHandler(_log_handler)
return logger
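# Illustrative only: log settings are passed as a dict at construction time,
# e.g. StaSh(log_setting={'level': 'INFO', 'stdout': False}).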
def _load_lib(self):
"""
Load library files as modules and save each of them as attributes
"""
lib_path = os.path.join(_STASH_ROOT, 'lib')
os.environ['STASH_ROOT'] = _STASH_ROOT # libcompleter needs this value
try:
for f in os.listdir(lib_path):
fp = os.path.join(lib_path, f)
if f.startswith('lib') and f.endswith('.py') and os.path.isfile(fp):
name, _ = os.path.splitext(f)
if self.runtime.debug:
self.logger.debug("Attempting to load library '{}'...".format(name))
try:
self.__dict__[name] = pyimp.load_source(name, fp)
except Exception as e:
self.write_message('%s: failed to load library file (%s)' % (f, repr(e)), error=True)
finally: # do not modify environ permanently
os.environ.pop('STASH_ROOT')
def write_message(self, s, error=False, prefix="stash: "):
"""
Write a message to the output.
:param s: message to write
:type s: str
:param error: whether this is an error message
:type error: bool
"""
s = '%s%s\n' % (prefix, s)
if error:
if self.runtime.debug:
self.logger.error(s)
if self.runtime.colored_errors:
s = self.text_color(s, "red")
else:
if self.runtime.debug:
self.logger.info(s)
self.io.write(s)
def launch(self, command=None):
"""
Launch StaSh, presenting the UI.
"""
self.ui.show()
# self.terminal.set_focus()
def close(self):
"""
Quit StaSh.
StaSh is based around the UI, so we delegate this task to the UI,
which in turn will call self.on_exit().
"""
self.ui.close()
def on_exit(self):
"""
This method will be called when StaSh is about to be closed.
"""
self.runtime.save_history()
self.cleanup()
# Clear the stack or the stdout becomes unusable for interactive prompt
self.runtime.worker_registry.purge()
def cleanup(self):
"""
Perform cleanup here.
"""
disable_io_wrapper()
def get_workers(self):
"""
Return a list of all workers.
:return: a list of all workers
:rtype: list of [stash.system.shthreads.BaseThread]
"""
return [worker for worker in self.runtime.worker_registry]
# noinspection PyProtectedMember
# @staticmethod
def text_style(self, s, style, always=False):
"""
Style the given string with ANSI escape sequences.
:param str s: String to decorate
:param dict style: A dictionary of styles
:param bool always: If true, style will be applied even for pipes.
:return:
"""
# No color for pipes, files and Pythonista console
if not self.enable_styles or (not always and (isinstance(sys.stdout,
(StringIO,
IOBase)) # or sys.stdout.write.im_self is _SYS_STDOUT
or sys.stdout is _SYS_STDOUT)):
return s
fmt_string = u'%s%%d%s%%s%s%%d%s' % (ctrl.CSI, esc.SGR, ctrl.CSI, esc.SGR)
for style_name, style_value in style.items():
if style_name == 'color':
color_id = graphics._SGR.get(style_value.lower())
if color_id is not None:
s = fmt_string % (color_id, s, graphics._SGR['default'])
elif style_name == 'bgcolor':
color_id = graphics._SGR.get('bg-' + style_value.lower())
if color_id is not None:
s = fmt_string % (color_id, s, graphics._SGR['default'])
elif style_name == 'traits':
for val in style_value:
val = val.lower()
if val == 'bold':
s = fmt_string % (graphics._SGR['+bold'], s, graphics._SGR['-bold'])
elif val == 'italic':
s = fmt_string % (graphics._SGR['+italics'], s, graphics._SGR['-italics'])
elif val == 'underline':
s = fmt_string % (graphics._SGR['+underscore'], s, graphics._SGR['-underscore'])
elif val == 'strikethrough':
s = fmt_string % (graphics._SGR['+strikethrough'], s, graphics._SGR['-strikethrough'])
return s
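# Illustrative only: style dicts combine 'color', 'bgcolor' and 'traits',
# e.g. self.text_style('error', {'color': 'red', 'traits': ['bold']}).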
def text_color(self, s, color_name='default', **kwargs):
return self.text_style(s, {'color': color_name}, **kwargs)
def text_bgcolor(self, s, color_name='default', **kwargs):
return self.text_style(s, {'bgcolor': color_name}, **kwargs)
def text_bold(self, s, **kwargs):
return self.text_style(s, {'traits': ['bold']}, **kwargs)
def text_italic(self, s, **kwargs):
return self.text_style(s, {'traits': ['italic']}, **kwargs)
def text_bold_italic(self, s, **kwargs):
return self.text_style(s, {'traits': ['bold', 'italic']}, **kwargs)
def text_underline(self, s, **kwargs):
return self.text_style(s, {'traits': ['underline']}, **kwargs)
def text_strikethrough(self, s, **kwargs):
return self.text_style(s, {'traits': ['strikethrough']}, **kwargs)
|
|
# test for xml.dom.minidom
import pickle
from test.support import verbose, run_unittest, findfile
import unittest
import xml.dom
import xml.dom.minidom
import xml.parsers.expat
from xml.dom.minidom import parse, Node, Document, parseString
from xml.dom.minidom import getDOMImplementation
tstfile = findfile("test.xml", subdir="xmltestdata")
# The tests of DocumentType importing use these helpers to construct
# the documents to work with, since not all DOM builders actually
# create the DocumentType nodes.
def create_doc_without_doctype(doctype=None):
return getDOMImplementation().createDocument(None, "doc", doctype)
def create_nonempty_doctype():
doctype = getDOMImplementation().createDocumentType("doc", None, None)
doctype.entities._seq = []
doctype.notations._seq = []
notation = xml.dom.minidom.Notation("my-notation", None,
"http://xml.python.org/notations/my")
doctype.notations._seq.append(notation)
entity = xml.dom.minidom.Entity("my-entity", None,
"http://xml.python.org/entities/my",
"my-notation")
entity.version = "1.0"
entity.encoding = "utf-8"
entity.actualEncoding = "us-ascii"
doctype.entities._seq.append(entity)
return doctype
def create_doc_with_doctype():
doctype = create_nonempty_doctype()
doc = create_doc_without_doctype(doctype)
doctype.entities.item(0).ownerDocument = doc
doctype.notations.item(0).ownerDocument = doc
return doc
class MinidomTest(unittest.TestCase):
def confirm(self, test, testname = "Test"):
self.assertTrue(test, testname)
def checkWholeText(self, node, s):
t = node.wholeText
self.confirm(t == s, "looking for %s, found %s" % (repr(s), repr(t)))
def testParseFromFile(self):
with open(tstfile) as file:
dom = parse(file)
dom.unlink()
self.confirm(isinstance(dom, Document))
def testGetElementsByTagName(self):
dom = parse(tstfile)
self.confirm(dom.getElementsByTagName("LI") == \
dom.documentElement.getElementsByTagName("LI"))
dom.unlink()
def testInsertBefore(self):
dom = parseString("<doc><foo/></doc>")
root = dom.documentElement
elem = root.childNodes[0]
nelem = dom.createElement("element")
root.insertBefore(nelem, elem)
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2
and root.childNodes[0] is nelem
and root.childNodes.item(0) is nelem
and root.childNodes[1] is elem
and root.childNodes.item(1) is elem
and root.firstChild is nelem
and root.lastChild is elem
and root.toxml() == "<doc><element/><foo/></doc>"
, "testInsertBefore -- node properly placed in tree")
nelem = dom.createElement("element")
root.insertBefore(nelem, None)
self.confirm(len(root.childNodes) == 3
and root.childNodes.length == 3
and root.childNodes[1] is elem
and root.childNodes.item(1) is elem
and root.childNodes[2] is nelem
and root.childNodes.item(2) is nelem
and root.lastChild is nelem
and nelem.previousSibling is elem
and root.toxml() == "<doc><element/><foo/><element/></doc>"
, "testInsertBefore -- node properly placed in tree")
nelem2 = dom.createElement("bar")
root.insertBefore(nelem2, nelem)
self.confirm(len(root.childNodes) == 4
and root.childNodes.length == 4
and root.childNodes[2] is nelem2
and root.childNodes.item(2) is nelem2
and root.childNodes[3] is nelem
and root.childNodes.item(3) is nelem
and nelem2.nextSibling is nelem
and nelem.previousSibling is nelem2
and root.toxml() ==
"<doc><element/><foo/><bar/><element/></doc>"
, "testInsertBefore -- node properly placed in tree")
dom.unlink()
def _create_fragment_test_nodes(self):
dom = parseString("<doc/>")
orig = dom.createTextNode("original")
c1 = dom.createTextNode("foo")
c2 = dom.createTextNode("bar")
c3 = dom.createTextNode("bat")
dom.documentElement.appendChild(orig)
frag = dom.createDocumentFragment()
frag.appendChild(c1)
frag.appendChild(c2)
frag.appendChild(c3)
return dom, orig, c1, c2, c3, frag
def testInsertBeforeFragment(self):
dom, orig, c1, c2, c3, frag = self._create_fragment_test_nodes()
dom.documentElement.insertBefore(frag, None)
self.confirm(tuple(dom.documentElement.childNodes) ==
(orig, c1, c2, c3),
"insertBefore(<fragment>, None)")
frag.unlink()
dom.unlink()
dom, orig, c1, c2, c3, frag = self._create_fragment_test_nodes()
dom.documentElement.insertBefore(frag, orig)
self.confirm(tuple(dom.documentElement.childNodes) ==
(c1, c2, c3, orig),
"insertBefore(<fragment>, orig)")
frag.unlink()
dom.unlink()
def testAppendChild(self):
dom = parse(tstfile)
dom.documentElement.appendChild(dom.createComment("Hello"))
self.confirm(dom.documentElement.childNodes[-1].nodeName == "#comment")
self.confirm(dom.documentElement.childNodes[-1].data == "Hello")
dom.unlink()
def testAppendChildFragment(self):
dom, orig, c1, c2, c3, frag = self._create_fragment_test_nodes()
dom.documentElement.appendChild(frag)
self.confirm(tuple(dom.documentElement.childNodes) ==
(orig, c1, c2, c3),
"appendChild(<fragment>)")
frag.unlink()
dom.unlink()
def testReplaceChildFragment(self):
dom, orig, c1, c2, c3, frag = self._create_fragment_test_nodes()
dom.documentElement.replaceChild(frag, orig)
orig.unlink()
self.confirm(tuple(dom.documentElement.childNodes) == (c1, c2, c3),
"replaceChild(<fragment>)")
frag.unlink()
dom.unlink()
def testLegalChildren(self):
dom = Document()
elem = dom.createElement('element')
text = dom.createTextNode('text')
self.assertRaises(xml.dom.HierarchyRequestErr, dom.appendChild, text)
dom.appendChild(elem)
self.assertRaises(xml.dom.HierarchyRequestErr, dom.insertBefore, text,
elem)
self.assertRaises(xml.dom.HierarchyRequestErr, dom.replaceChild, text,
elem)
nodemap = elem.attributes
self.assertRaises(xml.dom.HierarchyRequestErr, nodemap.setNamedItem,
text)
self.assertRaises(xml.dom.HierarchyRequestErr, nodemap.setNamedItemNS,
text)
elem.appendChild(text)
dom.unlink()
def testNamedNodeMapSetItem(self):
dom = Document()
elem = dom.createElement('element')
attrs = elem.attributes
attrs["foo"] = "bar"
a = attrs.item(0)
self.confirm(a.ownerDocument is dom,
"NamedNodeMap.__setitem__() sets ownerDocument")
self.confirm(a.ownerElement is elem,
"NamedNodeMap.__setitem__() sets ownerElement")
self.confirm(a.value == "bar",
"NamedNodeMap.__setitem__() sets value")
self.confirm(a.nodeValue == "bar",
"NamedNodeMap.__setitem__() sets nodeValue")
elem.unlink()
dom.unlink()
def testNonZero(self):
dom = parse(tstfile)
self.confirm(dom)# should not be zero
dom.appendChild(dom.createComment("foo"))
self.confirm(not dom.childNodes[-1].childNodes)
dom.unlink()
def testUnlink(self):
dom = parse(tstfile)
self.assertTrue(dom.childNodes)
dom.unlink()
self.assertFalse(dom.childNodes)
def testContext(self):
with parse(tstfile) as dom:
self.assertTrue(dom.childNodes)
self.assertFalse(dom.childNodes)
def testElement(self):
dom = Document()
dom.appendChild(dom.createElement("abc"))
self.confirm(dom.documentElement)
dom.unlink()
def testAAA(self):
dom = parseString("<abc/>")
el = dom.documentElement
el.setAttribute("spam", "jam2")
self.confirm(el.toxml() == '<abc spam="jam2"/>', "testAAA")
a = el.getAttributeNode("spam")
self.confirm(a.ownerDocument is dom,
"setAttribute() sets ownerDocument")
self.confirm(a.ownerElement is dom.documentElement,
"setAttribute() sets ownerElement")
dom.unlink()
def testAAB(self):
dom = parseString("<abc/>")
el = dom.documentElement
el.setAttribute("spam", "jam")
el.setAttribute("spam", "jam2")
self.confirm(el.toxml() == '<abc spam="jam2"/>', "testAAB")
dom.unlink()
def testAddAttr(self):
dom = Document()
child = dom.appendChild(dom.createElement("abc"))
child.setAttribute("def", "ghi")
self.confirm(child.getAttribute("def") == "ghi")
self.confirm(child.attributes["def"].value == "ghi")
child.setAttribute("jkl", "mno")
self.confirm(child.getAttribute("jkl") == "mno")
self.confirm(child.attributes["jkl"].value == "mno")
self.confirm(len(child.attributes) == 2)
child.setAttribute("def", "newval")
self.confirm(child.getAttribute("def") == "newval")
self.confirm(child.attributes["def"].value == "newval")
self.confirm(len(child.attributes) == 2)
dom.unlink()
def testDeleteAttr(self):
dom = Document()
child = dom.appendChild(dom.createElement("abc"))
self.confirm(len(child.attributes) == 0)
child.setAttribute("def", "ghi")
self.confirm(len(child.attributes) == 1)
del child.attributes["def"]
self.confirm(len(child.attributes) == 0)
dom.unlink()
def testRemoveAttr(self):
dom = Document()
child = dom.appendChild(dom.createElement("abc"))
child.setAttribute("def", "ghi")
self.confirm(len(child.attributes) == 1)
child.removeAttribute("def")
self.confirm(len(child.attributes) == 0)
dom.unlink()
def testRemoveAttrNS(self):
dom = Document()
child = dom.appendChild(
dom.createElementNS("http://www.python.org", "python:abc"))
child.setAttributeNS("http://www.w3.org", "xmlns:python",
"http://www.python.org")
child.setAttributeNS("http://www.python.org", "python:abcattr", "foo")
self.confirm(len(child.attributes) == 2)
child.removeAttributeNS("http://www.python.org", "abcattr")
self.confirm(len(child.attributes) == 1)
dom.unlink()
def testRemoveAttributeNode(self):
dom = Document()
child = dom.appendChild(dom.createElement("foo"))
child.setAttribute("spam", "jam")
self.confirm(len(child.attributes) == 1)
node = child.getAttributeNode("spam")
child.removeAttributeNode(node)
self.confirm(len(child.attributes) == 0
and child.getAttributeNode("spam") is None)
dom.unlink()
def testChangeAttr(self):
dom = parseString("<abc/>")
el = dom.documentElement
el.setAttribute("spam", "jam")
self.confirm(len(el.attributes) == 1)
el.setAttribute("spam", "bam")
# Set this attribute to be an ID and make sure that doesn't change
# when changing the value:
el.setIdAttribute("spam")
self.confirm(len(el.attributes) == 1
and el.attributes["spam"].value == "bam"
and el.attributes["spam"].nodeValue == "bam"
and el.getAttribute("spam") == "bam"
and el.getAttributeNode("spam").isId)
el.attributes["spam"] = "ham"
self.confirm(len(el.attributes) == 1
and el.attributes["spam"].value == "ham"
and el.attributes["spam"].nodeValue == "ham"
and el.getAttribute("spam") == "ham"
and el.attributes["spam"].isId)
el.setAttribute("spam2", "bam")
self.confirm(len(el.attributes) == 2
and el.attributes["spam"].value == "ham"
and el.attributes["spam"].nodeValue == "ham"
and el.getAttribute("spam") == "ham"
and el.attributes["spam2"].value == "bam"
and el.attributes["spam2"].nodeValue == "bam"
and el.getAttribute("spam2") == "bam")
el.attributes["spam2"] = "bam2"
self.confirm(len(el.attributes) == 2
and el.attributes["spam"].value == "ham"
and el.attributes["spam"].nodeValue == "ham"
and el.getAttribute("spam") == "ham"
and el.attributes["spam2"].value == "bam2"
and el.attributes["spam2"].nodeValue == "bam2"
and el.getAttribute("spam2") == "bam2")
dom.unlink()
def testGetAttrList(self):
pass
def testGetAttrValues(self): pass
def testGetAttrLength(self): pass
def testGetAttribute(self): pass
def testGetAttributeNS(self): pass
def testGetAttributeNode(self): pass
def testGetElementsByTagNameNS(self):
d="""<foo xmlns:minidom='http://pyxml.sf.net/minidom'>
<minidom:myelem/>
</foo>"""
dom = parseString(d)
elems = dom.getElementsByTagNameNS("http://pyxml.sf.net/minidom",
"myelem")
self.confirm(len(elems) == 1
and elems[0].namespaceURI == "http://pyxml.sf.net/minidom"
and elems[0].localName == "myelem"
and elems[0].prefix == "minidom"
and elems[0].tagName == "minidom:myelem"
and elems[0].nodeName == "minidom:myelem")
dom.unlink()
def get_empty_nodelist_from_elements_by_tagName_ns_helper(self, doc, nsuri,
lname):
nodelist = doc.getElementsByTagNameNS(nsuri, lname)
self.confirm(len(nodelist) == 0)
def testGetEmptyNodeListFromElementsByTagNameNS(self):
doc = parseString('<doc/>')
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, 'http://xml.python.org/namespaces/a', 'localname')
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, '*', 'splat')
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, 'http://xml.python.org/namespaces/a', '*')
doc = parseString('<doc xmlns="http://xml.python.org/splat"><e/></doc>')
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, "http://xml.python.org/splat", "not-there")
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, "*", "not-there")
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, "http://somewhere.else.net/not-there", "e")
def testElementReprAndStr(self):
dom = Document()
el = dom.appendChild(dom.createElement("abc"))
string1 = repr(el)
string2 = str(el)
self.confirm(string1 == string2)
dom.unlink()
def testElementReprAndStrUnicode(self):
dom = Document()
el = dom.appendChild(dom.createElement("abc"))
string1 = repr(el)
string2 = str(el)
self.confirm(string1 == string2)
dom.unlink()
def testElementReprAndStrUnicodeNS(self):
dom = Document()
el = dom.appendChild(
dom.createElementNS("http://www.slashdot.org", "slash:abc"))
string1 = repr(el)
string2 = str(el)
self.confirm(string1 == string2)
self.confirm("slash:abc" in string1)
dom.unlink()
def testAttributeRepr(self):
dom = Document()
el = dom.appendChild(dom.createElement("abc"))
node = el.setAttribute("abc", "def")
self.confirm(str(node) == repr(node))
dom.unlink()
def testTextNodeRepr(self): pass
def testWriteXML(self):
str = '<?xml version="1.0" ?><a b="c"/>'
dom = parseString(str)
domstr = dom.toxml()
dom.unlink()
self.confirm(str == domstr)
def testAltNewline(self):
str = '<?xml version="1.0" ?>\n<a b="c"/>\n'
dom = parseString(str)
domstr = dom.toprettyxml(newl="\r\n")
dom.unlink()
self.confirm(domstr == str.replace("\n", "\r\n"))
def test_toprettyxml_with_text_nodes(self):
# see issue #4147, text nodes are not indented
decl = '<?xml version="1.0" ?>\n'
self.assertEqual(parseString('<B>A</B>').toprettyxml(),
decl + '<B>A</B>\n')
self.assertEqual(parseString('<C>A<B>A</B></C>').toprettyxml(),
decl + '<C>\n\tA\n\t<B>A</B>\n</C>\n')
self.assertEqual(parseString('<C><B>A</B>A</C>').toprettyxml(),
decl + '<C>\n\t<B>A</B>\n\tA\n</C>\n')
self.assertEqual(parseString('<C><B>A</B><B>A</B></C>').toprettyxml(),
decl + '<C>\n\t<B>A</B>\n\t<B>A</B>\n</C>\n')
self.assertEqual(parseString('<C><B>A</B>A<B>A</B></C>').toprettyxml(),
decl + '<C>\n\t<B>A</B>\n\tA\n\t<B>A</B>\n</C>\n')
def test_toprettyxml_with_adjacent_text_nodes(self):
# see issue #4147, adjacent text nodes are indented normally
dom = Document()
elem = dom.createElement('elem')
elem.appendChild(dom.createTextNode('TEXT'))
elem.appendChild(dom.createTextNode('TEXT'))
dom.appendChild(elem)
decl = '<?xml version="1.0" ?>\n'
self.assertEqual(dom.toprettyxml(),
decl + '<elem>\n\tTEXT\n\tTEXT\n</elem>\n')
def test_toprettyxml_preserves_content_of_text_node(self):
# see issue #4147
for str in ('<B>A</B>', '<A><B>C</B></A>'):
dom = parseString(str)
dom2 = parseString(dom.toprettyxml())
self.assertEqual(
dom.getElementsByTagName('B')[0].childNodes[0].toxml(),
dom2.getElementsByTagName('B')[0].childNodes[0].toxml())
def testProcessingInstruction(self):
dom = parseString('<e><?mypi \t\n data \t\n ?></e>')
pi = dom.documentElement.firstChild
self.confirm(pi.target == "mypi"
and pi.data == "data \t\n "
and pi.nodeName == "mypi"
and pi.nodeType == Node.PROCESSING_INSTRUCTION_NODE
and pi.attributes is None
and not pi.hasChildNodes()
and len(pi.childNodes) == 0
and pi.firstChild is None
and pi.lastChild is None
and pi.localName is None
and pi.namespaceURI == xml.dom.EMPTY_NAMESPACE)
def testProcessingInstructionRepr(self): pass
def testTextRepr(self): pass
def testWriteText(self): pass
def testDocumentElement(self): pass
def testTooManyDocumentElements(self):
doc = parseString("<doc/>")
elem = doc.createElement("extra")
# Should raise an exception when adding an extra document element.
self.assertRaises(xml.dom.HierarchyRequestErr, doc.appendChild, elem)
elem.unlink()
doc.unlink()
def testCreateElementNS(self): pass
def testCreateAttributeNS(self): pass
def testParse(self): pass
def testParseString(self): pass
def testComment(self): pass
def testAttrListItem(self): pass
def testAttrListItems(self): pass
def testAttrListItemNS(self): pass
def testAttrListKeys(self): pass
def testAttrListKeysNS(self): pass
def testRemoveNamedItem(self):
doc = parseString("<doc a=''/>")
e = doc.documentElement
attrs = e.attributes
a1 = e.getAttributeNode("a")
a2 = attrs.removeNamedItem("a")
self.confirm(a1.isSameNode(a2))
self.assertRaises(xml.dom.NotFoundErr, attrs.removeNamedItem, "a")
def testRemoveNamedItemNS(self):
doc = parseString("<doc xmlns:a='http://xml.python.org/' a:b=''/>")
e = doc.documentElement
attrs = e.attributes
a1 = e.getAttributeNodeNS("http://xml.python.org/", "b")
a2 = attrs.removeNamedItemNS("http://xml.python.org/", "b")
self.confirm(a1.isSameNode(a2))
self.assertRaises(xml.dom.NotFoundErr, attrs.removeNamedItemNS,
"http://xml.python.org/", "b")
def testAttrListValues(self): pass
def testAttrListLength(self): pass
def testAttrList__getitem__(self): pass
def testAttrList__setitem__(self): pass
def testSetAttrValueandNodeValue(self): pass
def testParseElement(self): pass
def testParseAttributes(self): pass
def testParseElementNamespaces(self): pass
def testParseAttributeNamespaces(self): pass
def testParseProcessingInstructions(self): pass
def testChildNodes(self): pass
def testFirstChild(self): pass
def testHasChildNodes(self): pass
def _testCloneElementCopiesAttributes(self, e1, e2, test):
attrs1 = e1.attributes
attrs2 = e2.attributes
keys1 = list(attrs1.keys())
keys2 = list(attrs2.keys())
keys1.sort()
keys2.sort()
self.confirm(keys1 == keys2, "clone of element has same attribute keys")
for i in range(len(keys1)):
a1 = attrs1.item(i)
a2 = attrs2.item(i)
self.confirm(a1 is not a2
and a1.value == a2.value
and a1.nodeValue == a2.nodeValue
and a1.namespaceURI == a2.namespaceURI
and a1.localName == a2.localName
, "clone of attribute node has proper attribute values")
self.confirm(a2.ownerElement is e2,
"clone of attribute node correctly owned")
def _setupCloneElement(self, deep):
dom = parseString("<doc attr='value'><foo/></doc>")
root = dom.documentElement
clone = root.cloneNode(deep)
self._testCloneElementCopiesAttributes(
root, clone, "testCloneElement" + (deep and "Deep" or "Shallow"))
# mutilate the original so shared data is detected
root.tagName = root.nodeName = "MODIFIED"
root.setAttribute("attr", "NEW VALUE")
root.setAttribute("added", "VALUE")
return dom, clone
def testCloneElementShallow(self):
dom, clone = self._setupCloneElement(0)
self.confirm(len(clone.childNodes) == 0
and clone.childNodes.length == 0
and clone.parentNode is None
and clone.toxml() == '<doc attr="value"/>'
, "testCloneElementShallow")
dom.unlink()
def testCloneElementDeep(self):
dom, clone = self._setupCloneElement(1)
self.confirm(len(clone.childNodes) == 1
and clone.childNodes.length == 1
and clone.parentNode is None
and clone.toxml() == '<doc attr="value"><foo/></doc>'
, "testCloneElementDeep")
dom.unlink()
def testCloneDocumentShallow(self):
doc = parseString("<?xml version='1.0'?>\n"
"<!-- comment -->"
"<!DOCTYPE doc [\n"
"<!NOTATION notation SYSTEM 'http://xml.python.org/'>\n"
"]>\n"
"<doc attr='value'/>")
doc2 = doc.cloneNode(0)
self.confirm(doc2 is None,
"testCloneDocumentShallow:"
" shallow cloning of documents makes no sense!")
def testCloneDocumentDeep(self):
doc = parseString("<?xml version='1.0'?>\n"
"<!-- comment -->"
"<!DOCTYPE doc [\n"
"<!NOTATION notation SYSTEM 'http://xml.python.org/'>\n"
"]>\n"
"<doc attr='value'/>")
doc2 = doc.cloneNode(1)
self.confirm(not (doc.isSameNode(doc2) or doc2.isSameNode(doc)),
"testCloneDocumentDeep: document objects not distinct")
self.confirm(len(doc.childNodes) == len(doc2.childNodes),
"testCloneDocumentDeep: wrong number of Document children")
self.confirm(doc2.documentElement.nodeType == Node.ELEMENT_NODE,
"testCloneDocumentDeep: documentElement not an ELEMENT_NODE")
self.confirm(doc2.documentElement.ownerDocument.isSameNode(doc2),
"testCloneDocumentDeep: documentElement owner is not new document")
self.confirm(not doc.documentElement.isSameNode(doc2.documentElement),
"testCloneDocumentDeep: documentElement should not be shared")
if doc.doctype is not None:
# check the doctype iff the original DOM maintained it
self.confirm(doc2.doctype.nodeType == Node.DOCUMENT_TYPE_NODE,
"testCloneDocumentDeep: doctype not a DOCUMENT_TYPE_NODE")
self.confirm(doc2.doctype.ownerDocument.isSameNode(doc2))
self.confirm(not doc.doctype.isSameNode(doc2.doctype))
def testCloneDocumentTypeDeepOk(self):
doctype = create_nonempty_doctype()
clone = doctype.cloneNode(1)
self.confirm(clone is not None
and clone.nodeName == doctype.nodeName
and clone.name == doctype.name
and clone.publicId == doctype.publicId
and clone.systemId == doctype.systemId
and len(clone.entities) == len(doctype.entities)
and clone.entities.item(len(clone.entities)) is None
and len(clone.notations) == len(doctype.notations)
and clone.notations.item(len(clone.notations)) is None
and len(clone.childNodes) == 0)
for i in range(len(doctype.entities)):
se = doctype.entities.item(i)
ce = clone.entities.item(i)
self.confirm((not se.isSameNode(ce))
and (not ce.isSameNode(se))
and ce.nodeName == se.nodeName
and ce.notationName == se.notationName
and ce.publicId == se.publicId
and ce.systemId == se.systemId
and ce.encoding == se.encoding
and ce.actualEncoding == se.actualEncoding
and ce.version == se.version)
for i in range(len(doctype.notations)):
sn = doctype.notations.item(i)
cn = clone.notations.item(i)
self.confirm((not sn.isSameNode(cn))
and (not cn.isSameNode(sn))
and cn.nodeName == sn.nodeName
and cn.publicId == sn.publicId
and cn.systemId == sn.systemId)
def testCloneDocumentTypeDeepNotOk(self):
doc = create_doc_with_doctype()
clone = doc.doctype.cloneNode(1)
self.confirm(clone is None, "testCloneDocumentTypeDeepNotOk")
def testCloneDocumentTypeShallowOk(self):
doctype = create_nonempty_doctype()
clone = doctype.cloneNode(0)
self.confirm(clone is not None
and clone.nodeName == doctype.nodeName
and clone.name == doctype.name
and clone.publicId == doctype.publicId
and clone.systemId == doctype.systemId
and len(clone.entities) == 0
and clone.entities.item(0) is None
and len(clone.notations) == 0
and clone.notations.item(0) is None
and len(clone.childNodes) == 0)
def testCloneDocumentTypeShallowNotOk(self):
doc = create_doc_with_doctype()
clone = doc.doctype.cloneNode(0)
self.confirm(clone is None, "testCloneDocumentTypeShallowNotOk")
def check_import_document(self, deep, testName):
doc1 = parseString("<doc/>")
doc2 = parseString("<doc/>")
self.assertRaises(xml.dom.NotSupportedErr, doc1.importNode, doc2, deep)
def testImportDocumentShallow(self):
self.check_import_document(0, "testImportDocumentShallow")
def testImportDocumentDeep(self):
self.check_import_document(1, "testImportDocumentDeep")
def testImportDocumentTypeShallow(self):
src = create_doc_with_doctype()
target = create_doc_without_doctype()
self.assertRaises(xml.dom.NotSupportedErr, target.importNode,
src.doctype, 0)
def testImportDocumentTypeDeep(self):
src = create_doc_with_doctype()
target = create_doc_without_doctype()
self.assertRaises(xml.dom.NotSupportedErr, target.importNode,
src.doctype, 1)
# Testing attribute clones uses a helper, and should always be deep,
# even if the argument to cloneNode is false.
def check_clone_attribute(self, deep, testName):
doc = parseString("<doc attr='value'/>")
attr = doc.documentElement.getAttributeNode("attr")
self.assertNotEqual(attr, None)
clone = attr.cloneNode(deep)
self.confirm(not clone.isSameNode(attr))
self.confirm(not attr.isSameNode(clone))
self.confirm(clone.ownerElement is None,
testName + ": ownerElement should be None")
self.confirm(clone.ownerDocument.isSameNode(attr.ownerDocument),
testName + ": ownerDocument does not match")
self.confirm(clone.specified,
testName + ": cloned attribute must have specified == True")
def testCloneAttributeShallow(self):
self.check_clone_attribute(0, "testCloneAttributeShallow")
def testCloneAttributeDeep(self):
self.check_clone_attribute(1, "testCloneAttributeDeep")
def check_clone_pi(self, deep, testName):
doc = parseString("<?target data?><doc/>")
pi = doc.firstChild
self.assertEqual(pi.nodeType, Node.PROCESSING_INSTRUCTION_NODE)
clone = pi.cloneNode(deep)
self.confirm(clone.target == pi.target
and clone.data == pi.data)
def testClonePIShallow(self):
self.check_clone_pi(0, "testClonePIShallow")
def testClonePIDeep(self):
self.check_clone_pi(1, "testClonePIDeep")
def testNormalize(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode("first"))
root.appendChild(doc.createTextNode("second"))
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2,
"testNormalize -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 1
and root.childNodes.length == 1
and root.firstChild is root.lastChild
and root.firstChild.data == "firstsecond"
, "testNormalize -- result")
doc.unlink()
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode(""))
doc.normalize()
self.confirm(len(root.childNodes) == 0
and root.childNodes.length == 0,
"testNormalize -- single empty node removed")
doc.unlink()
def testNormalizeCombineAndNextSibling(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode("first"))
root.appendChild(doc.createTextNode("second"))
root.appendChild(doc.createElement("i"))
self.confirm(len(root.childNodes) == 3
and root.childNodes.length == 3,
"testNormalizeCombineAndNextSibling -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2
and root.firstChild.data == "firstsecond"
and root.firstChild is not root.lastChild
and root.firstChild.nextSibling is root.lastChild
and root.firstChild.previousSibling is None
and root.lastChild.previousSibling is root.firstChild
and root.lastChild.nextSibling is None
, "testNormalizeCombinedAndNextSibling -- result")
doc.unlink()
def testNormalizeDeleteWithPrevSibling(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode("first"))
root.appendChild(doc.createTextNode(""))
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2,
"testNormalizeDeleteWithPrevSibling -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 1
and root.childNodes.length == 1
and root.firstChild.data == "first"
and root.firstChild is root.lastChild
and root.firstChild.nextSibling is None
and root.firstChild.previousSibling is None
, "testNormalizeDeleteWithPrevSibling -- result")
doc.unlink()
def testNormalizeDeleteWithNextSibling(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode(""))
root.appendChild(doc.createTextNode("second"))
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2,
"testNormalizeDeleteWithNextSibling -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 1
and root.childNodes.length == 1
and root.firstChild.data == "second"
and root.firstChild is root.lastChild
and root.firstChild.nextSibling is None
and root.firstChild.previousSibling is None
, "testNormalizeDeleteWithNextSibling -- result")
doc.unlink()
def testNormalizeDeleteWithTwoNonTextSiblings(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createElement("i"))
root.appendChild(doc.createTextNode(""))
root.appendChild(doc.createElement("i"))
self.confirm(len(root.childNodes) == 3
and root.childNodes.length == 3,
"testNormalizeDeleteWithTwoSiblings -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2
and root.firstChild is not root.lastChild
and root.firstChild.nextSibling is root.lastChild
and root.firstChild.previousSibling is None
and root.lastChild.previousSibling is root.firstChild
and root.lastChild.nextSibling is None
, "testNormalizeDeleteWithTwoSiblings -- result")
doc.unlink()
def testNormalizeDeleteAndCombine(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode(""))
root.appendChild(doc.createTextNode("second"))
root.appendChild(doc.createTextNode(""))
root.appendChild(doc.createTextNode("fourth"))
root.appendChild(doc.createTextNode(""))
self.confirm(len(root.childNodes) == 5
and root.childNodes.length == 5,
"testNormalizeDeleteAndCombine -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 1
and root.childNodes.length == 1
and root.firstChild is root.lastChild
and root.firstChild.data == "secondfourth"
and root.firstChild.previousSibling is None
and root.firstChild.nextSibling is None
, "testNormalizeDeleteAndCombine -- result")
doc.unlink()
def testNormalizeRecursion(self):
doc = parseString("<doc>"
"<o>"
"<i/>"
"t"
#
#x
"</o>"
"<o>"
"<o>"
"t2"
#x2
"</o>"
"t3"
#x3
"</o>"
#
"</doc>")
root = doc.documentElement
root.childNodes[0].appendChild(doc.createTextNode(""))
root.childNodes[0].appendChild(doc.createTextNode("x"))
root.childNodes[1].childNodes[0].appendChild(doc.createTextNode("x2"))
root.childNodes[1].appendChild(doc.createTextNode("x3"))
root.appendChild(doc.createTextNode(""))
self.confirm(len(root.childNodes) == 3
and root.childNodes.length == 3
and len(root.childNodes[0].childNodes) == 4
and root.childNodes[0].childNodes.length == 4
and len(root.childNodes[1].childNodes) == 3
and root.childNodes[1].childNodes.length == 3
and len(root.childNodes[1].childNodes[0].childNodes) == 2
and root.childNodes[1].childNodes[0].childNodes.length == 2
, "testNormalize2 -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2
and len(root.childNodes[0].childNodes) == 2
and root.childNodes[0].childNodes.length == 2
and len(root.childNodes[1].childNodes) == 2
and root.childNodes[1].childNodes.length == 2
and len(root.childNodes[1].childNodes[0].childNodes) == 1
and root.childNodes[1].childNodes[0].childNodes.length == 1
, "testNormalize2 -- childNodes lengths")
self.confirm(root.childNodes[0].childNodes[1].data == "tx"
and root.childNodes[1].childNodes[0].childNodes[0].data == "t2x2"
and root.childNodes[1].childNodes[1].data == "t3x3"
, "testNormalize2 -- joined text fields")
self.confirm(root.childNodes[0].childNodes[1].nextSibling is None
and root.childNodes[0].childNodes[1].previousSibling
is root.childNodes[0].childNodes[0]
and root.childNodes[0].childNodes[0].previousSibling is None
and root.childNodes[0].childNodes[0].nextSibling
is root.childNodes[0].childNodes[1]
and root.childNodes[1].childNodes[1].nextSibling is None
and root.childNodes[1].childNodes[1].previousSibling
is root.childNodes[1].childNodes[0]
and root.childNodes[1].childNodes[0].previousSibling is None
and root.childNodes[1].childNodes[0].nextSibling
is root.childNodes[1].childNodes[1]
, "testNormalize2 -- sibling pointers")
doc.unlink()
def testBug0777884(self):
doc = parseString("<o>text</o>")
text = doc.documentElement.childNodes[0]
self.assertEqual(text.nodeType, Node.TEXT_NODE)
# Should run quietly, doing nothing.
text.normalize()
doc.unlink()
def testBug1433694(self):
doc = parseString("<o><i/>t</o>")
node = doc.documentElement
node.childNodes[1].nodeValue = ""
node.normalize()
self.confirm(node.childNodes[-1].nextSibling is None,
"Final child's .nextSibling should be None")
def testSiblings(self):
doc = parseString("<doc><?pi?>text?<elm/></doc>")
root = doc.documentElement
(pi, text, elm) = root.childNodes
self.confirm(pi.nextSibling is text and
pi.previousSibling is None and
text.nextSibling is elm and
text.previousSibling is pi and
elm.nextSibling is None and
elm.previousSibling is text, "testSiblings")
doc.unlink()
def testParents(self):
doc = parseString(
"<doc><elm1><elm2/><elm2><elm3/></elm2></elm1></doc>")
root = doc.documentElement
elm1 = root.childNodes[0]
(elm2a, elm2b) = elm1.childNodes
elm3 = elm2b.childNodes[0]
self.confirm(root.parentNode is doc and
elm1.parentNode is root and
elm2a.parentNode is elm1 and
elm2b.parentNode is elm1 and
elm3.parentNode is elm2b, "testParents")
doc.unlink()
def testNodeListItem(self):
doc = parseString("<doc><e/><e/></doc>")
children = doc.childNodes
docelem = children[0]
self.confirm(children[0] is children.item(0)
and children.item(1) is None
and docelem.childNodes.item(0) is docelem.childNodes[0]
and docelem.childNodes.item(1) is docelem.childNodes[1]
and docelem.childNodes.item(0).childNodes.item(0) is None,
"test NodeList.item()")
doc.unlink()
def testSAX2DOM(self):
from xml.dom import pulldom
sax2dom = pulldom.SAX2DOM()
sax2dom.startDocument()
sax2dom.startElement("doc", {})
sax2dom.characters("text")
sax2dom.startElement("subelm", {})
sax2dom.characters("text")
sax2dom.endElement("subelm")
sax2dom.characters("text")
sax2dom.endElement("doc")
sax2dom.endDocument()
doc = sax2dom.document
root = doc.documentElement
(text1, elm1, text2) = root.childNodes
text3 = elm1.childNodes[0]
self.confirm(text1.previousSibling is None and
text1.nextSibling is elm1 and
elm1.previousSibling is text1 and
elm1.nextSibling is text2 and
text2.previousSibling is elm1 and
text2.nextSibling is None and
text3.previousSibling is None and
text3.nextSibling is None, "testSAX2DOM - siblings")
self.confirm(root.parentNode is doc and
text1.parentNode is root and
elm1.parentNode is root and
text2.parentNode is root and
text3.parentNode is elm1, "testSAX2DOM - parents")
doc.unlink()
def testEncodings(self):
doc = parseString('<foo>€</foo>')
self.assertEqual(doc.toxml(),
'<?xml version="1.0" ?><foo>\u20ac</foo>')
self.assertEqual(doc.toxml('utf-8'),
b'<?xml version="1.0" encoding="utf-8"?><foo>\xe2\x82\xac</foo>')
self.assertEqual(doc.toxml('iso-8859-15'),
b'<?xml version="1.0" encoding="iso-8859-15"?><foo>\xa4</foo>')
# Verify that character decoding errors throw exceptions instead
# of crashing
self.assertRaises(UnicodeDecodeError, parseString,
b'<fran\xe7ais>Comment \xe7a va ? Tr\xe8s bien ?</fran\xe7ais>')
doc.unlink()
class UserDataHandler:
called = 0
def handle(self, operation, key, data, src, dst):
dst.setUserData(key, data + 1, self)
src.setUserData(key, None, None)
self.called = 1
def testUserData(self):
dom = Document()
n = dom.createElement('e')
self.confirm(n.getUserData("foo") is None)
n.setUserData("foo", None, None)
self.confirm(n.getUserData("foo") is None)
n.setUserData("foo", 12, 12)
n.setUserData("bar", 13, 13)
self.confirm(n.getUserData("foo") == 12)
self.confirm(n.getUserData("bar") == 13)
n.setUserData("foo", None, None)
self.confirm(n.getUserData("foo") is None)
self.confirm(n.getUserData("bar") == 13)
handler = self.UserDataHandler()
n.setUserData("bar", 12, handler)
c = n.cloneNode(1)
self.confirm(handler.called
and n.getUserData("bar") is None
and c.getUserData("bar") == 13)
n.unlink()
c.unlink()
dom.unlink()
def checkRenameNodeSharedConstraints(self, doc, node):
# Make sure illegal NS usage is detected:
self.assertRaises(xml.dom.NamespaceErr, doc.renameNode, node,
"http://xml.python.org/ns", "xmlns:foo")
doc2 = parseString("<doc/>")
self.assertRaises(xml.dom.WrongDocumentErr, doc2.renameNode, node,
xml.dom.EMPTY_NAMESPACE, "foo")
def testRenameAttribute(self):
doc = parseString("<doc a='v'/>")
elem = doc.documentElement
attrmap = elem.attributes
attr = elem.attributes['a']
# Simple renaming
attr = doc.renameNode(attr, xml.dom.EMPTY_NAMESPACE, "b")
self.confirm(attr.name == "b"
and attr.nodeName == "b"
and attr.localName is None
and attr.namespaceURI == xml.dom.EMPTY_NAMESPACE
and attr.prefix is None
and attr.value == "v"
and elem.getAttributeNode("a") is None
and elem.getAttributeNode("b").isSameNode(attr)
and attrmap["b"].isSameNode(attr)
and attr.ownerDocument.isSameNode(doc)
and attr.ownerElement.isSameNode(elem))
# Rename to have a namespace, no prefix
attr = doc.renameNode(attr, "http://xml.python.org/ns", "c")
self.confirm(attr.name == "c"
and attr.nodeName == "c"
and attr.localName == "c"
and attr.namespaceURI == "http://xml.python.org/ns"
and attr.prefix is None
and attr.value == "v"
and elem.getAttributeNode("a") is None
and elem.getAttributeNode("b") is None
and elem.getAttributeNode("c").isSameNode(attr)
and elem.getAttributeNodeNS(
"http://xml.python.org/ns", "c").isSameNode(attr)
and attrmap["c"].isSameNode(attr)
and attrmap[("http://xml.python.org/ns", "c")].isSameNode(attr))
# Rename to have a namespace, with prefix
attr = doc.renameNode(attr, "http://xml.python.org/ns2", "p:d")
self.confirm(attr.name == "p:d"
and attr.nodeName == "p:d"
and attr.localName == "d"
and attr.namespaceURI == "http://xml.python.org/ns2"
and attr.prefix == "p"
and attr.value == "v"
and elem.getAttributeNode("a") is None
and elem.getAttributeNode("b") is None
and elem.getAttributeNode("c") is None
and elem.getAttributeNodeNS(
"http://xml.python.org/ns", "c") is None
and elem.getAttributeNode("p:d").isSameNode(attr)
and elem.getAttributeNodeNS(
"http://xml.python.org/ns2", "d").isSameNode(attr)
and attrmap["p:d"].isSameNode(attr)
and attrmap[("http://xml.python.org/ns2", "d")].isSameNode(attr))
# Rename back to a simple non-NS node
attr = doc.renameNode(attr, xml.dom.EMPTY_NAMESPACE, "e")
self.confirm(attr.name == "e"
and attr.nodeName == "e"
and attr.localName is None
and attr.namespaceURI == xml.dom.EMPTY_NAMESPACE
and attr.prefix is None
and attr.value == "v"
and elem.getAttributeNode("a") is None
and elem.getAttributeNode("b") is None
and elem.getAttributeNode("c") is None
and elem.getAttributeNode("p:d") is None
and elem.getAttributeNodeNS(
"http://xml.python.org/ns", "c") is None
and elem.getAttributeNode("e").isSameNode(attr)
and attrmap["e"].isSameNode(attr))
self.assertRaises(xml.dom.NamespaceErr, doc.renameNode, attr,
"http://xml.python.org/ns", "xmlns")
self.checkRenameNodeSharedConstraints(doc, attr)
doc.unlink()
def testRenameElement(self):
doc = parseString("<doc/>")
elem = doc.documentElement
# Simple renaming
elem = doc.renameNode(elem, xml.dom.EMPTY_NAMESPACE, "a")
self.confirm(elem.tagName == "a"
and elem.nodeName == "a"
and elem.localName is None
and elem.namespaceURI == xml.dom.EMPTY_NAMESPACE
and elem.prefix is None
and elem.ownerDocument.isSameNode(doc))
# Rename to have a namespace, no prefix
elem = doc.renameNode(elem, "http://xml.python.org/ns", "b")
self.confirm(elem.tagName == "b"
and elem.nodeName == "b"
and elem.localName == "b"
and elem.namespaceURI == "http://xml.python.org/ns"
and elem.prefix is None
and elem.ownerDocument.isSameNode(doc))
# Rename to have a namespace, with prefix
elem = doc.renameNode(elem, "http://xml.python.org/ns2", "p:c")
self.confirm(elem.tagName == "p:c"
and elem.nodeName == "p:c"
and elem.localName == "c"
and elem.namespaceURI == "http://xml.python.org/ns2"
and elem.prefix == "p"
and elem.ownerDocument.isSameNode(doc))
# Rename back to a simple non-NS node
elem = doc.renameNode(elem, xml.dom.EMPTY_NAMESPACE, "d")
self.confirm(elem.tagName == "d"
and elem.nodeName == "d"
and elem.localName is None
and elem.namespaceURI == xml.dom.EMPTY_NAMESPACE
and elem.prefix is None
and elem.ownerDocument.isSameNode(doc))
self.checkRenameNodeSharedConstraints(doc, elem)
doc.unlink()
def testRenameOther(self):
# We have to create a comment node explicitly since not all DOM
# builders used with minidom add comments to the DOM.
doc = xml.dom.minidom.getDOMImplementation().createDocument(
xml.dom.EMPTY_NAMESPACE, "e", None)
node = doc.createComment("comment")
self.assertRaises(xml.dom.NotSupportedErr, doc.renameNode, node,
xml.dom.EMPTY_NAMESPACE, "foo")
doc.unlink()
def testWholeText(self):
doc = parseString("<doc>a</doc>")
elem = doc.documentElement
text = elem.childNodes[0]
self.assertEqual(text.nodeType, Node.TEXT_NODE)
self.checkWholeText(text, "a")
elem.appendChild(doc.createTextNode("b"))
self.checkWholeText(text, "ab")
elem.insertBefore(doc.createCDATASection("c"), text)
self.checkWholeText(text, "cab")
# make sure we don't cross other nodes
splitter = doc.createComment("comment")
elem.appendChild(splitter)
text2 = doc.createTextNode("d")
elem.appendChild(text2)
self.checkWholeText(text, "cab")
self.checkWholeText(text2, "d")
x = doc.createElement("x")
elem.replaceChild(x, splitter)
splitter = x
self.checkWholeText(text, "cab")
self.checkWholeText(text2, "d")
x = doc.createProcessingInstruction("y", "z")
elem.replaceChild(x, splitter)
splitter = x
self.checkWholeText(text, "cab")
self.checkWholeText(text2, "d")
elem.removeChild(splitter)
self.checkWholeText(text, "cabd")
self.checkWholeText(text2, "cabd")
def testPatch1094164(self):
doc = parseString("<doc><e/></doc>")
elem = doc.documentElement
e = elem.firstChild
self.confirm(e.parentNode is elem, "Before replaceChild()")
# Check that replacing a child with itself leaves the tree unchanged
elem.replaceChild(e, e)
self.confirm(e.parentNode is elem, "After replaceChild()")
def testReplaceWholeText(self):
def setup():
doc = parseString("<doc>a<e/>d</doc>")
elem = doc.documentElement
text1 = elem.firstChild
text2 = elem.lastChild
splitter = text1.nextSibling
elem.insertBefore(doc.createTextNode("b"), splitter)
elem.insertBefore(doc.createCDATASection("c"), text1)
return doc, elem, text1, splitter, text2
doc, elem, text1, splitter, text2 = setup()
text = text1.replaceWholeText("new content")
self.checkWholeText(text, "new content")
self.checkWholeText(text2, "d")
self.confirm(len(elem.childNodes) == 3)
doc, elem, text1, splitter, text2 = setup()
text = text2.replaceWholeText("new content")
self.checkWholeText(text, "new content")
self.checkWholeText(text1, "cab")
self.confirm(len(elem.childNodes) == 5)
doc, elem, text1, splitter, text2 = setup()
text = text1.replaceWholeText("")
self.checkWholeText(text2, "d")
self.confirm(text is None
and len(elem.childNodes) == 2)
def testSchemaType(self):
doc = parseString(
"<!DOCTYPE doc [\n"
" <!ENTITY e1 SYSTEM 'http://xml.python.org/e1'>\n"
" <!ENTITY e2 SYSTEM 'http://xml.python.org/e2'>\n"
" <!ATTLIST doc id ID #IMPLIED \n"
" ref IDREF #IMPLIED \n"
" refs IDREFS #IMPLIED \n"
" enum (a|b) #IMPLIED \n"
" ent ENTITY #IMPLIED \n"
" ents ENTITIES #IMPLIED \n"
" nm NMTOKEN #IMPLIED \n"
" nms NMTOKENS #IMPLIED \n"
" text CDATA #IMPLIED \n"
" >\n"
"]><doc id='name' notid='name' text='splat!' enum='b'"
" ref='name' refs='name name' ent='e1' ents='e1 e2'"
" nm='123' nms='123 abc' />")
elem = doc.documentElement
# We don't want to rely on any specific loader at this point, so
# just make sure we can get to all the names, and that the
# DTD-based namespace is right. The names can vary by loader
# since each supports a different level of DTD information.
t = elem.schemaType
self.confirm(t.name is None
and t.namespace == xml.dom.EMPTY_NAMESPACE)
names = "id notid text enum ref refs ent ents nm nms".split()
for name in names:
a = elem.getAttributeNode(name)
t = a.schemaType
self.confirm(hasattr(t, "name")
and t.namespace == xml.dom.EMPTY_NAMESPACE)
def testSetIdAttribute(self):
doc = parseString("<doc a1='v' a2='w'/>")
e = doc.documentElement
a1 = e.getAttributeNode("a1")
a2 = e.getAttributeNode("a2")
self.confirm(doc.getElementById("v") is None
and not a1.isId
and not a2.isId)
e.setIdAttribute("a1")
self.confirm(e.isSameNode(doc.getElementById("v"))
and a1.isId
and not a2.isId)
e.setIdAttribute("a2")
self.confirm(e.isSameNode(doc.getElementById("v"))
and e.isSameNode(doc.getElementById("w"))
and a1.isId
and a2.isId)
# replace the a1 node; the new node should *not* be an ID
a3 = doc.createAttribute("a1")
a3.value = "v"
e.setAttributeNode(a3)
self.confirm(doc.getElementById("v") is None
and e.isSameNode(doc.getElementById("w"))
and not a1.isId
and a2.isId
and not a3.isId)
# renaming an attribute should not affect its ID-ness:
doc.renameNode(a2, xml.dom.EMPTY_NAMESPACE, "an")
self.confirm(e.isSameNode(doc.getElementById("w"))
and a2.isId)
def testSetIdAttributeNS(self):
NS1 = "http://xml.python.org/ns1"
NS2 = "http://xml.python.org/ns2"
doc = parseString("<doc"
" xmlns:ns1='" + NS1 + "'"
" xmlns:ns2='" + NS2 + "'"
" ns1:a1='v' ns2:a2='w'/>")
e = doc.documentElement
a1 = e.getAttributeNodeNS(NS1, "a1")
a2 = e.getAttributeNodeNS(NS2, "a2")
self.confirm(doc.getElementById("v") is None
and not a1.isId
and not a2.isId)
e.setIdAttributeNS(NS1, "a1")
self.confirm(e.isSameNode(doc.getElementById("v"))
and a1.isId
and not a2.isId)
e.setIdAttributeNS(NS2, "a2")
self.confirm(e.isSameNode(doc.getElementById("v"))
and e.isSameNode(doc.getElementById("w"))
and a1.isId
and a2.isId)
# replace the a1 node; the new node should *not* be an ID
a3 = doc.createAttributeNS(NS1, "a1")
a3.value = "v"
e.setAttributeNode(a3)
self.confirm(e.isSameNode(doc.getElementById("w")))
self.confirm(not a1.isId)
self.confirm(a2.isId)
self.confirm(not a3.isId)
self.confirm(doc.getElementById("v") is None)
# renaming an attribute should not affect its ID-ness:
doc.renameNode(a2, xml.dom.EMPTY_NAMESPACE, "an")
self.confirm(e.isSameNode(doc.getElementById("w"))
and a2.isId)
def testSetIdAttributeNode(self):
NS1 = "http://xml.python.org/ns1"
NS2 = "http://xml.python.org/ns2"
doc = parseString("<doc"
" xmlns:ns1='" + NS1 + "'"
" xmlns:ns2='" + NS2 + "'"
" ns1:a1='v' ns2:a2='w'/>")
e = doc.documentElement
a1 = e.getAttributeNodeNS(NS1, "a1")
a2 = e.getAttributeNodeNS(NS2, "a2")
self.confirm(doc.getElementById("v") is None
and not a1.isId
and not a2.isId)
e.setIdAttributeNode(a1)
self.confirm(e.isSameNode(doc.getElementById("v"))
and a1.isId
and not a2.isId)
e.setIdAttributeNode(a2)
self.confirm(e.isSameNode(doc.getElementById("v"))
and e.isSameNode(doc.getElementById("w"))
and a1.isId
and a2.isId)
# replace the a1 node; the new node should *not* be an ID
a3 = doc.createAttributeNS(NS1, "a1")
a3.value = "v"
e.setAttributeNode(a3)
self.confirm(e.isSameNode(doc.getElementById("w")))
self.confirm(not a1.isId)
self.confirm(a2.isId)
self.confirm(not a3.isId)
self.confirm(doc.getElementById("v") is None)
# renaming an attribute should not affect its ID-ness:
doc.renameNode(a2, xml.dom.EMPTY_NAMESPACE, "an")
self.confirm(e.isSameNode(doc.getElementById("w"))
and a2.isId)
def testPickledDocument(self):
doc = parseString("<?xml version='1.0' encoding='us-ascii'?>\n"
"<!DOCTYPE doc PUBLIC 'http://xml.python.org/public'"
" 'http://xml.python.org/system' [\n"
" <!ELEMENT e EMPTY>\n"
" <!ENTITY ent SYSTEM 'http://xml.python.org/entity'>\n"
"]><doc attr='value'> text\n"
"<?pi sample?> <!-- comment --> <e/> </doc>")
s = pickle.dumps(doc)
doc2 = pickle.loads(s)
stack = [(doc, doc2)]
while stack:
n1, n2 = stack.pop()
self.confirm(n1.nodeType == n2.nodeType
and len(n1.childNodes) == len(n2.childNodes)
and n1.nodeName == n2.nodeName
and not n1.isSameNode(n2)
and not n2.isSameNode(n1))
if n1.nodeType == Node.DOCUMENT_TYPE_NODE:
len(n1.entities)
len(n2.entities)
len(n1.notations)
len(n2.notations)
self.confirm(len(n1.entities) == len(n2.entities)
and len(n1.notations) == len(n2.notations))
for i in range(len(n1.notations)):
# XXX this loop body doesn't seem to be executed?
no1 = n1.notations.item(i)
                    no2 = n2.notations.item(i)
self.confirm(no1.name == no2.name
and no1.publicId == no2.publicId
and no1.systemId == no2.systemId)
stack.append((no1, no2))
for i in range(len(n1.entities)):
e1 = n1.entities.item(i)
e2 = n2.entities.item(i)
self.confirm(e1.notationName == e2.notationName
and e1.publicId == e2.publicId
and e1.systemId == e2.systemId)
stack.append((e1, e2))
if n1.nodeType != Node.DOCUMENT_NODE:
self.confirm(n1.ownerDocument.isSameNode(doc)
and n2.ownerDocument.isSameNode(doc2))
for i in range(len(n1.childNodes)):
stack.append((n1.childNodes[i], n2.childNodes[i]))
def testSerializeCommentNodeWithDoubleHyphen(self):
doc = create_doc_without_doctype()
doc.appendChild(doc.createComment("foo--bar"))
self.assertRaises(ValueError, doc.toxml)
def testEmptyXMLNSValue(self):
doc = parseString("<element xmlns=''>\n"
"<foo/>\n</element>")
doc2 = parseString(doc.toxml())
self.confirm(doc2.namespaceURI == xml.dom.EMPTY_NAMESPACE)
def test_main():
run_unittest(MinidomTest)
if __name__ == "__main__":
test_main()
|
|
# -*- coding: utf-8 -*-
"""
Main module for python-amazon-mws package.
"""
from __future__ import absolute_import
import base64
import datetime
import hashlib
import hmac
import re
import warnings
from zipfile import ZipFile
from io import BytesIO
from requests import request
from requests.exceptions import HTTPError
from enum import Enum
from . import utils
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
from xml.etree.ElementTree import ParseError as XMLError
__version__ = '1.0.0dev11'
class Marketplaces(Enum):
"""
Format: Country code: endpoint, marketplace_id.
"""
AU = ('https://mws.amazonservices.com.au', 'A39IBJ37TRP1C6')
BR = ('https://mws.amazonservices.com', 'A2Q3Y263D00KWC')
CA = ('https://mws.amazonservices.ca', 'A2EUQ1WTGCTBG2')
CN = ('https://mws.amazonservices.com.cn', 'AAHKV2X7AFYLW')
DE = ('https://mws-eu.amazonservices.com', 'A1PA6795UKMFR9')
ES = ('https://mws-eu.amazonservices.com', 'A1RKKUPIHCS9HS')
FR = ('https://mws-eu.amazonservices.com', 'A13V1IB3VIYZZH')
IN = ('https://mws.amazonservices.in', 'A21TJRUUN4KGV')
IT = ('https://mws-eu.amazonservices.com', 'APJ6JRA9NG5V4')
JP = ('https://mws.amazonservices.jp', 'A1VC38T7YXB528')
MX = ('https://mws.amazonservices.com.mx', 'A1AM78C64UM0Y8')
UK = ('https://mws-eu.amazonservices.com', 'A1F83G8C2ARO7P')
US = ('https://mws.amazonservices.com', 'ATVPDKIKX0DER')
def __init__(self, endpoint, marketplace_id):
"""Easy dot access like: Marketplaces.endpoint ."""
self.endpoint = endpoint
self.marketplace_id = marketplace_id
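# Illustrative dot access (values taken from the table above):
#
#     >>> Marketplaces.US.endpoint
#     'https://mws.amazonservices.com'
#     >>> Marketplaces.DE.marketplace_id
#     'A1PA6795UKMFR9'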
class MWSError(Exception):
"""
Main MWS Exception class
"""
# Allows quick access to the response object.
    # Do not rely on this attribute; always check that it is not None.
response = None
def calc_request_description(params):
"""
Returns a flatted string with the request description, built from the params dict.
Entries are escaped with urllib quote method, formatted as "key=value", and joined with "&".
"""
"""
Builds the request description as a single string from the set of params.
Each key-value pair takes the form "key=value"
Sets of "key=value" pairs are joined by "&".
Keys should appear in alphabetical order in the result string.
Example:
params = {'foo': 1, 'bar': 4, 'baz': 'potato'}
Returns:
"bar=4&baz=potato&foo=1"
"""
description_items = []
for item in sorted(params.keys()):
encoded_val = params[item]
description_items.append('{}={}'.format(item, encoded_val))
return '&'.join(description_items)
def clean_params(params):
"""Input cleanup and prevent a lot of common input mistakes."""
# silently remove parameter where values are empty
params = {k: v for k, v in params.items() if v}
params_enc = dict()
for key, value in params.items():
if isinstance(value, (dict, list, set, tuple)):
            message = 'expected string or datetime datatype, got {}, ' \
                'for key {} and value {}'.format(
                    type(value), key, str(value))
raise MWSError(message)
if isinstance(value, (datetime.datetime, datetime.date)):
value = value.isoformat()
if isinstance(value, bool):
value = str(value).lower()
value = str(value)
params_enc[key] = quote(value, safe='-_.~')
return params_enc
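# A small illustration of the cleanup above (hypothetical input): empty values
# are dropped, dates/datetimes become ISO-8601 strings, booleans are lower-cased,
# and everything else is URL-quoted with '-', '_', '.', '~' left unescaped.
#
#     >>> clean_params({'A': datetime.date(2017, 1, 2), 'B': True, 'C': ''})
#     {'A': '2017-01-02', 'B': 'true'}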
def remove_namespace(xml):
"""
Strips the namespace from XML document contained in a string.
Returns the stripped string.
"""
regex = re.compile(' xmlns(:ns2)?="[^"]+"|(ns2:)|(xml:)')
return regex.sub('', xml)
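# For example (an illustrative string, not a real MWS response): namespace
# declarations and ns2:/xml: prefixes are removed before parsing.
#
#     >>> remove_namespace('<Resp xmlns="http://example.com"><ns2:Id>1</ns2:Id></Resp>')
#     '<Resp><Id>1</Id></Resp>'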
class DictWrapper(object):
"""
Main class that converts XML data to a parsed response object as a tree of ObjectDicts,
stored in the .parsed property.
"""
# TODO create a base class for DictWrapper and DataWrapper with all the keys we expect in responses.
# This will make it easier to use either class in place of each other.
# Either this, or pile everything into DataWrapper and make it able to handle all cases.
def __init__(self, xml, rootkey=None):
self.original = xml
self.response = None
self._rootkey = rootkey
self._mydict = utils.XML2Dict().fromstring(remove_namespace(xml))
self._response_dict = self._mydict.get(list(self._mydict.keys())[0], self._mydict)
@property
def parsed(self):
"""
Provides access to the parsed contents of an XML response as a tree of ObjectDicts.
"""
if self._rootkey:
return self._response_dict.get(self._rootkey, self._response_dict)
return self._response_dict
class DataWrapper(object):
"""
Text wrapper in charge of validating the hash sent by Amazon.
"""
def __init__(self, data, headers):
self.original = data
self.response = None
self.headers = headers
if 'content-md5' in self.headers:
hash_ = utils.calc_md5(self.original)
if self.headers['content-md5'].encode() != hash_:
raise MWSError("Wrong Content length, maybe amazon error...")
@property
def parsed(self):
"""
        Similar to the `parsed` property of DictWrapper, this provides an
        interface for a data response that could not be parsed as XML.
"""
return self.original
"""
To return an unzipped file object based on the content type"
"""
@property
def unzipped(self):
"""
        If the response is a zip file, return a ZipFile object of its contents.
        Otherwise, return None.
"""
if self.headers['content-type'] == 'application/zip':
try:
with ZipFile(BytesIO(self.original)) as unzipped_fileobj:
                    # extract the zip file contents to the current working directory
                    unzipped_fileobj.extractall()
                    # return the ZipFile object to the caller
return unzipped_fileobj
except Exception as exc:
raise MWSError(str(exc))
return None # 'The response is not a zipped file.'
class MWS(object):
"""
Base Amazon API class
"""
    # This is used to POST/GET to the different URIs used by Amazon per API,
    # e.g. /Orders/2011-01-01.
    # Subclasses must define their own URI only if needed.
URI = "/"
# The API version varies in most amazon APIs
VERSION = "2009-01-01"
    # There seem to be some XML namespace issues; therefore, every API subclass
    # is recommended to define its namespace so that it can be referenced
    # like so: AmazonAPISubclass.NAMESPACE.
# For more information see http://stackoverflow.com/a/8719461/389453
NAMESPACE = ''
# In here we name each of the operations available to the subclass
# that have 'ByNextToken' operations associated with them.
# If the Operation is not listed here, self.action_by_next_token
# will raise an error.
NEXT_TOKEN_OPERATIONS = []
    # Some APIs are available only to either a "Merchant" or a "Seller";
    # the account type needs to be sent in every call to Amazon MWS.
    # This constant defines the exact name of the parameter Amazon expects
    # for the specific API being used.
    # Subclasses that require a different account type, such as "Merchant",
    # should override it like so:
    #     ACCOUNT_TYPE = "Merchant"
    # To use a proxy, pass the `proxy` argument when instantiating this class.
    # It must look like 'ip_address:port' for a proxy without auth, or
    # 'login:password@ip_address:port' for a proxy with auth.
    ACCOUNT_TYPE = "SellerId"
def __init__(self, access_key, secret_key, account_id,
region='US', uri='', version='', auth_token='', proxy=None):
self.access_key = access_key
self.secret_key = secret_key
self.account_id = account_id
self.auth_token = auth_token
self.version = version or self.VERSION
self.uri = uri or self.URI
self.proxy = proxy
# * TESTING FLAGS * #
self._test_request_params = False
if region in Marketplaces.__members__:
self.domain = Marketplaces[region].endpoint
else:
error_msg = 'Incorrect region supplied: {region}. ' \
'Must be one of the following: {regions}'.format(
region=region,
regions=', '.join(Marketplaces.__members__.keys()),
)
raise MWSError(error_msg)
def get_default_params(self):
"""
Get the parameters required in all MWS requests
"""
params = {
'AWSAccessKeyId': self.access_key,
self.ACCOUNT_TYPE: self.account_id,
'SignatureVersion': '2',
'Timestamp': utils.get_utc_timestamp(),
'Version': self.version,
'SignatureMethod': 'HmacSHA256',
}
if self.auth_token:
params['MWSAuthToken'] = self.auth_token
# TODO current tests only check for auth_token being set.
# need a branch test to check for auth_token being skipped (no key present)
return params
def make_request(self, extra_data, method="GET", **kwargs):
"""
Make request to Amazon MWS API with these parameters
"""
params = self.get_default_params()
proxies = self.get_proxies()
params.update(extra_data)
params = clean_params(params)
if self._test_request_params:
# Testing method: return the params from this request before the request is made.
return params
# TODO: All current testing stops here. More branches needed.
request_description = calc_request_description(params)
signature = self.calc_signature(method, request_description)
url = "{domain}{uri}?{description}&Signature={signature}".format(
domain=self.domain,
uri=self.uri,
description=request_description,
signature=quote(signature),
)
headers = {'User-Agent': 'python-amazon-mws/{} (Language=Python)'.format(__version__)}
headers.update(kwargs.get('extra_headers', {}))
try:
            # The params dict is not passed as the `params` argument to `request`:
            # the URL-encoded string of params is needed here in order to sign it,
            # and passing the dict would make `requests` re-encode it. Passing the
            # full, already-signed URL avoids doing that work twice.
response = request(method, url, data=kwargs.get(
'body', ''), headers=headers, proxies=proxies, timeout=kwargs.get('timeout', 300))
response.raise_for_status()
            # When retrieving data from the response object, be aware that
            # response.content returns bytes, while response.text decodes
            # response.content to unicode.
            data = response.content
            # The headers are not checked to decide which content structure to serve,
            # because Amazon's MWS API sometimes returns XML error responses with
            # "text/plain" as the Content-Type.
rootkey = kwargs.get('rootkey', extra_data.get("Action") + "Result")
try:
try:
parsed_response = DictWrapper(data, rootkey)
                except TypeError:
                    # Under Python 3, remove_namespace() raises TypeError when the
                    # payload is bytes (e.g. a CSV result); retry with the decoded text.
parsed_response = DictWrapper(response.text, rootkey)
except XMLError:
parsed_response = DataWrapper(data, response.headers)
except HTTPError as exc:
error = MWSError(str(exc.response.text))
error.response = exc.response
raise error
# Store the response object in the parsed_response for quick access
parsed_response.response = response
return parsed_response
def get_proxies(self):
proxies = {"http": None, "https": None}
if self.proxy:
# TODO need test to enter here
proxies = {
"http": "http://{}".format(self.proxy),
"https": "https://{}".format(self.proxy),
}
return proxies
def get_service_status(self):
"""
        Returns a GREEN, GREEN_I, YELLOW or RED status, depending on the
        status/availability of the API it is called from.
"""
return self.make_request(extra_data=dict(Action='GetServiceStatus'))
def action_by_next_token(self, action, next_token):
"""
Run a '...ByNextToken' action for the given action.
If the action is not listed in self.NEXT_TOKEN_OPERATIONS, MWSError is raised.
        The action is expected NOT to include 'ByNextToken' at the end of its
        name for this call; the function adds that suffix itself.
"""
if action not in self.NEXT_TOKEN_OPERATIONS:
# TODO Would like a test entering here.
# Requires a dummy API class to be written that will trigger it.
raise MWSError((
"{} action not listed in this API's NEXT_TOKEN_OPERATIONS. "
"Please refer to documentation."
).format(action))
action = '{}ByNextToken'.format(action)
data = {
'Action': action,
'NextToken': next_token,
}
return self.make_request(data, method="POST")
def calc_signature(self, method, request_description):
"""
Calculate MWS signature to interface with Amazon
Args:
method (str)
request_description (str)
"""
sig_data = '\n'.join([
method,
self.domain.replace('https://', '').lower(),
self.uri,
request_description
])
return base64.b64encode(hmac.new(self.secret_key.encode(), sig_data.encode(), hashlib.sha256).digest())
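    # The string-to-sign built above has this shape (illustrative values):
    #
    #     POST
    #     mws.amazonservices.com
    #     /Orders/2011-01-01
    #     AWSAccessKeyId=...&Action=ListOrders&...
    #
    # i.e. HTTP verb, lower-cased host, URI and the sorted query string, each on
    # its own line, signed with HMAC-SHA256 and base64-encoded.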
def enumerate_param(self, param, values):
"""
DEPRECATED.
Please use `utils.enumerate_param` for one param, or
`utils.enumerate_params` for multiple params.
"""
# TODO remove in 1.0 release.
# No tests needed.
warnings.warn((
"Please use `utils.enumerate_param` for one param, or "
"`utils.enumerate_params` for multiple params."
), DeprecationWarning)
return utils.enumerate_param(param, values)
|
|
import os, glob
import h5py
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def main():
num = extract_num_trajectory_from_out_files()
an = eval_an_trajectory_at_num_time_points( num )
plot_trajectories( num , an )
def extract_num_trajectory_from_out_files():
out_files = find_necessary_out_files()
num_trajectory = []
for f in out_files:
num_trajectory.append( extract_time_pos_mom( f ) )
num_trajectory = remove_empty_and_sort_by_time( num_trajectory )
num_trajectory = np.array( num_trajectory,
dtype=[('t','float'),
('x','float'), ('y','float'), ('z','float'),
('px','float'), ('py','float'), ('pz','float') ] )
return( num_trajectory )
def remove_empty_and_sort_by_time( num_trajectory ):
removed_empty = [ x for x in num_trajectory if x ]
sorted_by_time = sorted( removed_empty, key = lambda x: x[0] )
return ( sorted_by_time )
def find_necessary_out_files():
os.chdir("./")
h5files = []
for file in glob.glob("single_particle_mgn_field_[0-9]*.h5"):
h5files.append( file )
return h5files
def extract_time_pos_mom( h5file ):
h5 = h5py.File( h5file, mode="r")
t = h5["/TimeGrid"].attrs["current_time"][0]
t_pos_mom = ()
if ( len(h5["/ParticleSources/emit_single_particle"]) > 0 and
len(h5["/ParticleSources/emit_single_particle/position_x"]) > 0 ):
x = h5["/ParticleSources/emit_single_particle/position_x"][0]
y = h5["/ParticleSources/emit_single_particle/position_y"][0]
z = h5["/ParticleSources/emit_single_particle/position_z"][0]
px = h5["/ParticleSources/emit_single_particle/momentum_x"][0]
py = h5["/ParticleSources/emit_single_particle/momentum_y"][0]
pz = h5["/ParticleSources/emit_single_particle/momentum_z"][0]
t_pos_mom = (t, x, y, z, px, py, pz)
h5.close()
return( t_pos_mom )
def eval_an_trajectory_at_num_time_points( num_trajectory ):
initial_out_file = "single_particle_mgn_field_0000000.h5"
initial_h5 = h5py.File( initial_out_file, mode="r")
global B0, q, m
B0 = extract_magn_field( initial_h5 )
q, m = extract_particle_charge_and_mass( initial_h5 )
global x0, y0, z0, px0, py0, pz0, vx0, vy0, vz0
x0, y0, z0, px0, py0, pz0 = extract_initial_pos_and_mom( initial_h5 )
vx0, vy0, vz0 = px0/m, py0/m, pz0/m
initial_h5.close()
global v_perp_len, v_prll
v_perp_len = np.sqrt( vx0**2 + vy0**2 )
v_prll = vz0
global speed_of_light, larmor_rad, larmor_freq
    speed_of_light = 3e10  # [cm/s] in CGS units; TODO: avoid hard-coding this
larmor_rad = m / abs(q) * v_perp_len / B0 * speed_of_light
larmor_freq = abs(q) / m * B0 / speed_of_light
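    # In CGS-Gaussian units the relations used above are (a sketch for reference):
    #     larmor_rad  = m * v_perp * c / (|q| * B0)
    #     larmor_freq = |q| * B0 / (m * c)
    # with v_perp the initial speed perpendicular to the uniform field B0
    # directed along z.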
an_trajectory = np.empty_like( num_trajectory )
for i, t in enumerate( num_trajectory['t'] ):
x, y, z = coords( t )
vx, vy, vz = velocities(t)
px, py, pz = vx * m, vy * m, vz * m
an_trajectory[i] = ( t, x, y ,z, px, py, pz )
return( an_trajectory )
def extract_magn_field( h5 ):
B0 = h5["/ExternalFields/mgn_uni"].attrs["magnetic_uniform_field_z"][0]
return B0
def extract_particle_charge_and_mass( h5 ):
q = h5["/ParticleSources/emit_single_particle"].attrs["charge"][0]
m = h5["/ParticleSources/emit_single_particle"].attrs["mass"][0]
return (q, m)
def extract_initial_pos_and_mom( h5 ):
x0 = h5["/ParticleSources/emit_single_particle/position_x"][0]
y0 = h5["/ParticleSources/emit_single_particle/position_y"][0]
z0 = h5["/ParticleSources/emit_single_particle/position_z"][0]
px0 = h5["/ParticleSources/emit_single_particle/momentum_x"][0]
py0 = h5["/ParticleSources/emit_single_particle/momentum_y"][0]
pz0 = h5["/ParticleSources/emit_single_particle/momentum_z"][0]
return( x0, y0, z0, px0, py0, pz0 )
def velocities(t):
sign_larmor_freq = larmor_freq * np.sign( q )
vx = vx0 * np.cos( sign_larmor_freq * t ) + vy0 * np.sin( sign_larmor_freq * t )
vy = -vx0 * np.sin( sign_larmor_freq * t ) + vy0 * np.cos( sign_larmor_freq * t )
vz = vz0
return ( vx, vy, vz )
def coords(t):
sign_larmor_freq = larmor_freq * np.sign( q )
x = x0 + 1 / sign_larmor_freq * ( vx0 * np.sin( sign_larmor_freq * t ) -
vy0 * np.cos( sign_larmor_freq * t ) + vy0 )
y = y0 + 1 / sign_larmor_freq * ( vx0 * np.cos( sign_larmor_freq * t ) +
vy0 * np.sin( sign_larmor_freq * t ) - vx0 )
z = z0 + vz0 * t
return ( x, y, z )
def plot_trajectories( num , an ):
plot_3d( num, an )
plot_2d( num, an )
plot_kin_en( num , an )
def plot_3d( num, an ):
fig = plt.figure()
ax = fig.gca( projection='3d' )
ax.plot( an['x'], an['y'], an['z'], 'g-', linewidth = 3, label = "An" )
ax.plot( num['x'][::2], num['y'][::2], num['z'][::2], 'b.',
markersize = 6, label = "Num" )
ax.set_xlabel('X [cm]')
ax.set_ylabel('Y [cm]')
ax.set_zlabel('Z [cm]')
plt.legend( loc = 'upper left', title="3d" )
#plt.show()
print( 'Saving 3d trajectory plot to "3d.png"' )
plt.savefig('3d.png')
def plot_2d( num, an ):
plt.figure( figsize=( 16, 6 ) )
plt.subplots_adjust( left = None, bottom = None,
right = None, top = None,
wspace = 0.4, hspace = None )
#XY
ax = plt.subplot(131)
plt.plot( num['x'], num['y'],
linestyle='', marker='o',
label = "Num" )
plt.plot( an['x'], an['y'],
linestyle='-', marker='', lw = 3,
label = "An" )
ax.set_xlabel('X [cm]')
ax.set_ylabel('Y [cm]')
plt.legend( loc = 'upper left', title="XY", bbox_to_anchor=(-0.6,1) )
#ZX
ax = plt.subplot(132)
plt.plot( num['z'], num['x'],
linestyle='', marker='o',
label = "Num" )
plt.plot( an['z'], an['x'],
linestyle='-', marker='', lw = 3,
label = "An" )
ax.set_xlabel('Z [cm]')
ax.set_ylabel('X [cm]')
ax.text(0.05, 0.92, 'ZX',
transform=ax.transAxes, fontsize=15)
#plt.legend( loc = 'upper left', title="ZX" )
#ZY
ax = plt.subplot(133)
plt.plot( num['z'], num['y'],
linestyle='', marker='o',
label = "Num" )
plt.plot( an['z'], an['y'],
linestyle='-', marker='', lw = 3,
label = "An" )
ax.set_xlabel('Z [cm]')
ax.set_ylabel('Y [cm]')
ax.text(0.88, 0.92, 'ZY',
transform=ax.transAxes, fontsize=15)
#plt.legend( loc = 'upper left', title="ZY" )
print( 'Saving 2d trajectory projection plots to "2d.png"' )
plt.savefig('2d.png')
def plot_kin_en( num , an ):
E_num = ( num['px']**2 + num['py']**2 + num['pz']**2 ) / ( 2 * m )
E_an = ( an['px']**2 + an['py']**2 + an['pz']**2 ) / ( 2 * m )
t = num['t']
plt.figure()
axes = plt.gca()
axes.set_xlabel( "t [s]" )
axes.set_ylabel( "E [erg]" )
# axes.set_ylim( [min( E_an.min(), E_num.min() ),
# max( E_an.max(), E_num.max() ) ] )
line, = plt.plot( t, E_num, 'o' )
line.set_label( "Num" )
line, = plt.plot( t, E_an, ls = 'solid', lw = 3 )
line.set_label( "An" )
plt.legend( loc = 'upper right' )
print( 'Saving kinetic energy comparison plot to "kin_en.png"' )
plt.savefig( 'kin_en.png' )
if __name__ == "__main__":
    main()
|
|
# -*- coding: utf-8 -*-
import requests as r
from simplejson import JSONDecodeError
GET = 1
POST = 2
class Sia(object):
def __init__(self, host='http://localhost', port='9980'):
self.host = host
self.port = port
@property
def _url_base(self):
return self.host + ":" + str(self.port)
def __call__(self, verb, url, data=None):
user_agent = {'User-agent': 'Sia-Agent'}
full_url = self._url_base + url
if verb == GET:
resp = r.get(full_url, headers=user_agent, params=data)
        elif verb == POST:
            resp = r.post(full_url, headers=user_agent, data=data)
        else:
            raise ValueError("verb must be GET or POST")
try:
return resp.json()
except JSONDecodeError:
return resp.ok
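    # A minimal usage sketch (assumes a local siad listening on the default port):
    #
    #     sia = Sia()
    #     sia(GET, '/daemon/version')                        # e.g. {'version': '1.2.2'}
    #     sia(POST, '/gateway/connect/212.77.177.47:9981')   # True when no JSON body
    #
    # JSON bodies are decoded; responses without a JSON body fall back to resp.ok.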
def get_consensus(self, **data):
url = '/consensus'
return self(GET, url, data)
def set_consensus_validate_transactionset(self, **data):
url = '/consensus/validate/transactionset'
return self(POST, url, data)
def get_daemon_constants(self, **data):
url = '/daemon/constants'
return self(GET, url, data)
def get_daemon_stop(self, **data):
url = '/daemon/stop'
return self(GET, url, data)
def get_daemon_version(self, **data):
url = '/daemon/version'
return self(GET, url, data)
def get_gateway(self, **data):
url = '/gateway'
return self(GET, url, data)
def set_gateway_connect(self, netaddress, **data):
url = '/gateway/connect/{netaddress}'.format(netaddress=netaddress)
return self(POST, url, data)
def set_gateway_disconnect(self, netaddress, **data):
url = '/gateway/disconnect/{netaddress}'.format(netaddress=netaddress)
return self(POST, url, data)
def get_host(self, **data):
url = '/host'
return self(GET, url, data)
def set_host(self, **data):
url = '/host'
return self(POST, url, data)
def set_host_announce(self, **data):
url = '/host/announce'
return self(POST, url, data)
def get_host_storage(self, **data):
url = '/host/storage'
return self(GET, url, data)
def set_host_storage_folders_add(self, **data):
url = '/host/storage/folders/add'
return self(POST, url, data)
def set_host_storage_folders_remove(self, **data):
url = '/host/storage/folders/remove'
return self(POST, url, data)
def set_host_storage_folders_resize(self, **data):
url = '/host/storage/folders/resize'
return self(POST, url, data)
def set_host_storage_sectors_delete(self, merkleroot, **data):
url = '/host/storage/sectors/delete/{merkleroot}'.format(merkleroot=merkleroot)
return self(POST, url, data)
def get_hostdb_active(self, **data):
url = '/hostdb/active'
return self(GET, url, data)
def get_hostdb_all(self, **data):
url = '/hostdb/all'
return self(GET, url, data)
def get_hostdb_hosts(self, pubkey, **data):
url = '/hostdb/hosts/{pubkey}'.format(pubkey=pubkey)
return self(GET, url, data)
def get_miner(self, **data):
url = '/miner'
return self(GET, url, data)
def get_miner_header(self, **data):
url = '/miner/header'
return self(GET, url, data)
def set_miner_header(self, **data):
url = '/miner/header'
return self(POST, url, data)
def get_miner_start(self, **data):
url = '/miner/start'
return self(GET, url, data)
def get_miner_stop(self, **data):
url = '/miner/stop'
return self(GET, url, data)
def get_renter(self, **data):
url = '/renter'
return self(GET, url, data)
def set_renter(self, **data):
url = '/renter'
return self(POST, url, data)
def get_renter_contracts(self, **data):
url = '/renter/contracts'
return self(GET, url, data)
def set_renter_delete(self, siapath, **data):
url = '/renter/delete/{siapath}'.format(siapath=siapath)
return self(POST, url, data)
def get_renter_download(self, siapath, **data):
url = '/renter/download/{siapath}'.format(siapath=siapath)
return self(GET, url, data)
def get_renter_downloadasync(self, siapath, **data):
url = '/renter/downloadasync/{siapath}'.format(siapath=siapath)
return self(GET, url, data)
def get_renter_downloads(self, **data):
url = '/renter/downloads'
return self(GET, url, data)
def get_renter_files(self, **data):
url = '/renter/files'
return self(GET, url, data)
def get_renter_prices(self, **data):
url = '/renter/prices'
return self(GET, url, data)
def set_renter_rename(self, siapath, **data):
url = '/renter/rename/{siapath}'.format(siapath=siapath)
return self(POST, url, data)
def set_renter_upload(self, siapath, **data):
url = '/renter/upload/{siapath}'.format(siapath=siapath)
return self(POST, url, data)
def get_wallet(self, **data):
url = '/wallet'
return self(GET, url, data)
def set_wallet_033x(self, **data):
url = '/wallet/033x'
return self(POST, url, data)
def get_wallet_address(self, **data):
url = '/wallet/address'
return self(GET, url, data)
def get_wallet_addresses(self, **data):
url = '/wallet/addresses'
return self(GET, url, data)
def get_wallet_backup(self, **data):
url = '/wallet/backup'
return self(GET, url, data)
def set_wallet_init(self, **data):
url = '/wallet/init'
return self(POST, url, data)
def set_wallet_init_seed(self, **data):
url = '/wallet/init/seed'
return self(POST, url, data)
def set_wallet_lock(self, **data):
url = '/wallet/lock'
return self(POST, url, data)
def set_wallet_seed(self, **data):
url = '/wallet/seed'
return self(POST, url, data)
def get_wallet_seeds(self, **data):
url = '/wallet/seeds'
return self(GET, url, data)
def set_wallet_siacoins(self, **data):
url = '/wallet/siacoins'
return self(POST, url, data)
def set_wallet_siafunds(self, **data):
url = '/wallet/siafunds'
return self(POST, url, data)
def set_wallet_siagkey(self, **data):
url = '/wallet/siagkey'
return self(POST, url, data)
def set_wallet_sweep_seed(self, **data):
url = '/wallet/sweep/seed'
return self(POST, url, data)
def get_wallet_transaction(self, id, **data):
url = '/wallet/transaction/{id}'.format(id=id)
return self(GET, url, data)
def get_wallet_transactions(self, addr=None, **data):
if addr:
url = '/wallet/transactions/{addr}'.format(addr=addr)
else:
url = '/wallet/transactions'
return self(GET, url, data)
def set_wallet_unlock(self, **data):
url = '/wallet/unlock'
return self(POST, url, data)
if __name__ == '__main__':
sc = Sia()
cs = sc.get_consensus()
print(cs['height'])
# 108060
backup_made = sc.get_wallet_backup(destination=r'd:\siadwallet.dat')
print(backup_made)
# True
backup_made = sc.get_wallet_backup(destination=r'error causing input?@#$!`')
print(backup_made)
# {'message': 'error when calling /wallet/backup: destination must be an absolute path'}
print(sc.get_gateway())
# {'peers': [{'netaddress': '92.253.172.90:9981', 'version': '0.5.2', 'inbound': False, 'local': False}, {'netaddress': '176.9.43.109:9981', 'version': '1.1.2', 'inbound': False, 'local': False}, {'netaddress': '91.134.136.124:9981', 'version': '1.2.0', 'inbound': False, 'local': False}, {'netaddress': '76.190.165.207:9981', 'version': '1.2.1', 'inbound': False, 'local': False}, {'netaddress': '51.15.58.86:9981', 'version': '1.1.2', 'inbound': False, 'local': False}, {'netaddress': '37.139.15.138:9981', 'version': '1.0.0', 'inbound': False, 'local': False}, {'netaddress': '87.98.189.200:9981', 'version': '1.2.2', 'inbound': False, 'local': False}], 'netaddress': '99.244.212.203:9981'}
print(sc.set_gateway_connect('212.77.177.47:9981'))
# True
print(sc.set_gateway_disconnect('212.77.177.47:9981'))
# True
print(sc.set_gateway_disconnect('212.77.177.47:9981'))
# {'message': 'not connected to that node'}
|
|
from __future__ import print_function, division
import os
import sys
root = os.path.join(os.getcwd().split('src')[0], 'src/defects')
if root not in sys.path:
sys.path.append(root)
import warnings
from prediction.model import logistic_model
from py_weka.classifier import classify
from utils import *
from metrics.abcd import abcd
from metrics.recall_vs_loc import get_curve
from mklaren.kernel.kinterface import Kinterface
from mklaren.kernel.kernel import *
from mklaren.projection.icd import ICD
from pdb import set_trace
import numpy as np
from scipy.spatial.distance import pdist, squareform
import pandas
from plot.effort_plot import effort_plot
from tabulate import tabulate
warnings.filterwarnings("ignore")
def get_kernel_matrix(dframe, n_dim=15):
"""
This returns a Kernel Transformation Matrix $\Theta$
It uses kernel approximation offered by the MKlaren package
For the sake of completeness (and for my peace of mind, I use the best possible approx.)
:param dframe: input data as a pandas dataframe.
:param n_dim: Number of dimensions for the kernel matrix (default=15)
:return: $\Theta$ matrix
"""
ker = Kinterface(data=dframe.values, kernel=linear_kernel)
model = ICD(rank=n_dim)
model.fit(ker)
g_nystrom = model.G
return g_nystrom
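# A rough sketch of what the call above computes (hedged; based on how it is
# used here): ICD performs an incomplete Cholesky decomposition of the linear
# kernel matrix K (n x n), producing a low-rank factor G of shape (n, n_dim)
# with K ~= G.dot(G.T). That factor is what this function returns and what
# map_transform() below uses as the transformed feature space.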
def map_transform(src, tgt, n_components=5):
"""
Run a map and transform x and y onto a new space using TCA
:param src: IID samples
:param tgt: IID samples
:return: Mapped x and y
"""
s_col = [col for col in src.columns[:-1] if '?' not in col]
t_col = [col for col in tgt.columns[:-1] if '?' not in col]
S = src[s_col]
T = tgt[t_col]
col_name = ["Col_" + str(i) for i in xrange(n_components)]
x0 = pd.DataFrame(get_kernel_matrix(S, n_components), columns=col_name)
y0 = pd.DataFrame(get_kernel_matrix(T, n_components), columns=col_name)
# set_trace()
x0.loc[:, src.columns[-1]] = pd.Series(src[src.columns[-1]], index=x0.index)
y0.loc[:, tgt.columns[-1]] = pd.Series(tgt[tgt.columns[-1]], index=y0.index)
return x0, y0
def predict_defects(train, test, weka=False, cutoff=0.6):
"""
:param train:
:type train:
:param test:
:type test:
:param weka:
:type weka:
:return:
"""
actual = test[test.columns[-1]].values.tolist()
actual = [1 if act == "T" else 0 for act in actual]
if weka:
train.to_csv(root + '/TCA/tmp/train.csv', index=False)
test.to_csv(root + '/TCA/tmp/test.csv', index=False)
__, distr = classify(train=os.path.abspath(root + '/TCA/tmp/train.csv'),
test=os.path.abspath(root + '/TCA/tmp/test.csv'),
name="rf", tuning=False)
# set_trace()
predicted = [1 if d > cutoff else 0 for d in distr]
# Remove temporary csv files to avoid conflicts
os.remove(root + '/TCA/tmp/train.csv')
os.remove(root + '/TCA/tmp/test.csv')
else:
predicted, distr = logistic_model(train, test)
return actual, predicted, distr
def get_dcv(src, tgt):
"""Get dataset characteristic vector."""
s_col = [col for col in src.columns[:-1] if '?' not in col]
t_col = [col for col in tgt.columns[:-1] if '?' not in col]
S = src[s_col]
T = tgt[t_col]
def self_dist_mtx(arr):
dist_arr = pdist(arr)
return squareform(dist_arr)
dist_src = self_dist_mtx(S.values)
dist_tgt = self_dist_mtx(T.values)
dcv_src = [np.mean(dist_src), np.median(dist_src), np.min(dist_src),
np.max(dist_src), np.std(dist_src),
len(S.values)]
dcv_tgt = [np.mean(dist_tgt), np.median(dist_tgt), np.min(dist_tgt),
np.max(dist_tgt), np.std(dist_tgt),
len(T.values)]
return dcv_src, dcv_tgt
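# The characteristic vector produced above is, in order: mean, median, min, max
# and standard deviation of the pairwise instance distances, followed by the
# number of instances. sim() below compares these components one at a time.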
def sim(c_s, c_t, e=0):
if c_s[e] * 1.6 < c_t[e]:
return "VH" # Very High
if c_s[e] * 1.3 < c_t[e] <= c_s[e] * 1.6:
return "H" # High
if c_s[e] * 1.1 < c_t[e] <= c_s[e] * 1.3:
return "SH" # Slightly High
if c_s[e] * 0.9 <= c_t[e] <= c_s[e] * 1.1:
return "S" # Same
if c_s[e] * 0.7 <= c_t[e] < c_s[e] * 0.9:
return "SL" # Slightly Low
if c_s[e] * 0.4 <= c_t[e] < c_s[e] * 0.7:
return "L" # Low
if c_t[e] < c_s[e] * 0.4:
return "VL" # Very Low
def smart_norm(src, tgt, c_s, c_t):
"""
ARE THESE NORMS CORRECT?? OPEN AN ISSUE REPORT TO VERIFY
:param src:
:param tgt:
:param c_s:
:param c_t:
:return:
"""
try: # !!GUARD: PLEASE REMOVE AFTER DEBUGGING!!
# Rule 1
if sim(c_s, c_t, e=0) == "S" and sim(c_s, c_t, e=-2) == "S":
return src, tgt
# Rule 2
        elif sim(c_s, c_t, e=2) in ("VL", "VH") \
                and sim(c_s, c_t, e=3) in ("VL", "VH") \
                and sim(c_s, c_t, e=-1) in ("VL", "VH"):
return df_norm(src), df_norm(tgt)
# Rule 3.1
elif sim(c_s, c_t, e=-2) == "VH" and c_s[-1] > c_t[-1] or \
sim(c_s, c_t, e=-2) == "VL" and c_s[-1] < c_t[
-1]:
return df_norm(src, type="normal"), df_norm(tgt)
# Rule 4
elif sim(c_s, c_t, e=-2) == "VH" and c_s[-1] < c_t[-1] or \
sim(c_s, c_t, e=-2) == "VL" and c_s[-1] > c_t[
-1]:
return df_norm(src), df_norm(tgt, type="normal")
else:
return df_norm(src, type="normal"), df_norm(tgt, type="normal")
except:
# set_trace()
return src, tgt
def tca_plus(source, target, n_rep=12):
"""
TCA: Transfer Component Analysis
:param source:
:param target:
:param n_rep: number of repeats
:return: result
"""
result = dict()
stats = []
for tgt_name, tgt_path in target.iteritems():
print("TCA+ ")
val = []
for src_name, src_path in source.iteritems():
if not src_name == tgt_name:
# print("{} \r".format(src_name[0].upper() + src_name[1:]))
src = list2dataframe(src_path.data)
tgt = list2dataframe(tgt_path.data)
pd, pf, g, auc = [], [], [], []
dcv_src, dcv_tgt = get_dcv(src, tgt)
for _ in xrange(n_rep):
recall, loc = None, None
norm_src, norm_tgt = smart_norm(src, tgt, dcv_src, dcv_tgt)
_train, __test = map_transform(norm_src, norm_tgt)
try:
actual, predicted, distribution = predict_defects(
train=_train, test=__test)
except:
set_trace()
p_d, p_f, p_r, rc, f_1, e_d, _g, auroc = abcd(actual,
predicted,
distribution)
pd.append(p_d)
pf.append(p_f)
g.append(e_d)
auc.append(int(auroc))
stats.append([src_name, int(np.mean(pd)), int(np.std(pd)),
int(np.mean(pf)), int(np.std(pf)),
int(np.mean(g)), int(np.std(g))])
stats = pandas.DataFrame(
sorted(stats, key=lambda lst: lst[0], reverse=True),
columns=["Name", "Pd (Mean)", "Pd (Std)",
"Pf (Mean)", "Pf (Std)",
"AUC (Mean)", "AUC (Std)"]) # ,
return stats
def tca_plus_loo(source, target, n_rep=12):
"""
TCA: Transfer Component Analysis with leave one out crossvalidation
:param source:
:param target:
:param n_rep: number of repeats
:return: result
"""
result = dict()
for hld_name, hld_path in target.iteritems():
stats = []
holdout = hld_name
print("Holdout: {}".format(holdout))
for src_name, src_path in source.iteritems():
if not src_name == holdout:
pd, pf, pr, f1, g, auc = [], [], [], [], [], []
for tgt_name, tgt_path in target.iteritems():
if src_name != tgt_name and tgt_name != hld_name:
src = list2dataframe(src_path.data)
tgt = list2dataframe(tgt_path.data)
pd, pf, g, auc = [], [], [], []
dcv_src, dcv_tgt = get_dcv(src, tgt)
for _ in xrange(n_rep):
recall, loc = None, None
norm_src, norm_tgt = smart_norm(src, tgt, dcv_src, dcv_tgt)
_train, __test = map_transform(norm_src, norm_tgt)
try:
actual, predicted, distribution = predict_defects(
train=_train, test=__test)
except:
set_trace()
p_d, p_f, p_r, rc, f_1, e_d, _g, auroc = abcd(actual,
predicted,
distribution)
pd.append(p_d)
pf.append(p_f)
g.append(e_d)
auc.append(int(auroc))
stats.append([src_name, int(np.mean(pd)), int(np.std(pd)),
int(np.mean(pf)), int(np.std(pf)),
int(np.mean(g)), int(np.std(g))])
stats = pandas.DataFrame(
sorted(stats, key=lambda lst: lst[0], reverse=True),
columns=["Name", "Pd (Mean)", "Pd (Std)",
"Pf (Mean)", "Pf (Std)",
"AUC (Mean)", "AUC (Std)"]) # ,
return stats
def tca_plus_bellw(source, target, n_rep=12):
"""
TCA: Transfer Component Analysis
:param source:
:param target:
:param n_rep: number of repeats
:return: result
"""
result = dict()
print("TCA Plus")
stats = []
for tgt_name, tgt_path in target.iteritems():
val = []
for src_name, src_path in source.iteritems():
if src_name == 'lucene':
if not src_name == tgt_name:
src = list2dataframe([src_path.data[-1]])
tgt = list2dataframe([tgt_path.data[-1]])
pd, pf, g, auc = [], [], [], []
dcv_src, dcv_tgt = get_dcv(src, tgt)
for _ in xrange(n_rep):
recall, loc = None, None
norm_src, norm_tgt = smart_norm(src, tgt, dcv_src,
dcv_tgt)
_train, __test = map_transform(norm_src, norm_tgt)
try:
actual, predicted, distribution = predict_defects(
train=_train, test=__test)
except:
set_trace()
p_d, p_f, p_r, rc, f_1, e_d, _g, auroc = abcd(actual,
predicted,
distribution)
pd.append(p_d)
pf.append(p_f)
g.append(e_d)
auc.append(int(auroc))
stats.append([tgt_name, int(np.mean(pd)), int(np.std(pd)),
int(np.mean(pf)), int(np.std(pf)),
int(np.mean(g)), int(np.std(g))]) # ,
stats = pandas.DataFrame(
sorted(stats, key=lambda lst: lst[-2], reverse=True), # Sort by G Score
columns=["Name", "Pd (Mean)", "Pd (Std)",
"Pf (Mean)", "Pf (Std)",
"g (Mean)", "g (Std)"]) # ,
print(tabulate(stats
, headers=["Name", "Pd (Mean)", "Pd (Std)",
"Pf (Mean)", "Pf (Std)",
"g (Mean)", "g (Std)"]
, tablefmt="fancy_grid"))
return stats
def tca_jur():
from data.handler import get_all_projects
all = get_all_projects()
apache = all["Apache"]
return tca_plus(apache, apache, n_rep=1)
if __name__ == "__main__":
tca_jur()
|
|
# panel.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111,C0302,R0912,R0913,R0914,R0915,W0105,W0212
# PyPI imports
import numpy
import matplotlib.pyplot as plt
# Putil imports
import putil.exh
import putil.pcontracts
from .series import Series
from .functions import _F, _intelligent_ticks, _uniquify_tick_labels
from .constants import AXIS_LABEL_FONT_SIZE, AXIS_TICKS_FONT_SIZE, LEGEND_SCALE
###
# Exception tracing initialization code
###
"""
[[[cog
import os, sys
if sys.hexversion < 0x03000000:
import __builtin__
else:
import builtins as __builtin__
sys.path.append(os.environ['TRACER_DIR'])
import trace_ex_plot_panel
exobj_plot = trace_ex_plot_panel.trace_module(no_print=True)
]]]
[[[end]]]
"""
###
# Functions
###
def _legend_position_validation(obj):
""" Validate if a string is a valid legend position """
options = [
'BEST', 'UPPER RIGHT', 'UPPER LEFT', 'LOWER LEFT', 'LOWER RIGHT',
'RIGHT', 'CENTER LEFT', 'CENTER RIGHT', 'LOWER CENTER',
'UPPER CENTER', 'CENTER'
]
if (obj is not None) and (not isinstance(obj, str)):
return True
if ((obj is None) or
(obj and any([item.lower() == obj.lower() for item in options]))):
return False
return True
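# A quick sketch of the validator's convention: it returns False when the value
# is acceptable and True otherwise, e.g. _legend_position_validation('upper left')
# and _legend_position_validation(None) both return False (valid), while
# _legend_position_validation('top') and _legend_position_validation(5) return
# True (invalid).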
###
# Class
###
class Panel(object):
r"""
Defines a panel within a figure
:param series: One or more data series
:type series: :py:class:`putil.plot.Series` *or list of*
:py:class:`putil.plot.Series` *or None*
:param primary_axis_label: Primary dependent axis label
:type primary_axis_label: string
:param primary_axis_units: Primary dependent axis units
:type primary_axis_units: string
:param primary_axis_ticks: Primary dependent axis tick marks. If not None
overrides automatically generated tick
marks if the axis type is linear. If None
automatically generated tick marks are used for
the primary axis
:type primary_axis_ticks: list, Numpy vector or None
:param secondary_axis_label: Secondary dependent axis label
:type secondary_axis_label: string
:param secondary_axis_units: Secondary dependent axis units
:type secondary_axis_units: string
:param secondary_axis_ticks: Secondary dependent axis tick marks. If not
None overrides automatically generated tick
marks if the axis type is linear. If None
automatically generated tick marks are used
for the secondary axis
:type secondary_axis_ticks: list, Numpy vector or None
:param log_dep_axis: Flag that indicates whether the dependent (primary and
/or secondary) axis is linear (False) or logarithmic
(True)
:type log_dep_axis: boolean
:param legend_props: Legend properties. See
:py:attr:`putil.plot.Panel.legend_props`. If None the
legend is placed in the best position in one column
:type legend_props: dictionary or None
:param display_indep_axis: Flag that indicates whether the independent axis
is displayed (True) or not (False)
:type display_indep_axis: boolean
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.plot.panel.Panel.__init__
:raises:
* RuntimeError (Argument \`display_indep_axis\` is not valid)
* RuntimeError (Argument \`legend_props\` is not valid)
* RuntimeError (Argument \`log_dep_axis\` is not valid)
* RuntimeError (Argument \`primary_axis_label\` is not valid)
* RuntimeError (Argument \`primary_axis_ticks\` is not valid)
* RuntimeError (Argument \`primary_axis_units\` is not valid)
* RuntimeError (Argument \`secondary_axis_label\` is not valid)
* RuntimeError (Argument \`secondary_axis_ticks\` is not valid)
* RuntimeError (Argument \`secondary_axis_units\` is not valid)
* RuntimeError (Argument \`series\` is not valid)
* RuntimeError (Legend property \`cols\` is not valid)
* RuntimeError (Series item *[number]* is not fully specified)
* TypeError (Legend property \`pos\` is not one of ['BEST', 'UPPER
RIGHT', 'UPPER LEFT', 'LOWER LEFT', 'LOWER RIGHT', 'RIGHT', 'CENTER
LEFT', 'CENTER RIGHT', 'LOWER CENTER', 'UPPER CENTER', 'CENTER']
(case insensitive))
* ValueError (Illegal legend property \`*[prop_name]*\`)
* ValueError (Series item *[number]* cannot be plotted in a
logarithmic axis because it contains negative data points)
.. [[[end]]]
"""
# pylint: disable=R0902,R0903,W0102
def __init__(self, series=None, primary_axis_label='',
primary_axis_units='', primary_axis_ticks=None,
secondary_axis_label='', secondary_axis_units='',
secondary_axis_ticks=None, log_dep_axis=False,
legend_props=None, display_indep_axis=False):
# Private attributes
self._series = None
self._primary_axis_label = None
self._secondary_axis_label = None
self._primary_axis_units = None
self._secondary_axis_units = None
self._primary_axis_ticks = None
self._secondary_axis_ticks = None
self._log_dep_axis = None
self._recalculate_series = False
self._legend_props = {'pos':'BEST', 'cols':1}
self._display_indep_axis = None
        # Private attributes (internal state computed from the series)
self._legend_pos_list = [
'best', 'upper right', 'upper left', 'lower left', 'lower right',
'right', 'center left', 'center right', 'lower center',
'upper center', 'center'
]
self._panel_has_primary_axis = False
self._panel_has_secondary_axis = False
self._primary_dep_var_min = None
self._primary_dep_var_max = None
self._primary_dep_var_div = None
self._primary_dep_var_unit_scale = None
self._primary_dep_var_locs = None
self._primary_dep_var_labels = None
self._secondary_dep_var_min = None
self._secondary_dep_var_max = None
self._secondary_dep_var_div = None
self._secondary_dep_var_unit_scale = None
self._secondary_dep_var_locs = None
self._secondary_dep_var_labels = None
self._legend_props_list = ['pos', 'cols']
self._legend_props_pos_list = [
'BEST', 'UPPER RIGHT', 'UPPER LEFT', 'LOWER LEFT', 'LOWER RIGHT',
'RIGHT', 'CENTER LEFT', 'CENTER RIGHT', 'LOWER CENTER',
'UPPER CENTER', 'CENTER'
]
# Exceptions definition
invalid_prim_ex = putil.exh.addai('primary_axis_ticks')
invalid_sec_ex = putil.exh.addai('secondary_axis_ticks')
invalid_prim_ex(
(primary_axis_ticks is not None) and (
(not isinstance(primary_axis_ticks, list)) and
(not isinstance(primary_axis_ticks, numpy.ndarray))
)
)
invalid_sec_ex(
(secondary_axis_ticks is not None) and (
(not isinstance(secondary_axis_ticks, list)) and
(not isinstance(secondary_axis_ticks, numpy.ndarray)))
)
# Assignment of arguments to attributes
# Order here is important to avoid unnecessary re-calculating of
# panel axes if log_dep_axis is True
self._set_log_dep_axis(log_dep_axis)
self._primary_axis_ticks = (
primary_axis_ticks
if not self.log_dep_axis else
None
)
self._secondary_axis_ticks = (
secondary_axis_ticks
if not self.log_dep_axis else
None
)
self._set_series(series)
self._set_primary_axis_label(primary_axis_label)
self._set_primary_axis_units(primary_axis_units)
self._set_secondary_axis_label(secondary_axis_label)
self._set_secondary_axis_units(secondary_axis_units)
self._set_legend_props(legend_props)
self._set_display_indep_axis(display_indep_axis)
def __bool__(self): # pragma: no cover
"""
        Returns :code:`True` if the panel has at least one series associated
        with it, :code:`False` otherwise
.. note:: This method applies to Python 3.x
"""
return self._series is not None
def __iter__(self):
"""
Returns an iterator over the series object(s) in the panel. For
example:
.. =[=cog
.. import docs.support.incfile
.. docs.support.incfile.incfile('plot_example_6.py', cog.out)
.. =]=
.. code-block:: python
# plot_example_6.py
from __future__ import print_function
import numpy, putil.plot
def panel_iterator_example(no_print):
source1 = putil.plot.BasicSource(
indep_var=numpy.array([1, 2, 3, 4]),
dep_var=numpy.array([1, -10, 10, 5])
)
source2 = putil.plot.BasicSource(
indep_var=numpy.array([100, 200, 300, 400]),
dep_var=numpy.array([50, 75, 100, 125])
)
series1 = putil.plot.Series(
data_source=source1,
label='Goals'
)
series2 = putil.plot.Series(
data_source=source2,
label='Saves',
color='b',
marker=None,
interp='STRAIGHT',
line_style='--'
)
panel = putil.plot.Panel(
series=[series1, series2],
primary_axis_label='Time',
primary_axis_units='sec',
display_indep_axis=True
)
if not no_print:
for num, series in enumerate(panel):
print('Series {0}:'.format(num+1))
print(series)
print('')
else:
return panel
.. =[=end=]=
.. code-block:: python
>>> import docs.support.plot_example_6 as mod
>>> mod.panel_iterator_example(False)
Series 1:
Independent variable: [ 1.0, 2.0, 3.0, 4.0 ]
Dependent variable: [ 1.0, -10.0, 10.0, 5.0 ]
Label: Goals
Color: k
Marker: o
Interpolation: CUBIC
Line style: -
Secondary axis: False
<BLANKLINE>
Series 2:
Independent variable: [ 100.0, 200.0, 300.0, 400.0 ]
Dependent variable: [ 50.0, 75.0, 100.0, 125.0 ]
Label: Saves
Color: b
Marker: None
Interpolation: STRAIGHT
Line style: --
Secondary axis: False
<BLANKLINE>
"""
return iter(self._series)
def __nonzero__(self): # pragma: no cover
"""
        Returns :code:`True` if the panel has at least one series associated
        with it, :code:`False` otherwise
.. note:: This method applies to Python 2.x
"""
return self._series is not None
def _get_series(self):
return self._series
def _set_series(self, series):
# pylint: disable=C0103
self._series = (
(series if isinstance(series, list) else [series])
if series is not None else
series
)
self._recalculate_series = False
if self.series is not None:
self._validate_series()
self._panel_has_primary_axis = any(
[not series_obj.secondary_axis for series_obj in self.series]
)
self._panel_has_secondary_axis = any(
[series_obj.secondary_axis for series_obj in self.series]
)
comp_prim_dep_var = (
(not self.log_dep_axis) and self._panel_has_primary_axis
)
comp_sec_dep_var = (
(not self.log_dep_axis) and self._panel_has_secondary_axis
)
panel_has_primary_interp_series = any(
[
(not series_obj.secondary_axis) and
(series_obj.interp_dep_var is not None)
for series_obj in self.series
]
)
panel_has_secondary_interp_series = any(
[
series_obj.secondary_axis and
(series_obj.interp_dep_var is not None)
for series_obj in self.series
]
)
# Compute panel scaling factor
primary_min = None
prim_interp_min = None
secondary_min = None
sec_interp_min = None
primary_max = None
prim_interp_max = None
secondary_max = None
sec_interp_max = None
panel_min = None
panel_max = None
            # Find the union of all data points and the panel minimum and maximum.
            # If the panel has a logarithmic dependent axis, the limits are common
            # and equal to the union of the limits of both axes
# Primary axis
glob_prim_dep_var = (
numpy.unique(
numpy.concatenate(
[
series_obj.dep_var
for series_obj in self.series
if not series_obj.secondary_axis
]
)
)
if comp_prim_dep_var else
None
)
prim_interp_min = (
min(
[
min(series_obj.dep_var)
for series_obj in self.series
if ((not series_obj.secondary_axis) and
(series_obj.interp_dep_var is not None))
]
)
if panel_has_primary_interp_series else
None
)
prim_interp_max = (
max(
[
max(series_obj.dep_var)
for series_obj in self.series
if ((not series_obj.secondary_axis) and
(series_obj.interp_dep_var is not None))
]
)
if panel_has_primary_interp_series else
None
)
primary_min = (
min(min(glob_prim_dep_var), prim_interp_min)
if comp_prim_dep_var and (prim_interp_min is not None) else
(min(glob_prim_dep_var) if comp_prim_dep_var else None)
)
primary_max = (
max(max(glob_prim_dep_var), prim_interp_max)
if comp_prim_dep_var and (prim_interp_min is not None) else
(max(glob_prim_dep_var) if comp_prim_dep_var else None)
)
# Secondary axis
glob_sec_dep_var = (
numpy.unique(
numpy.concatenate(
[
series_obj.dep_var
for series_obj in self.series
if series_obj.secondary_axis
]
)
)
if comp_sec_dep_var else
None
)
sec_interp_min = (
min(
[
min(series_obj.dep_var)
for series_obj in self.series
if (series_obj.secondary_axis and
(series_obj.interp_dep_var is not None))
]
).tolist()
if panel_has_secondary_interp_series else
None
)
sec_interp_max = (
max(
[
max(series_obj.dep_var)
for series_obj in self.series
if (series_obj.secondary_axis and
(series_obj.interp_dep_var is not None))
]
).tolist()
if panel_has_secondary_interp_series else
None
)
secondary_min = (
min(min(glob_sec_dep_var), sec_interp_min)
if comp_sec_dep_var and (sec_interp_min is not None) else
(min(glob_sec_dep_var) if comp_sec_dep_var else None)
)
secondary_max = (
max(max(glob_sec_dep_var), sec_interp_max)
if comp_sec_dep_var and (sec_interp_max is not None) else
(max(glob_sec_dep_var) if comp_sec_dep_var else None)
)
# Global (for logarithmic dependent axis)
glob_panel_dep_var = (
None
if not self.log_dep_axis else
numpy.unique(
numpy.concatenate(
[series_obj.dep_var for series_obj in self.series]
)
)
)
panel_min = (
min(min(glob_panel_dep_var), prim_interp_min)
if self.log_dep_axis and panel_has_primary_interp_series else
(min(glob_panel_dep_var) if self.log_dep_axis else None)
)
panel_max = (
max(max(glob_panel_dep_var), prim_interp_max)
if self.log_dep_axis and panel_has_primary_interp_series else
(max(glob_panel_dep_var) if self.log_dep_axis else None)
)
panel_min = (
min(min(glob_panel_dep_var), sec_interp_min)
if self.log_dep_axis and panel_has_secondary_interp_series else
(min(glob_panel_dep_var) if self.log_dep_axis else None)
)
panel_max = (
max(max(glob_panel_dep_var), sec_interp_max)
if self.log_dep_axis and panel_has_secondary_interp_series else
(max(glob_panel_dep_var) if self.log_dep_axis else None)
)
# Get axis tick marks locations
if comp_prim_dep_var:
(
self._primary_dep_var_locs,
self._primary_dep_var_labels,
self._primary_dep_var_min,
self._primary_dep_var_max,
self._primary_dep_var_div,
self._primary_dep_var_unit_scale
) = _intelligent_ticks(
glob_prim_dep_var,
primary_min,
primary_max,
tight=False,
log_axis=self.log_dep_axis,
tick_list=self._primary_axis_ticks,
)
if comp_sec_dep_var:
(
self._secondary_dep_var_locs,
self._secondary_dep_var_labels,
self._secondary_dep_var_min,
self._secondary_dep_var_max,
self._secondary_dep_var_div,
self._secondary_dep_var_unit_scale
) = _intelligent_ticks(
glob_sec_dep_var,
secondary_min,
secondary_max,
tight=False,
log_axis=self.log_dep_axis,
tick_list=self._secondary_axis_ticks,
)
if self.log_dep_axis and self._panel_has_primary_axis:
(
self._primary_dep_var_locs,
self._primary_dep_var_labels,
self._primary_dep_var_min,
self._primary_dep_var_max,
self._primary_dep_var_div,
self._primary_dep_var_unit_scale
) = _intelligent_ticks(
glob_panel_dep_var,
panel_min,
panel_max,
tight=False,
log_axis=self.log_dep_axis
)
if self.log_dep_axis and self._panel_has_secondary_axis:
(
self._secondary_dep_var_locs,
self._secondary_dep_var_labels,
self._secondary_dep_var_min,
self._secondary_dep_var_max,
self._secondary_dep_var_div,
self._secondary_dep_var_unit_scale
) = _intelligent_ticks(
glob_panel_dep_var,
panel_min,
panel_max,
tight=False,
log_axis=self.log_dep_axis
)
            # Equalize the number of ticks on the primary and secondary axes so
            # that the ticks fall at the same relative positions within the
            # dependent variable plotting interval (for non-logarithmic panels).
            # If there is any tick override (primary and/or secondary) this is
            # not done; the user assumes responsibility for the aesthetics of
            # the final result
if ((not self.log_dep_axis) and
self._panel_has_primary_axis and
self._panel_has_secondary_axis and
(self._primary_axis_ticks is None) and
(self._secondary_axis_ticks is None)):
max_ticks = max(
len(self._primary_dep_var_locs),
len(self._secondary_dep_var_locs)
)-1
primary_delta = (
(
self._primary_dep_var_locs[-1]-
self._primary_dep_var_locs[0]
)
/
float(max_ticks)
)
secondary_delta = (
(
self._secondary_dep_var_locs[-1]-
self._secondary_dep_var_locs[0]
)
/
float(max_ticks)
)
self._primary_dep_var_locs = [
self._primary_dep_var_locs[0]+(num*primary_delta)
for num in range(max_ticks+1)
]
self._secondary_dep_var_locs = [
self._secondary_dep_var_locs[0]+(num*secondary_delta)
for num in range(max_ticks+1)
]
(
self._primary_dep_var_locs,
self._primary_dep_var_labels
) = _uniquify_tick_labels(
self._primary_dep_var_locs,
self._primary_dep_var_locs[0],
self._primary_dep_var_locs[-1]
)
(
self._secondary_dep_var_locs,
self._secondary_dep_var_labels
) = _uniquify_tick_labels(
self._secondary_dep_var_locs,
self._secondary_dep_var_locs[0],
self._secondary_dep_var_locs[-1]
)
self._primary_axis_ticks = self._primary_dep_var_locs
self._secondary_axis_ticks = self._secondary_dep_var_locs
# Scale panel
self._scale_dep_var(
self._primary_dep_var_div,
self._secondary_dep_var_div
)
def _get_primary_axis_scale(self):
return self._primary_dep_var_div
def _get_primary_axis_ticks(self):
return self._primary_axis_ticks
def _get_secondary_axis_scale(self):
return self._secondary_dep_var_div
def _get_secondary_axis_ticks(self):
return self._secondary_axis_ticks
def _get_primary_axis_label(self):
return self._primary_axis_label
@putil.pcontracts.contract(primary_axis_label='None|str')
def _set_primary_axis_label(self, primary_axis_label):
self._primary_axis_label = primary_axis_label
def _get_primary_axis_units(self):
return self._primary_axis_units
@putil.pcontracts.contract(primary_axis_units='None|str')
def _set_primary_axis_units(self, primary_axis_units):
self._primary_axis_units = primary_axis_units
def _get_secondary_axis_label(self):
return self._secondary_axis_label
@putil.pcontracts.contract(secondary_axis_label='None|str')
def _set_secondary_axis_label(self, secondary_axis_label):
self._secondary_axis_label = secondary_axis_label
def _get_secondary_axis_units(self):
return self._secondary_axis_units
@putil.pcontracts.contract(secondary_axis_units='None|str')
def _set_secondary_axis_units(self, secondary_axis_units):
self._secondary_axis_units = secondary_axis_units
def _get_log_dep_axis(self):
return self._log_dep_axis
@putil.pcontracts.contract(log_dep_axis='None|bool')
def _set_log_dep_axis(self, log_dep_axis):
self._recalculate_series = self.log_dep_axis != log_dep_axis
self._log_dep_axis = log_dep_axis
if self._recalculate_series:
self._set_series(self._series)
def _get_display_indep_axis(self):
return self._display_indep_axis
@putil.pcontracts.contract(display_indep_axis='None|bool')
def _set_display_indep_axis(self, display_indep_axis):
self._display_indep_axis = display_indep_axis
def _get_legend_props(self):
return self._legend_props
@putil.pcontracts.contract(legend_props='None|dict')
def _set_legend_props(self, legend_props):
invalid_ex = putil.exh.addex(
ValueError, 'Illegal legend property `*[prop_name]*`'
)
illegal_ex = putil.exh.addex(
TypeError,
"Legend property `pos` is not one of ['BEST', 'UPPER RIGHT', "
"'UPPER LEFT', 'LOWER LEFT', 'LOWER RIGHT', 'RIGHT', "
"'CENTER LEFT', 'CENTER RIGHT', 'LOWER CENTER', "
"'UPPER CENTER', 'CENTER'] (case insensitive)"
)
cols_ex = putil.exh.addex(
RuntimeError, 'Legend property `cols` is not valid'
)
self._legend_props = (
legend_props
if legend_props is not None else
{'pos':'BEST', 'cols':1}
)
self._legend_props.setdefault('pos', 'BEST')
self._legend_props.setdefault('cols', 1)
for key, value in self.legend_props.items():
invalid_ex(
key not in self._legend_props_list, _F('prop_name', key)
)
illegal_ex(
(key == 'pos') and
_legend_position_validation(self.legend_props['pos'])
)
cols_ex(
((key == 'cols') and (not isinstance(value, int))) or
((key == 'cols') and
(isinstance(value, int) is True) and (value < 0))
)
self._legend_props['pos'] = self._legend_props['pos'].upper()
def __str__(self):
"""
Prints panel information. For example:
.. code-block:: python
>>> from __future__ import print_function
>>> import docs.support.plot_example_6 as mod
>>> print(mod.panel_iterator_example(True))
Series 0:
Independent variable: [ 1.0, 2.0, 3.0, 4.0 ]
Dependent variable: [ 1.0, -10.0, 10.0, 5.0 ]
Label: Goals
Color: k
Marker: o
Interpolation: CUBIC
Line style: -
Secondary axis: False
Series 1:
Independent variable: [ 100.0, 200.0, 300.0, 400.0 ]
Dependent variable: [ 50.0, 75.0, 100.0, 125.0 ]
Label: Saves
Color: b
Marker: None
Interpolation: STRAIGHT
Line style: --
Secondary axis: False
Primary axis label: Time
Primary axis units: sec
Secondary axis label: not specified
Secondary axis units: not specified
Logarithmic dependent axis: False
Display independent axis: True
Legend properties:
cols: 1
pos: BEST
"""
ret = ''
if (self.series is None) or (len(self.series) == 0):
ret += 'Series: None\n'
else:
for num, element in enumerate(self.series):
ret += 'Series {0}:\n'.format(num)
temp = str(element).split('\n')
temp = [3*' '+line for line in temp]
ret += '\n'.join(temp)
ret += '\n'
ret += 'Primary axis label: {0}\n'.format(
self.primary_axis_label
if self.primary_axis_label not in ['', None] else
'not specified'
)
ret += 'Primary axis units: {0}\n'.format(
self.primary_axis_units
if self.primary_axis_units not in ['', None] else
'not specified'
)
ret += 'Secondary axis label: {0}\n'.format(
self.secondary_axis_label
if self.secondary_axis_label not in ['', None] else
'not specified'
)
ret += 'Secondary axis units: {0}\n'.format(
self.secondary_axis_units
if self.secondary_axis_units not in ['', None] else
'not specified'
)
ret += 'Logarithmic dependent axis: {0}\n'.format(self.log_dep_axis)
ret += (
'Display independent '
'axis: {0}\n'.format(self.display_indep_axis)
)
ret += 'Legend properties:\n'
iobj = enumerate(sorted(list(self.legend_props.items())))
for num, (key, value) in iobj:
ret += ' {0}: {1}{2}'.format(
key, value, '\n' if num+1 < len(self.legend_props) else ''
)
return ret
def _validate_series(self):
"""
Verifies that elements of series list are of the right type and
fully specified
"""
invalid_ex = putil.exh.addai('series')
incomplete_ex = putil.exh.addex(
RuntimeError, 'Series item *[number]* is not fully specified'
)
log_ex = putil.exh.addex(
ValueError,
'Series item *[number]* cannot be plotted in a logarithmic '
'axis because it contains negative data points'
)
for num, obj in enumerate(self.series):
invalid_ex(not isinstance(obj, Series))
incomplete_ex(not obj._complete, _F('number', num))
log_ex(
bool((min(obj.dep_var) <= 0) and self.log_dep_axis),
_F('number', num)
)
def _get_complete(self):
"""
Returns True if panel is fully specified, otherwise returns False
"""
return (self.series is not None) and (len(self.series) > 0)
def _scale_indep_var(self, scaling_factor):
""" Scale independent variable of panel series """
for series_obj in self.series:
series_obj._scale_indep_var(scaling_factor)
def _scale_dep_var(self, primary_scaling_factor, secondary_scaling_factor):
""" Scale dependent variable of panel series """
for series_obj in self.series:
if not series_obj.secondary_axis:
series_obj._scale_dep_var(primary_scaling_factor)
else:
series_obj._scale_dep_var(secondary_scaling_factor)
def _setup_axis(self, axis_type, axis_obj, dep_min, dep_max, tick_locs,
tick_labels, axis_label, axis_units, axis_scale):
""" Configure dependent axis """
# pylint: disable=R0201
# Set function pointers
xflist = [
axis_obj.xaxis.grid, axis_obj.set_xlim, axis_obj.xaxis.set_ticks,
axis_obj.xaxis.set_ticklabels, axis_obj.xaxis.set_label_text
]
yflist = [
axis_obj.yaxis.grid, axis_obj.set_ylim, axis_obj.yaxis.set_ticks,
axis_obj.yaxis.set_ticklabels, axis_obj.yaxis.set_label_text
]
(fgrid, flim, fticks, fticklabels, fset_label_text) = (
xflist
if axis_type.upper() == 'INDEP' else
yflist
)
# Process
fgrid(True, 'both')
flim((dep_min, dep_max), emit=True, auto=False)
fticks(tick_locs)
axis_obj.tick_params(
axis='x' if axis_type.upper() == 'INDEP' else 'y',
which='major',
labelsize=AXIS_TICKS_FONT_SIZE
)
fticklabels(tick_labels)
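# Compose the axis label text; illustrative example: with
# axis_label='Voltage', axis_units='V' and axis_scale='m' the
# resulting label reads 'Voltage [mV]'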
if (axis_label not in [None, '']) or (axis_units not in [None, '']):
axis_label = '' if axis_label is None else axis_label.strip()
unit_scale = '' if axis_scale is None else axis_scale.strip()
fset_label_text(
axis_label +
(
''
if (unit_scale == '') and (axis_units == '') else
(
' [{unit_scale}{units}]'.format(
unit_scale=unit_scale,
units='-' if axis_units == '' else axis_units
)
)
),
fontdict={'fontsize':AXIS_LABEL_FONT_SIZE}
)
def _draw_panel(self, axarr_prim, indep_axis_dict, print_indep_axis):
""" Draw panel series """
# pylint: disable=W0612
axarr_sec = (
axarr_prim.twinx()
if self._panel_has_secondary_axis else
None
)
# Place data series in their appropriate axis (primary or secondary)
for series_obj in self.series:
series_obj._draw_series(
axarr_prim if not series_obj.secondary_axis else axarr_sec,
indep_axis_dict['log_indep'],
self.log_dep_axis
)
# Set up tick labels and axis labels
if self._panel_has_primary_axis:
self._setup_axis(
'DEP',
axarr_prim,
self._primary_dep_var_min,
self._primary_dep_var_max,
self._primary_dep_var_locs,
self._primary_dep_var_labels,
self.primary_axis_label,
self.primary_axis_units,
self._primary_dep_var_unit_scale
)
if self._panel_has_secondary_axis:
self._setup_axis(
'DEP',
axarr_sec,
self._secondary_dep_var_min,
self._secondary_dep_var_max,
self._secondary_dep_var_locs,
self._secondary_dep_var_labels,
self.secondary_axis_label,
self.secondary_axis_units,
self._secondary_dep_var_unit_scale
)
if ((not self._panel_has_primary_axis) and
self._panel_has_secondary_axis):
axarr_prim.yaxis.set_visible(False)
# Print legend
if (len(self.series) > 1) and (len(self.legend_props) > 0):
_, primary_labels = (
axarr_prim.get_legend_handles_labels()
if self._panel_has_primary_axis else
(None, [])
)
_, secondary_labels = (
axarr_sec.get_legend_handles_labels()
if self._panel_has_secondary_axis else
(None, [])
)
lprim = len(primary_labels)
lsec = len(secondary_labels)
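# When both axes contribute labeled series, primary labels are
# prefixed with a left arrow and secondary labels suffixed with a
# right arrow so the legend shows which axis each series belongs to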
labels = (
(
[r'$\Leftarrow$'+label for label in primary_labels]+
[label+r'$\Rightarrow$' for label in secondary_labels]
)
if (lprim > 0) and (lsec > 0) else
primary_labels+secondary_labels
)
if any([bool(label) for label in labels]):
leg_artist = [
series_obj._legend_artist(LEGEND_SCALE)
for series_obj in self.series
if series_obj._check_series_is_plottable()
]
legend_axis = (
axarr_prim
if self._panel_has_primary_axis else
axarr_sec
)
loc_key = self._legend_pos_list.index(
self.legend_props['pos'].lower()
if 'pos' in self.legend_props else 'lower left'
)
legend_axis.legend(
leg_artist,
labels,
ncol=self.legend_props['cols']
if 'cols' in self.legend_props else
len(labels),
loc=self._legend_pos_list[loc_key],
numpoints=1,
fontsize=AXIS_LABEL_FONT_SIZE/LEGEND_SCALE
)
# Work around a Matplotlib issue where, when there are both a
# primary and a secondary axis, the legend box of one axis is
# transparent to the axis/series of the other
# From: http://stackoverflow.com/questions/17158469/
# legend-transparency-when-using-secondary-axis
if (self._panel_has_primary_axis and
self._panel_has_secondary_axis):
axarr_prim.set_zorder(1)
axarr_prim.set_frame_on(False)
axarr_sec.set_frame_on(True)
# Print independent axis tick marks and label
(indep_var_min, indep_var_max, indep_var_locs) = (
indep_axis_dict['indep_var_min'],
indep_axis_dict['indep_var_max'],
indep_axis_dict['indep_var_locs']
)
indep_var_labels = (
indep_axis_dict['indep_var_labels']
if ('indep_var_labels' in indep_axis_dict) and
(indep_axis_dict['indep_var_labels'] is not None) else
None
)
indep_axis_label = (
''
if indep_axis_dict['indep_axis_label'] is None or
not print_indep_axis else
indep_axis_dict['indep_axis_label'].strip()
)
indep_axis_units = (
''
if indep_axis_dict['indep_axis_units'] is None or
not print_indep_axis else
indep_axis_dict['indep_axis_units'].strip()
)
indep_axis_unit_scale = (
''
if indep_axis_dict['indep_axis_unit_scale'] is None or
not print_indep_axis else
indep_axis_dict['indep_axis_unit_scale'].strip()
)
self._setup_axis(
'INDEP',
axarr_prim,
indep_var_min,
indep_var_max,
indep_var_locs,
indep_var_labels,
indep_axis_label,
indep_axis_units,
indep_axis_unit_scale
)
plt.setp(axarr_prim.get_xticklabels(), visible=print_indep_axis)
return {
'primary':(
None
if not self._panel_has_primary_axis else
axarr_prim
),
'secondary':(
None
if not self._panel_has_secondary_axis else
axarr_sec
)
}
_complete = property(_get_complete)
display_indep_axis = property(
_get_display_indep_axis,
_set_display_indep_axis,
doc='Show independent axis flag'
)
r"""
Gets or sets the independent axis display flag; indicates whether the
independent axis is displayed (True) or not (False)
:type: boolean
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.plot.panel.Panel.display_indep_axis
:raises: (when assigned) RuntimeError (Argument \`display_indep_axis\`
is not valid)
.. [[[end]]]
"""
legend_props = property(
_get_legend_props, _set_legend_props, doc='Panel legend box properties'
)
r"""
Gets or sets the panel legend box properties; this is a dictionary that
has properties (dictionary key) and their associated values (dictionary
values). Currently supported properties are:
* **pos** (*string*) -- legend box position, one of :code:`'BEST'`,
:code:`'UPPER RIGHT'`, :code:`'UPPER LEFT'`, :code:`'LOWER LEFT'`,
:code:`'LOWER RIGHT'`, :code:`'RIGHT'`, :code:`'CENTER LEFT'`,
:code:`'CENTER RIGHT'`, :code:`'LOWER CENTER'`, :code:`'UPPER CENTER'`
or :code:`'CENTER'` (case insensitive)
* **cols** (*integer*) -- number of columns of the legend box
If :code:`None` the default used is :code:`{'pos':'BEST', 'cols':1}`
.. note:: No legend is shown if a panel has only one series in it or if no
series has a label
:type: dictionary
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.plot.panel.Panel.legend_props
:raises: (when assigned)
* RuntimeError (Argument \`legend_props\` is not valid)
* RuntimeError (Legend property \`cols\` is not valid)
* TypeError (Legend property \`pos\` is not one of ['BEST', 'UPPER
RIGHT', 'UPPER LEFT', 'LOWER LEFT', 'LOWER RIGHT', 'RIGHT', 'CENTER
LEFT', 'CENTER RIGHT', 'LOWER CENTER', 'UPPER CENTER', 'CENTER']
(case insensitive))
* ValueError (Illegal legend property \`*[prop_name]*\`)
.. [[[end]]]
"""
log_dep_axis = property(
_get_log_dep_axis,
_set_log_dep_axis,
doc='Panel logarithmic dependent axis flag'
)
r"""
Gets or sets the panel logarithmic dependent (primary and/or secondary)
axis flag; indicates whether the dependent (primary and/or secondary) axis
is linear (False) or logarithmic (True)
:type: boolean
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.plot.panel.Panel.log_dep_axis
:raises: (when assigned)
* RuntimeError (Argument \`log_dep_axis\` is not valid)
* RuntimeError (Argument \`series\` is not valid)
* RuntimeError (Series item *[number]* is not fully specified)
* ValueError (Series item *[number]* cannot be plotted in a
logarithmic axis because it contains negative data points)
.. [[[end]]]
"""
primary_axis_label = property(
_get_primary_axis_label,
_set_primary_axis_label,
doc='Panel primary axis label'
)
r"""
Gets or sets the panel primary dependent axis label
:type: string
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.plot.panel.Panel.primary_axis_label
:raises: (when assigned) RuntimeError (Argument \`primary_axis_label\`
is not valid)
.. [[[end]]]
"""
primary_axis_scale = property(
_get_primary_axis_scale, doc='Primary axis scale'
)
"""
Gets the scale of the panel primary axis, :code:`None` if axis has no
series associated with it
:type: float or None
"""
primary_axis_ticks = property(
_get_primary_axis_ticks, doc='Primary axis tick locations'
)
"""
Gets the primary axis (scaled) tick locations, :code:`None` if axis has no
series associated with it
:type: list or None
"""
primary_axis_units = property(
_get_primary_axis_units,
_set_primary_axis_units,
doc='Panel primary axis units'
)
r"""
Gets or sets the panel primary dependent axis units
:type: string
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.plot.panel.Panel.primary_axis_units
:raises: (when assigned) RuntimeError (Argument \`primary_axis_units\`
is not valid)
.. [[[end]]]
"""
secondary_axis_label = property(
_get_secondary_axis_label,
_set_secondary_axis_label,
doc='Panel secondary axis label'
)
r"""
Gets or sets the panel secondary dependent axis label
:type: string
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.plot.panel.Panel.secondary_axis_label
:raises: (when assigned) RuntimeError (Argument
\`secondary_axis_label\` is not valid)
.. [[[end]]]
"""
secondary_axis_scale = property(
_get_secondary_axis_scale,
doc='Secondary axis scale'
)
"""
Gets the scale of the panel secondary axis, :code:`None` if axis has no
series associated with it
:type: float or None
"""
secondary_axis_ticks = property(
_get_secondary_axis_ticks, doc='Secondary axis tick locations'
)
"""
Gets the secondary axis (scaled) tick locations, :code:`None` if axis has
no series associated with it
:type: list or None
"""
secondary_axis_units = property(
_get_secondary_axis_units,
_set_secondary_axis_units,
doc='Panel secondary axis units'
)
r"""
Gets or sets the panel secondary dependent axis units
:type: string
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.plot.panel.Panel.secondary_axis_units
:raises: (when assigned) RuntimeError (Argument
\`secondary_axis_units\` is not valid)
.. [[[end]]]
"""
series = property(_get_series, _set_series, doc='Panel series')
r"""
Gets or sets the panel series, :code:`None` if there are no series
associated with the panel
:type: :py:class:`putil.plot.Series`, list of
:py:class:`putil.plot.Series` or None
.. [[[cog cog.out(exobj_plot.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. putil.plot.panel.Panel.series
:raises: (when assigned)
* RuntimeError (Argument \`series\` is not valid)
* RuntimeError (Series item *[number]* is not fully specified)
* ValueError (Series item *[number]* cannot be plotted in a
logarithmic axis because it contains negative data points)
.. [[[end]]]
"""
|
|
import base64
from django.contrib import messages
from django.contrib.auth.decorators import permission_required, login_required
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.db import transaction, IntegrityError
from django.db.models import Count
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.utils.decorators import method_decorator
from dcim.models import Device
from utilities.views import BulkDeleteView, BulkEditView, ObjectDeleteView, ObjectEditView, ObjectListView
from . import filters, forms, tables
from .decorators import userkey_required
from .models import SecretRole, Secret, SessionKey
def get_session_key(request):
"""
Extract and decode the session key sent with a request. Returns None if no session key was provided.
"""
session_key = request.COOKIES.get('session_key', None)
if session_key is not None:
return base64.b64decode(session_key)
return session_key
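# Example usage (illustrative, mirroring the views below): the decoded
# session key is passed to SessionKey.get_master_key() to recover the
# master key needed to encrypt or decrypt Secret objects:
#
#     session_key = get_session_key(request)
#     sk = SessionKey.objects.get(userkey__user=request.user)
#     master_key = sk.get_master_key(session_key)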
#
# Secret roles
#
class SecretRoleListView(ObjectListView):
queryset = SecretRole.objects.annotate(secret_count=Count('secrets'))
table = tables.SecretRoleTable
template_name = 'secrets/secretrole_list.html'
class SecretRoleEditView(PermissionRequiredMixin, ObjectEditView):
permission_required = 'secrets.change_secretrole'
model = SecretRole
form_class = forms.SecretRoleForm
def get_return_url(self, request, obj):
return reverse('secrets:secretrole_list')
class SecretRoleBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
permission_required = 'secrets.delete_secretrole'
cls = SecretRole
default_return_url = 'secrets:secretrole_list'
#
# Secrets
#
@method_decorator(login_required, name='dispatch')
class SecretListView(ObjectListView):
queryset = Secret.objects.select_related('role', 'device')
filter = filters.SecretFilter
filter_form = forms.SecretFilterForm
table = tables.SecretTable
template_name = 'secrets/secret_list.html'
@login_required
def secret(request, pk):
secret = get_object_or_404(Secret, pk=pk)
return render(request, 'secrets/secret.html', {
'secret': secret,
})
@permission_required('secrets.add_secret')
@userkey_required()
def secret_add(request, pk):
# Retrieve device
device = get_object_or_404(Device, pk=pk)
secret = Secret(device=device)
session_key = get_session_key(request)
if request.method == 'POST':
form = forms.SecretForm(request.POST, instance=secret)
if form.is_valid():
# We need a valid session key in order to create a Secret
if session_key is None:
form.add_error(None, "No session key was provided with the request. Unable to encrypt secret data.")
# Create and encrypt the new Secret
else:
master_key = None
try:
sk = SessionKey.objects.get(userkey__user=request.user)
master_key = sk.get_master_key(session_key)
except SessionKey.DoesNotExist:
form.add_error(None, "No session key found for this user.")
if master_key is not None:
secret = form.save(commit=False)
secret.plaintext = str(form.cleaned_data['plaintext'])
secret.encrypt(master_key)
secret.save()
messages.success(request, u"Added new secret: {}.".format(secret))
if '_addanother' in request.POST:
return redirect('dcim:device_addsecret', pk=device.pk)
else:
return redirect('secrets:secret', pk=secret.pk)
else:
form = forms.SecretForm(instance=secret)
return render(request, 'secrets/secret_edit.html', {
'secret': secret,
'form': form,
'return_url': device.get_absolute_url(),
})
@permission_required('secrets.change_secret')
@userkey_required()
def secret_edit(request, pk):
secret = get_object_or_404(Secret, pk=pk)
session_key = get_session_key(request)
if request.method == 'POST':
form = forms.SecretForm(request.POST, instance=secret)
if form.is_valid():
# Re-encrypt the Secret if a plaintext and session key have been provided.
if form.cleaned_data['plaintext'] and session_key is not None:
# Retrieve the master key using the provided session key
master_key = None
try:
sk = SessionKey.objects.get(userkey__user=request.user)
master_key = sk.get_master_key(session_key)
except SessionKey.DoesNotExist:
form.add_error(None, "No session key found for this user.")
# Encrypt and save the updated Secret
if master_key is not None:
secret = form.save(commit=False)
secret.plaintext = str(form.cleaned_data['plaintext'])
secret.encrypt(master_key)
secret.save()
messages.success(request, u"Modified secret {}.".format(secret))
return redirect('secrets:secret', pk=secret.pk)
else:
form.add_error(None, "Invalid session key. Unable to encrypt secret data.")
# We can't save the plaintext without a session key.
elif form.cleaned_data['plaintext']:
form.add_error(None, "No session key was provided with the request. Unable to encrypt secret data.")
# If no new plaintext was specified, a session key is not needed.
else:
secret = form.save()
messages.success(request, u"Modified secret {}.".format(secret))
return redirect('secrets:secret', pk=secret.pk)
else:
form = forms.SecretForm(instance=secret)
return render(request, 'secrets/secret_edit.html', {
'secret': secret,
'form': form,
'return_url': reverse('secrets:secret', kwargs={'pk': secret.pk}),
})
class SecretDeleteView(PermissionRequiredMixin, ObjectDeleteView):
permission_required = 'secrets.delete_secret'
model = Secret
default_return_url = 'secrets:secret_list'
@permission_required('secrets.add_secret')
@userkey_required()
def secret_import(request):
session_key = request.COOKIES.get('session_key', None)
if request.method == 'POST':
form = forms.SecretImportForm(request.POST)
if session_key is None:
form.add_error(None, "No session key was provided with the request. Unable to encrypt secret data.")
if form.is_valid():
new_secrets = []
session_key = base64.b64decode(session_key)
master_key = None
try:
sk = SessionKey.objects.get(userkey__user=request.user)
master_key = sk.get_master_key(session_key)
except SessionKey.DoesNotExist:
form.add_error(None, "No session key found for this user.")
if master_key is None:
form.add_error(None, "Invalid private key! Unable to encrypt secret data.")
else:
try:
with transaction.atomic():
for secret in form.cleaned_data['csv']:
secret.encrypt(master_key)
secret.save()
new_secrets.append(secret)
table = tables.SecretTable(new_secrets)
messages.success(request, u"Imported {} new secrets.".format(len(new_secrets)))
return render(request, 'import_success.html', {
'table': table,
'return_url': 'secrets:secret_list',
})
except IntegrityError as e:
form.add_error('csv', "Record {}: {}".format(len(new_secrets) + 1, e.__cause__))
else:
form = forms.SecretImportForm()
return render(request, 'secrets/secret_import.html', {
'form': form,
'return_url': 'secrets:secret_list',
})
class SecretBulkEditView(PermissionRequiredMixin, BulkEditView):
permission_required = 'secrets.change_secret'
cls = Secret
filter = filters.SecretFilter
form = forms.SecretBulkEditForm
template_name = 'secrets/secret_bulk_edit.html'
default_return_url = 'secrets:secret_list'
class SecretBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
permission_required = 'secrets.delete_secret'
cls = Secret
filter = filters.SecretFilter
default_return_url = 'secrets:secret_list'
|
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Command-line support for coverage.py."""
import glob
import optparse
import os.path
import sys
import traceback
from coverage import env
from coverage.execfile import run_python_file, run_python_module
from coverage.misc import CoverageException, ExceptionDuringRun, NoSource
from coverage.debug import info_formatter, info_header
class Opts(object):
"""A namespace class for individual options we'll build parsers from."""
append = optparse.make_option(
'-a', '--append', action='store_true',
help="Append coverage data to .coverage, otherwise it is started "
"clean with each run."
)
branch = optparse.make_option(
'', '--branch', action='store_true',
help="Measure branch coverage in addition to statement coverage."
)
CONCURRENCY_CHOICES = [
"thread", "gevent", "greenlet", "eventlet", "multiprocessing",
]
concurrency = optparse.make_option(
'', '--concurrency', action='store', metavar="LIB",
choices=CONCURRENCY_CHOICES,
help="Properly measure code using a concurrency library. "
"Valid values are: %s." % ", ".join(CONCURRENCY_CHOICES)
)
debug = optparse.make_option(
'', '--debug', action='store', metavar="OPTS",
help="Debug options, separated by commas"
)
directory = optparse.make_option(
'-d', '--directory', action='store', metavar="DIR",
help="Write the output files to DIR."
)
fail_under = optparse.make_option(
'', '--fail-under', action='store', metavar="MIN", type="int",
help="Exit with a status of 2 if the total coverage is less than MIN."
)
help = optparse.make_option(
'-h', '--help', action='store_true',
help="Get help on this command."
)
ignore_errors = optparse.make_option(
'-i', '--ignore-errors', action='store_true',
help="Ignore errors while reading source files."
)
include = optparse.make_option(
'', '--include', action='store',
metavar="PAT1,PAT2,...",
help="Include only files whose paths match one of these patterns. "
"Accepts shell-style wildcards, which must be quoted."
)
pylib = optparse.make_option(
'-L', '--pylib', action='store_true',
help="Measure coverage even inside the Python installed library, "
"which isn't done by default."
)
show_missing = optparse.make_option(
'-m', '--show-missing', action='store_true',
help="Show line numbers of statements in each module that weren't "
"executed."
)
skip_covered = optparse.make_option(
'--skip-covered', action='store_true',
help="Skip files with 100% coverage."
)
omit = optparse.make_option(
'', '--omit', action='store',
metavar="PAT1,PAT2,...",
help="Omit files whose paths match one of these patterns. "
"Accepts shell-style wildcards, which must be quoted."
)
output_xml = optparse.make_option(
'-o', '', action='store', dest="outfile",
metavar="OUTFILE",
help="Write the XML report to this file. Defaults to 'coverage.xml'"
)
parallel_mode = optparse.make_option(
'-p', '--parallel-mode', action='store_true',
help="Append the machine name, process id and random number to the "
".coverage data file name to simplify collecting data from "
"many processes."
)
module = optparse.make_option(
'-m', '--module', action='store_true',
help="<pyfile> is an importable Python module, not a script path, "
"to be run as 'python -m' would run it."
)
rcfile = optparse.make_option(
'', '--rcfile', action='store',
help="Specify configuration file. Defaults to '.coveragerc'"
)
source = optparse.make_option(
'', '--source', action='store', metavar="SRC1,SRC2,...",
help="A list of packages or directories of code to be measured."
)
timid = optparse.make_option(
'', '--timid', action='store_true',
help="Use a simpler but slower trace method. Try this if you get "
"seemingly impossible results!"
)
title = optparse.make_option(
'', '--title', action='store', metavar="TITLE",
help="A text string to use as the title on the HTML."
)
version = optparse.make_option(
'', '--version', action='store_true',
help="Display version information and exit."
)
class CoverageOptionParser(optparse.OptionParser, object):
"""Base OptionParser for coverage.py.
Problems don't exit the program.
Defaults are initialized for all options.
"""
def __init__(self, *args, **kwargs):
super(CoverageOptionParser, self).__init__(
add_help_option=False, *args, **kwargs
)
self.set_defaults(
action=None,
append=None,
branch=None,
concurrency=None,
debug=None,
directory=None,
fail_under=None,
help=None,
ignore_errors=None,
include=None,
module=None,
omit=None,
parallel_mode=None,
pylib=None,
rcfile=True,
show_missing=None,
skip_covered=None,
source=None,
timid=None,
title=None,
version=None,
)
self.disable_interspersed_args()
self.help_fn = self.help_noop
def help_noop(self, error=None, topic=None, parser=None):
"""No-op help function."""
pass
class OptionParserError(Exception):
"""Used to stop the optparse error handler ending the process."""
pass
def parse_args_ok(self, args=None, options=None):
"""Call optparse.parse_args, but return a triple:
(ok, options, args)
"""
try:
options, args = \
super(CoverageOptionParser, self).parse_args(args, options)
except self.OptionParserError:
return False, None, None
return True, options, args
def error(self, msg):
"""Override optparse.error so sys.exit doesn't get called."""
self.help_fn(msg)
raise self.OptionParserError
class GlobalOptionParser(CoverageOptionParser):
"""Command-line parser for coverage.py global option arguments."""
def __init__(self):
super(GlobalOptionParser, self).__init__()
self.add_options([
Opts.help,
Opts.version,
])
class CmdOptionParser(CoverageOptionParser):
"""Parse one of the new-style commands for coverage.py."""
def __init__(self, action, options=None, defaults=None, usage=None,
description=None
):
"""Create an OptionParser for a coverage.py command.
`action` is the slug to put into `options.action`.
`options` is a list of Option's for the command.
`defaults` is a dict of default value for options.
`usage` is the usage string to display in help.
`description` is the description of the command, for the help text.
"""
if usage:
usage = "%prog " + usage
super(CmdOptionParser, self).__init__(
prog="coverage %s" % action,
usage=usage,
description=description,
)
self.set_defaults(action=action, **(defaults or {}))
if options:
self.add_options(options)
self.cmd = action
def __eq__(self, other):
# A convenience equality, so that I can put strings in unit test
# results, and they will compare equal to objects.
return (other == "<CmdOptionParser:%s>" % self.cmd)
GLOBAL_ARGS = [
Opts.debug,
Opts.help,
Opts.rcfile,
]
CMDS = {
'annotate': CmdOptionParser("annotate",
[
Opts.directory,
Opts.ignore_errors,
Opts.include,
Opts.omit,
] + GLOBAL_ARGS,
usage = "[options] [modules]",
description = "Make annotated copies of the given files, marking "
"statements that are executed with > and statements that are "
"missed with !."
),
'combine': CmdOptionParser("combine", GLOBAL_ARGS,
usage = "<path1> <path2> ... <pathN>",
description = "Combine data from multiple coverage files collected "
"with 'run -p'. The combined results are written to a single "
"file representing the union of the data. The positional "
"arguments are data files or directories containing data files. "
"If no paths are provided, data files in the default data file's "
"directory are combined."
),
'debug': CmdOptionParser("debug", GLOBAL_ARGS,
usage = "<topic>",
description = "Display information on the internals of coverage.py, "
"for diagnosing problems. "
"Topics are 'data' to show a summary of the collected data, "
"or 'sys' to show installation information."
),
'erase': CmdOptionParser("erase", GLOBAL_ARGS,
usage = " ",
description = "Erase previously collected coverage data."
),
'help': CmdOptionParser("help", GLOBAL_ARGS,
usage = "[command]",
description = "Describe how to use coverage.py"
),
'html': CmdOptionParser("html",
[
Opts.directory,
Opts.fail_under,
Opts.ignore_errors,
Opts.include,
Opts.omit,
Opts.title,
] + GLOBAL_ARGS,
usage = "[options] [modules]",
description = "Create an HTML report of the coverage of the files. "
"Each file gets its own page, with the source decorated to show "
"executed, excluded, and missed lines."
),
'report': CmdOptionParser("report",
[
Opts.fail_under,
Opts.ignore_errors,
Opts.include,
Opts.omit,
Opts.show_missing,
Opts.skip_covered,
] + GLOBAL_ARGS,
usage = "[options] [modules]",
description = "Report coverage statistics on modules."
),
'run': CmdOptionParser("run",
[
Opts.append,
Opts.branch,
Opts.concurrency,
Opts.include,
Opts.module,
Opts.omit,
Opts.pylib,
Opts.parallel_mode,
Opts.source,
Opts.timid,
] + GLOBAL_ARGS,
usage = "[options] <pyfile> [program options]",
description = "Run a Python program, measuring code execution."
),
'xml': CmdOptionParser("xml",
[
Opts.fail_under,
Opts.ignore_errors,
Opts.include,
Opts.omit,
Opts.output_xml,
] + GLOBAL_ARGS,
usage = "[options] [modules]",
description = "Generate an XML report of coverage results."
),
}
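# Exit statuses: success, command-line or runtime error, and total
# coverage below the --fail-under threshold, respectively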
OK, ERR, FAIL_UNDER = 0, 1, 2
class CoverageScript(object):
"""The command-line interface to coverage.py."""
def __init__(self, _covpkg=None, _run_python_file=None,
_run_python_module=None, _help_fn=None, _path_exists=None):
# _covpkg is for dependency injection, so we can test this code.
if _covpkg:
self.covpkg = _covpkg
else:
import coverage
self.covpkg = coverage
# For dependency injection:
self.run_python_file = _run_python_file or run_python_file
self.run_python_module = _run_python_module or run_python_module
self.help_fn = _help_fn or self.help
self.path_exists = _path_exists or os.path.exists
self.global_option = False
self.coverage = None
def command_line(self, argv):
"""The bulk of the command line interface to coverage.py.
`argv` is the argument list to process.
Returns 0 if all is well, 1 if something went wrong.
"""
# Collect the command-line options.
if not argv:
self.help_fn(topic='minimum_help')
return OK
# The command syntax we parse depends on the first argument. Global
# switch syntax always starts with an option.
self.global_option = argv[0].startswith('-')
if self.global_option:
parser = GlobalOptionParser()
else:
parser = CMDS.get(argv[0])
if not parser:
self.help_fn("Unknown command: '%s'" % argv[0])
return ERR
argv = argv[1:]
parser.help_fn = self.help_fn
ok, options, args = parser.parse_args_ok(argv)
if not ok:
return ERR
# Handle help and version.
if self.do_help(options, args, parser):
return OK
# Check for conflicts and problems in the options.
if not self.args_ok(options, args):
return ERR
# We need to be able to import from the current directory, because
# plugins may, for example, need to read Django settings.
sys.path[0] = ''
# Listify the list options.
source = unshell_list(options.source)
omit = unshell_list(options.omit)
include = unshell_list(options.include)
debug = unshell_list(options.debug)
# Do something.
self.coverage = self.covpkg.coverage(
data_suffix = options.parallel_mode,
cover_pylib = options.pylib,
timid = options.timid,
branch = options.branch,
config_file = options.rcfile,
source = source,
omit = omit,
include = include,
debug = debug,
concurrency = options.concurrency,
)
if options.action == "debug":
return self.do_debug(args)
elif options.action == "erase":
self.coverage.erase()
return OK
elif options.action == "run":
return self.do_run(options, args)
elif options.action == "combine":
self.coverage.load()
data_dirs = args or None
self.coverage.combine(data_dirs)
self.coverage.save()
return OK
# Remaining actions are reporting, with some common options.
report_args = dict(
morfs = unglob_args(args),
ignore_errors = options.ignore_errors,
omit = omit,
include = include,
)
self.coverage.load()
total = None
if options.action == "report":
total = self.coverage.report(
show_missing=options.show_missing,
skip_covered=options.skip_covered, **report_args)
elif options.action == "annotate":
self.coverage.annotate(
directory=options.directory, **report_args)
elif options.action == "html":
total = self.coverage.html_report(
directory=options.directory, title=options.title,
**report_args)
elif options.action == "xml":
outfile = options.outfile
total = self.coverage.xml_report(outfile=outfile, **report_args)
if total is not None:
# Apply the command-line fail-under option, then read the value back
# from the config so fail_under can also come from the config file.
if options.fail_under is not None:
self.coverage.set_option("report:fail_under", options.fail_under)
if self.coverage.get_option("report:fail_under"):
# Total needs to be rounded, but be careful of 0 and 100.
if 0 < total < 1:
total = 1
elif 99 < total < 100:
total = 99
else:
total = round(total)
if total >= self.coverage.get_option("report:fail_under"):
return OK
else:
return FAIL_UNDER
return OK
def help(self, error=None, topic=None, parser=None):
"""Display an error message, or the named topic."""
assert error or topic or parser
if error:
print(error)
print("Use 'coverage help' for help.")
elif parser:
print(parser.format_help().strip())
else:
help_msg = HELP_TOPICS.get(topic, '').strip()
if help_msg:
print(help_msg % self.covpkg.__dict__)
else:
print("Don't know topic %r" % topic)
def do_help(self, options, args, parser):
"""Deal with help requests.
Return True if it handled the request, False if not.
"""
# Handle help.
if options.help:
if self.global_option:
self.help_fn(topic='help')
else:
self.help_fn(parser=parser)
return True
if options.action == "help":
if args:
for a in args:
parser = CMDS.get(a)
if parser:
self.help_fn(parser=parser)
else:
self.help_fn(topic=a)
else:
self.help_fn(topic='help')
return True
# Handle version.
if options.version:
self.help_fn(topic='version')
return True
return False
def args_ok(self, options, args):
"""Check for conflicts and problems in the options.
Returns True if everything is OK, or False if not.
"""
if options.action == "run" and not args:
self.help_fn("Nothing to do.")
return False
if options.append and options.parallel_mode:
self.help_fn("Can't append to data files in parallel mode.")
return False
return True
def do_run(self, options, args):
"""Implementation of 'coverage run'."""
if not self.coverage.get_option("run:parallel"):
if not options.append:
self.coverage.erase()
# Run the script.
self.coverage.start()
code_ran = True
try:
if options.module:
self.run_python_module(args[0], args)
else:
filename = args[0]
self.run_python_file(filename, args)
except NoSource:
code_ran = False
raise
finally:
self.coverage.stop()
if code_ran:
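# With --append, fold the existing data file into the freshly
# collected measurements before saving, so repeated runs accumulate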
if options.append:
data_file = self.coverage.get_option("run:data_file")
if self.path_exists(data_file):
self.coverage.combine(data_paths=[data_file])
self.coverage.save()
return OK
def do_debug(self, args):
"""Implementation of 'coverage debug'."""
if not args:
self.help_fn("What information would you like: data, sys?")
return ERR
for info in args:
if info == 'sys':
sys_info = self.coverage.sys_info()
print(info_header("sys"))
for line in info_formatter(sys_info):
print(" %s" % line)
elif info == 'data':
self.coverage.load()
data = self.coverage.data
print(info_header("data"))
print("path: %s" % self.coverage.data_files.filename)
if data:
print("has_arcs: %r" % data.has_arcs())
summary = data.line_counts(fullpath=True)
filenames = sorted(summary.keys())
print("\n%d files:" % len(filenames))
for f in filenames:
line = "%s: %d lines" % (f, summary[f])
plugin = data.file_tracer(f)
if plugin:
line += " [%s]" % plugin
print(line)
else:
print("No data collected")
else:
self.help_fn("Don't know what you mean by %r" % info)
return ERR
return OK
def unshell_list(s):
"""Turn a command-line argument into a list."""
if not s:
return None
if env.WINDOWS:
# When running coverage.py as coverage.exe, some of the behavior
# of the shell is emulated: wildcards are expanded into a list of
# filenames. So you have to single-quote patterns on the command
# line, but (not) helpfully, the single quotes are included in the
# argument, so we have to strip them off here.
s = s.strip("'")
return s.split(',')
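# Illustrative example: unshell_list("tests/*,*.py") -> ['tests/*', '*.py'];
# on Windows the surrounding single quotes are stripped first, so
# unshell_list("'tests/*,*.py'") yields the same list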
def unglob_args(args):
"""Interpret shell wildcards for platforms that need it."""
if env.WINDOWS:
globbed = []
for arg in args:
if '?' in arg or '*' in arg:
globbed.extend(glob.glob(arg))
else:
globbed.append(arg)
args = globbed
return args
HELP_TOPICS = {
# -------------------------
'help': """\
Coverage.py, version %(__version__)s
Measure, collect, and report on code coverage in Python programs.
usage: coverage <command> [options] [args]
Commands:
annotate Annotate source files with execution information.
combine Combine a number of data files.
erase Erase previously collected coverage data.
help Get help on using coverage.py.
html Create an HTML report.
report Report coverage stats on modules.
run Run a Python program and measure code execution.
xml Create an XML report of coverage results.
Use "coverage help <command>" for detailed help on any command.
For full documentation, see %(__url__)s
""",
# -------------------------
'minimum_help': """\
Code coverage for Python. Use 'coverage help' for help.
""",
# -------------------------
'version': """\
Coverage.py, version %(__version__)s.
Documentation at %(__url__)s
""",
}
def main(argv=None):
"""The main entry point to coverage.py.
This is installed as the script entry point.
"""
if argv is None:
argv = sys.argv[1:]
try:
status = CoverageScript().command_line(argv)
except ExceptionDuringRun as err:
# An exception was caught while running the product code. The
# sys.exc_info() return tuple is packed into an ExceptionDuringRun
# exception.
traceback.print_exception(*err.args)
status = ERR
except CoverageException as err:
# A controlled error inside coverage.py: print the message to the user.
print(err)
status = ERR
except SystemExit as err:
# The user called `sys.exit()`. Exit with their argument, if any.
if err.args:
status = err.args[0]
else:
status = None
return status
|
|
"""Support for Hass.io."""
from datetime import timedelta
import logging
import os
import voluptuous as vol
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.components.homeassistant import SERVICE_CHECK_CONFIG
import homeassistant.config as conf_util
from homeassistant.const import (
ATTR_NAME, SERVICE_HOMEASSISTANT_RESTART, SERVICE_HOMEASSISTANT_STOP)
from homeassistant.core import DOMAIN as HASS_DOMAIN, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.loader import bind_hass
from homeassistant.util.dt import utcnow
from .auth import async_setup_auth
from .discovery import async_setup_discovery
from .handler import HassIO, HassioAPIError
from .http import HassIOView
from .ingress import async_setup_ingress
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'hassio'
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
CONF_FRONTEND_REPO = 'development_repo'
CONFIG_SCHEMA = vol.Schema({
vol.Optional(DOMAIN): vol.Schema({
vol.Optional(CONF_FRONTEND_REPO): cv.isdir,
}),
}, extra=vol.ALLOW_EXTRA)
DATA_HOMEASSISTANT_VERSION = 'hassio_hass_version'
HASSIO_UPDATE_INTERVAL = timedelta(minutes=55)
SERVICE_ADDON_START = 'addon_start'
SERVICE_ADDON_STOP = 'addon_stop'
SERVICE_ADDON_RESTART = 'addon_restart'
SERVICE_ADDON_STDIN = 'addon_stdin'
SERVICE_HOST_SHUTDOWN = 'host_shutdown'
SERVICE_HOST_REBOOT = 'host_reboot'
SERVICE_SNAPSHOT_FULL = 'snapshot_full'
SERVICE_SNAPSHOT_PARTIAL = 'snapshot_partial'
SERVICE_RESTORE_FULL = 'restore_full'
SERVICE_RESTORE_PARTIAL = 'restore_partial'
ATTR_ADDON = 'addon'
ATTR_INPUT = 'input'
ATTR_SNAPSHOT = 'snapshot'
ATTR_ADDONS = 'addons'
ATTR_FOLDERS = 'folders'
ATTR_HOMEASSISTANT = 'homeassistant'
ATTR_PASSWORD = 'password'
SCHEMA_NO_DATA = vol.Schema({})
SCHEMA_ADDON = vol.Schema({
vol.Required(ATTR_ADDON): cv.slug,
})
SCHEMA_ADDON_STDIN = SCHEMA_ADDON.extend({
vol.Required(ATTR_INPUT): vol.Any(dict, cv.string)
})
SCHEMA_SNAPSHOT_FULL = vol.Schema({
vol.Optional(ATTR_NAME): cv.string,
vol.Optional(ATTR_PASSWORD): cv.string,
})
SCHEMA_SNAPSHOT_PARTIAL = SCHEMA_SNAPSHOT_FULL.extend({
vol.Optional(ATTR_FOLDERS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_ADDONS): vol.All(cv.ensure_list, [cv.string]),
})
SCHEMA_RESTORE_FULL = vol.Schema({
vol.Required(ATTR_SNAPSHOT): cv.slug,
vol.Optional(ATTR_PASSWORD): cv.string,
})
SCHEMA_RESTORE_PARTIAL = SCHEMA_RESTORE_FULL.extend({
vol.Optional(ATTR_HOMEASSISTANT): cv.boolean,
vol.Optional(ATTR_FOLDERS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_ADDONS): vol.All(cv.ensure_list, [cv.string]),
})
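# Map each service to (API endpoint template, request schema, timeout in
# seconds, whether the service data is forwarded as the request payload)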
MAP_SERVICE_API = {
SERVICE_ADDON_START: ('/addons/{addon}/start', SCHEMA_ADDON, 60, False),
SERVICE_ADDON_STOP: ('/addons/{addon}/stop', SCHEMA_ADDON, 60, False),
SERVICE_ADDON_RESTART:
('/addons/{addon}/restart', SCHEMA_ADDON, 60, False),
SERVICE_ADDON_STDIN:
('/addons/{addon}/stdin', SCHEMA_ADDON_STDIN, 60, False),
SERVICE_HOST_SHUTDOWN: ('/host/shutdown', SCHEMA_NO_DATA, 60, False),
SERVICE_HOST_REBOOT: ('/host/reboot', SCHEMA_NO_DATA, 60, False),
SERVICE_SNAPSHOT_FULL:
('/snapshots/new/full', SCHEMA_SNAPSHOT_FULL, 300, True),
SERVICE_SNAPSHOT_PARTIAL:
('/snapshots/new/partial', SCHEMA_SNAPSHOT_PARTIAL, 300, True),
SERVICE_RESTORE_FULL:
('/snapshots/{snapshot}/restore/full', SCHEMA_RESTORE_FULL, 300, True),
SERVICE_RESTORE_PARTIAL:
('/snapshots/{snapshot}/restore/partial', SCHEMA_RESTORE_PARTIAL, 300,
True),
}
@callback
@bind_hass
def get_homeassistant_version(hass):
"""Return latest available Home Assistant version.
Async friendly.
"""
return hass.data.get(DATA_HOMEASSISTANT_VERSION)
@callback
@bind_hass
def is_hassio(hass):
"""Return true if hass.io is loaded.
Async friendly.
"""
return DOMAIN in hass.config.components
async def async_setup(hass, config):
"""Set up the Hass.io component."""
# Check local setup
for env in ('HASSIO', 'HASSIO_TOKEN'):
if os.environ.get(env):
continue
_LOGGER.error("Missing %s environment variable.", env)
return False
host = os.environ['HASSIO']
websession = hass.helpers.aiohttp_client.async_get_clientsession()
hass.data[DOMAIN] = hassio = HassIO(hass.loop, websession, host)
if not await hassio.is_connected():
_LOGGER.warning("Not connected with Hass.io / system to busy!")
store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
data = await store.async_load()
if data is None:
data = {}
refresh_token = None
if 'hassio_user' in data:
user = await hass.auth.async_get_user(data['hassio_user'])
if user and user.refresh_tokens:
refresh_token = list(user.refresh_tokens.values())[0]
# Migrate old hass.io users to be admin.
if not user.is_admin:
await hass.auth.async_update_user(
user, group_ids=[GROUP_ID_ADMIN])
if refresh_token is None:
user = await hass.auth.async_create_system_user(
'Hass.io', [GROUP_ID_ADMIN])
refresh_token = await hass.auth.async_create_refresh_token(user)
data['hassio_user'] = user.id
await store.async_save(data)
# This overrides the normal API call that would be forwarded
development_repo = config.get(DOMAIN, {}).get(CONF_FRONTEND_REPO)
if development_repo is not None:
hass.http.register_static_path(
'/api/hassio/app',
os.path.join(development_repo, 'hassio/build'), False)
hass.http.register_view(HassIOView(host, websession))
if 'frontend' in hass.config.components:
await hass.components.panel_custom.async_register_panel(
frontend_url_path='hassio',
webcomponent_name='hassio-main',
sidebar_title='Hass.io',
sidebar_icon='hass:home-assistant',
js_url='/api/hassio/app/entrypoint.js',
embed_iframe=True,
require_admin=True,
)
await hassio.update_hass_api(config.get('http', {}), refresh_token.token)
if 'homeassistant' in config:
await hassio.update_hass_timezone(config['homeassistant'])
async def async_service_handler(service):
"""Handle service calls for Hass.io."""
api_command = MAP_SERVICE_API[service.service][0]
data = service.data.copy()
addon = data.pop(ATTR_ADDON, None)
snapshot = data.pop(ATTR_SNAPSHOT, None)
payload = None
# Pass data to hass.io API
if service.service == SERVICE_ADDON_STDIN:
payload = data[ATTR_INPUT]
elif MAP_SERVICE_API[service.service][3]:
payload = data
# Call API
try:
await hassio.send_command(
api_command.format(addon=addon, snapshot=snapshot),
payload=payload, timeout=MAP_SERVICE_API[service.service][2]
)
except HassioAPIError as err:
_LOGGER.error("Error on Hass.io API: %s", err)
for service, settings in MAP_SERVICE_API.items():
hass.services.async_register(
DOMAIN, service, async_service_handler, schema=settings[1])
async def update_homeassistant_version(now):
"""Update last available Home Assistant version."""
try:
data = await hassio.get_homeassistant_info()
hass.data[DATA_HOMEASSISTANT_VERSION] = data['last_version']
except HassioAPIError as err:
_LOGGER.warning("Can't read last version: %s", err)
hass.helpers.event.async_track_point_in_utc_time(
update_homeassistant_version, utcnow() + HASSIO_UPDATE_INTERVAL)
# Fetch last version
await update_homeassistant_version(None)
async def async_handle_core_service(call):
"""Service handler for handling core services."""
if call.service == SERVICE_HOMEASSISTANT_STOP:
await hassio.stop_homeassistant()
return
try:
errors = await conf_util.async_check_ha_config_file(hass)
except HomeAssistantError:
return
if errors:
_LOGGER.error(errors)
hass.components.persistent_notification.async_create(
"Config error. See dev-info panel for details.",
"Config validating", "{0}.check_config".format(HASS_DOMAIN))
return
if call.service == SERVICE_HOMEASSISTANT_RESTART:
await hassio.restart_homeassistant()
# Mock core services
for service in (SERVICE_HOMEASSISTANT_STOP, SERVICE_HOMEASSISTANT_RESTART,
SERVICE_CHECK_CONFIG):
hass.services.async_register(
HASS_DOMAIN, service, async_handle_core_service)
# Init discovery Hass.io feature
async_setup_discovery(hass, hassio, config)
# Init auth Hass.io feature
async_setup_auth(hass)
# Init ingress Hass.io feature
async_setup_ingress(hass, host)
return True
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
import warnings
from collections import OrderedDict
from importlib import import_module
from django.apps import apps
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from django.core.management.sql import (
emit_post_migrate_signal, emit_pre_migrate_signal,
)
from django.db import DEFAULT_DB_ALIAS, connections, router, transaction
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.loader import AmbiguityError
from django.db.migrations.state import ProjectState
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.module_loading import module_has_submodule
class Command(BaseCommand):
help = "Updates database schema. Manages both apps with migrations and those without."
def add_arguments(self, parser):
parser.add_argument('app_label', nargs='?',
help='App label of an application to synchronize the state.')
parser.add_argument('migration_name', nargs='?',
help=(
'Database state will be brought to the state after that '
'migration. Use the name "zero" to unapply all migrations.'
),
)
parser.add_argument('--noinput', '--no-input',
action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
'Defaults to the "default" database.')
parser.add_argument('--fake', action='store_true', dest='fake', default=False,
help='Mark migrations as run without actually running them.')
parser.add_argument('--fake-initial', action='store_true', dest='fake_initial', default=False,
help='Detect if tables already exist and fake-apply initial migrations if so. Make sure '
'that the current database schema matches your initial migration before using this '
'flag. Django will only check for an existing table name.')
parser.add_argument('--list', '-l', action='store_true', dest='list', default=False,
help='Show a list of all known migrations and which are applied.')
parser.add_argument('--run-syncdb', action='store_true', dest='run_syncdb',
help='Creates tables for apps without migrations.')
def handle(self, *args, **options):
self.verbosity = options.get('verbosity')
self.interactive = options.get('interactive')
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_config in apps.get_app_configs():
if module_has_submodule(app_config.module, "management"):
import_module('.management', app_config.name)
# Get the database we're operating from
db = options.get('database')
connection = connections[db]
# If they asked for a migration listing, quit the main execution flow and show it
if options.get("list", False):
warnings.warn(
"The 'migrate --list' command is deprecated. Use 'showmigrations' instead.",
RemovedInDjango110Warning, stacklevel=2)
self.stdout.ending = None # Remove when #21429 is fixed
return call_command(
'showmigrations',
'--list',
app_labels=[options['app_label']] if options['app_label'] else None,
database=db,
no_color=options.get('no_color'),
settings=options.get('settings'),
stdout=self.stdout,
traceback=options.get('traceback'),
verbosity=self.verbosity,
)
# Hook for backends needing any database preparation
connection.prepare_database()
# Work out which apps have migrations and which do not
executor = MigrationExecutor(connection, self.migration_progress_callback)
# Before anything else, see if there are conflicting apps and drop out
# hard if there are any
conflicts = executor.loader.detect_conflicts()
if conflicts:
name_str = "; ".join(
"%s in %s" % (", ".join(names), app)
for app, names in conflicts.items()
)
raise CommandError(
"Conflicting migrations detected; multiple leaf nodes in the "
"migration graph: (%s).\nTo fix them run "
"'python manage.py makemigrations --merge'" % name_str
)
# If they supplied command line arguments, work out what they mean.
target_app_labels_only = True
if options['app_label'] and options['migration_name']:
app_label, migration_name = options['app_label'], options['migration_name']
if app_label not in executor.loader.migrated_apps:
raise CommandError(
"App '%s' does not have migrations." % app_label
)
if migration_name == "zero":
targets = [(app_label, None)]
else:
try:
migration = executor.loader.get_migration_by_prefix(app_label, migration_name)
except AmbiguityError:
raise CommandError(
"More than one migration matches '%s' in app '%s'. "
"Please be more specific." %
(migration_name, app_label)
)
except KeyError:
raise CommandError("Cannot find a migration matching '%s' from app '%s'." % (
migration_name, app_label))
targets = [(app_label, migration.name)]
target_app_labels_only = False
elif options['app_label']:
app_label = options['app_label']
if app_label not in executor.loader.migrated_apps:
raise CommandError(
"App '%s' does not have migrations." % app_label
)
targets = [key for key in executor.loader.graph.leaf_nodes() if key[0] == app_label]
else:
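# No app label was given: migrate every migrated app to its latest
# (leaf) migration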
targets = executor.loader.graph.leaf_nodes()
plan = executor.migration_plan(targets)
run_syncdb = options.get('run_syncdb') and executor.loader.unmigrated_apps
# Print some useful info
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Operations to perform:"))
if run_syncdb:
self.stdout.write(
self.style.MIGRATE_LABEL(" Synchronize unmigrated apps: ") +
(", ".join(executor.loader.unmigrated_apps))
)
if target_app_labels_only:
self.stdout.write(
self.style.MIGRATE_LABEL(" Apply all migrations: ") +
(", ".join(set(a for a, n in targets)) or "(none)")
)
else:
if targets[0][1] is None:
self.stdout.write(self.style.MIGRATE_LABEL(
" Unapply all migrations: ") + "%s" % (targets[0][0], )
)
else:
self.stdout.write(self.style.MIGRATE_LABEL(
" Target specific migration: ") + "%s, from %s"
% (targets[0][1], targets[0][0])
)
emit_pre_migrate_signal(self.verbosity, self.interactive, connection.alias)
# Run the syncdb phase.
if run_syncdb:
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Synchronizing apps without migrations:"))
self.sync_apps(connection, executor.loader.unmigrated_apps)
# Migrate!
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Running migrations:"))
if not plan:
executor.check_replacements()
if self.verbosity >= 1:
self.stdout.write(" No migrations to apply.")
# If there are changes that aren't in migrations yet, tell them how to fix it.
autodetector = MigrationAutodetector(
executor.loader.project_state(),
ProjectState.from_apps(apps),
)
changes = autodetector.changes(graph=executor.loader.graph)
if changes:
self.stdout.write(self.style.NOTICE(
" Your models have changes that are not yet reflected "
"in a migration, and so won't be applied."
))
self.stdout.write(self.style.NOTICE(
" Run 'manage.py makemigrations' to make new "
"migrations, and then re-run 'manage.py migrate' to "
"apply them."
))
else:
fake = options.get("fake")
fake_initial = options.get("fake_initial")
executor.migrate(targets, plan, fake=fake, fake_initial=fake_initial)
# Send the post_migrate signal, so individual apps can do whatever they need
# to do at this point.
emit_post_migrate_signal(self.verbosity, self.interactive, connection.alias)
def migration_progress_callback(self, action, migration=None, fake=False):
if self.verbosity >= 1:
compute_time = self.verbosity > 1
if action == "apply_start":
if compute_time:
self.start = time.time()
self.stdout.write(" Applying %s..." % migration, ending="")
self.stdout.flush()
elif action == "apply_success":
elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
if fake:
self.stdout.write(self.style.MIGRATE_SUCCESS(" FAKED" + elapsed))
else:
self.stdout.write(self.style.MIGRATE_SUCCESS(" OK" + elapsed))
elif action == "unapply_start":
if compute_time:
self.start = time.time()
self.stdout.write(" Unapplying %s..." % migration, ending="")
self.stdout.flush()
elif action == "unapply_success":
elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
if fake:
self.stdout.write(self.style.MIGRATE_SUCCESS(" FAKED" + elapsed))
else:
self.stdout.write(self.style.MIGRATE_SUCCESS(" OK" + elapsed))
elif action == "render_start":
if compute_time:
self.start = time.time()
self.stdout.write(" Rendering model states...", ending="")
self.stdout.flush()
elif action == "render_success":
elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
self.stdout.write(self.style.MIGRATE_SUCCESS(" DONE" + elapsed))
def sync_apps(self, connection, app_labels):
"Runs the old syncdb-style operation on a list of app_labels."
cursor = connection.cursor()
try:
# Get a list of already installed *models* so that references work right.
tables = connection.introspection.table_names(cursor)
created_models = set()
# Build the manifest of apps and models that are to be synchronized
all_models = [
(app_config.label,
router.get_migratable_models(app_config, connection.alias, include_auto_created=False))
for app_config in apps.get_app_configs()
if app_config.models_module is not None and app_config.label in app_labels
]
def model_installed(model):
opts = model._meta
converter = connection.introspection.table_name_converter
# Note that if a model is unmanaged we short-circuit and never try to install it
return not ((converter(opts.db_table) in tables) or
(opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))
manifest = OrderedDict(
(app_name, list(filter(model_installed, model_list)))
for app_name, model_list in all_models
)
# Create the tables for each model
if self.verbosity >= 1:
self.stdout.write(" Creating tables...\n")
with transaction.atomic(using=connection.alias, savepoint=connection.features.can_rollback_ddl):
deferred_sql = []
for app_name, model_list in manifest.items():
for model in model_list:
if not model._meta.can_migrate(connection):
continue
if self.verbosity >= 3:
self.stdout.write(
" Processing %s.%s model\n" % (app_name, model._meta.object_name)
)
with connection.schema_editor() as editor:
if self.verbosity >= 1:
self.stdout.write(" Creating table %s\n" % model._meta.db_table)
editor.create_model(model)
deferred_sql.extend(editor.deferred_sql)
editor.deferred_sql = []
created_models.add(model)
if self.verbosity >= 1:
self.stdout.write(" Running deferred SQL...\n")
for statement in deferred_sql:
cursor.execute(statement)
finally:
cursor.close()
return created_models
|
|
import json
from django.core.urlresolvers import reverse
import responses
from nose.tools import eq_, ok_
from mkt.api.tests.test_oauth import RestOAuth
from mkt.constants.applications import DEVICE_DESKTOP
from mkt.constants.base import CONTENT_ICON_SIZES, STATUS_PENDING
from mkt.constants.regions import URY, USA
from mkt.site.fixtures import fixture
from mkt.site.tests import ESTestCase, TestCase
from mkt.tags.models import Tag
from mkt.users.models import UserProfile
from mkt.websites.models import Website, WebsiteSubmission
from mkt.websites.utils import website_factory
from mkt.websites.views import WebsiteMetadataScraperView
class TestWebsiteESView(RestOAuth, ESTestCase):
fixtures = fixture('user_2519')
def setUp(self):
self.website = website_factory(**{
'title': 'something',
'categories': ['books-comics', 'sports'],
# Preferred_regions and devices are stored as a json array of ids.
'devices': [DEVICE_DESKTOP.id],
'preferred_regions': [URY.id, USA.id],
'icon_type': 'image/png',
'icon_hash': 'fakehash',
})
self.category = 'books-comics'
self.url = reverse('api-v2:website-search-api')
super(TestWebsiteESView, self).setUp()
self.refresh('website')
def tearDown(self):
Website.get_indexer().unindexer(_all=True)
super(TestWebsiteESView, self).tearDown()
def test_verbs(self):
self._allowed_verbs(self.url, ['get'])
def test_has_cors(self):
self.assertCORS(self.anon.get(self.url), 'get')
def test_basic(self):
with self.assertNumQueries(0):
response = self.anon.get(self.url)
eq_(response.status_code, 200)
eq_(len(response.json['objects']), 1)
data = response.json['objects'][0]
eq_(data['description'], {'en-US': self.website.description})
eq_(data['title'], {'en-US': self.website.title})
eq_(data['name'], {'en-US': self.website.name})
eq_(data['short_name'], {'en-US': self.website.short_name})
eq_(data['url'], self.website.url)
eq_(data['mobile_url'], self.website.mobile_url)
eq_(data['categories'], ['books-comics', 'sports'])
eq_(data['description'], {'en-US': self.website.description})
        eq_(data['device_types'], ['desktop'])
eq_(data['icons']['128'], self.website.get_icon_url(128))
ok_(data['icons']['128'].endswith('?modified=fakehash'))
eq_(sorted(int(k) for k in data['icons'].keys()), CONTENT_ICON_SIZES)
eq_(data['mobile_url'], self.website.mobile_url)
eq_(data['name'], {'en-US': self.website.name})
eq_(data['short_name'], {'en-US': self.website.short_name})
eq_(data['title'], {'en-US': self.website.title})
eq_(data['url'], self.website.url)
def test_list(self):
self.website2 = website_factory(url='http://www.lol.com/')
self.refresh('website')
with self.assertNumQueries(0):
response = self.anon.get(self.url)
eq_(response.status_code, 200)
eq_(len(response.json['objects']), 2)
def test_wrong_category(self):
res = self.anon.get(self.url, data={'cat': self.category + 'xq'})
eq_(res.status_code, 400)
eq_(res['Content-Type'], 'application/json')
def test_right_category_but_not_present(self):
self.category = 'travel'
res = self.anon.get(self.url, data={'cat': self.category})
eq_(res.status_code, 200)
eq_(res.json['objects'], [])
def test_right_category_present(self):
res = self.anon.get(self.url, data={'cat': self.category})
eq_(res.status_code, 200)
objs = res.json['objects']
eq_(len(objs), 1)
def test_region_preference(self):
# Websites don't have region exclusions, only "preferred" regions.
res = self.anon.get(self.url, data={'region': 'br'})
eq_(res.status_code, 200)
eq_(len(res.json['objects']), 1)
res = self.anon.get(self.url, data={'region': 'us'})
eq_(res.status_code, 200)
eq_(len(res.json['objects']), 1)
def test_q(self):
res = self.anon.get(self.url, data={'q': 'something'})
eq_(res.status_code, 200)
obj = res.json['objects'][0]
eq_(obj['id'], self.website.pk)
def test_q_relevancy(self):
# Add 2 websites - the last one has 'something' appearing in both its
        # title and its description, so it should be boosted and appear higher
# in the results.
website_factory(title='something')
boosted_website = website_factory(title='something',
description='something')
self.reindex(Website)
res = self.anon.get(self.url, data={'q': 'something'})
eq_(res.status_code, 200)
eq_(len(res.json['objects']), 3)
obj = res.json['objects'][0]
eq_(obj['id'], boosted_website.pk)
def test_q_relevancy_region(self):
# Add another website without any preferred regions: it should rank
# higher without region (description increases its score), lower with
# one (region preference increases the score for the initial website).
self.website2 = website_factory(title='something',
description='something')
self.reindex(Website)
res = self.anon.get(self.url, data={'q': 'something'})
eq_(res.status_code, 200)
objs = res.json['objects']
eq_(len(objs), 2)
eq_(objs[0]['id'], self.website2.pk)
eq_(objs[1]['id'], self.website.pk)
res = self.anon.get(self.url, data={'q': 'something', 'region': 'us'})
eq_(res.status_code, 200)
objs = res.json['objects']
eq_(len(objs), 2)
eq_(objs[0]['id'], self.website.pk)
eq_(objs[1]['id'], self.website2.pk)
def test_device_not_present(self):
res = self.anon.get(
self.url, data={'dev': 'android', 'device': 'tablet'})
eq_(res.status_code, 200)
eq_(len(res.json['objects']), 0)
def test_device_present(self):
res = self.anon.get(self.url, data={'dev': 'desktop'})
eq_(res.status_code, 200)
objs = res.json['objects']
eq_(len(objs), 1)
def test_keywords(self):
website_factory()
self.website.keywords.add(Tag.objects.create(tag_text='hodor'))
self.website.keywords.add(Tag.objects.create(tag_text='radar'))
self.website.save()
self.refresh('website')
res = self.anon.get(self.url, data={'q': 'hodor'})
eq_(res.status_code, 200)
objs = res.json['objects']
eq_(len(objs), 1)
eq_(sorted(objs[0]['keywords']), sorted(['hodor', 'radar']))
class TestWebsiteView(RestOAuth, TestCase):
def setUp(self):
super(TestWebsiteView, self).setUp()
self.website = website_factory(**{
'categories': ['books-comics', 'sports'],
# Preferred_regions and devices are stored as a json array of ids.
'devices': [DEVICE_DESKTOP.id],
'preferred_regions': [URY.id, USA.id],
'icon_type': 'image/png',
'icon_hash': 'fakehash',
})
self.url = reverse('api-v2:website-detail',
kwargs={'pk': self.website.pk})
def test_verbs(self):
self._allowed_verbs(self.url, ['get'])
def test_has_cors(self):
self.assertCORS(self.anon.get(self.url), 'get')
def test_basic(self):
response = self.anon.get(self.url)
eq_(response.status_code, 200)
data = response.json
eq_(data['description'], {'en-US': self.website.description})
eq_(data['title'], {'en-US': self.website.title})
eq_(data['name'], {'en-US': self.website.name})
eq_(data['short_name'], {'en-US': self.website.short_name})
eq_(data['url'], self.website.url)
eq_(data['mobile_url'], self.website.mobile_url)
eq_(data['categories'], ['books-comics', 'sports'])
eq_(data['description'], {'en-US': self.website.description})
        eq_(data['device_types'], ['desktop'])
eq_(data['icons']['128'], self.website.get_icon_url(128))
ok_(data['icons']['128'].endswith('?modified=fakehash'))
eq_(sorted(int(k) for k in data['icons'].keys()), CONTENT_ICON_SIZES)
eq_(data['mobile_url'], self.website.mobile_url)
eq_(data['name'], {'en-US': self.website.name})
eq_(data['short_name'], {'en-US': self.website.short_name})
eq_(data['title'], {'en-US': self.website.title})
eq_(data['url'], self.website.url)
def test_disabled(self):
self.website.update(is_disabled=True)
response = self.anon.get(self.url)
eq_(response.status_code, 404)
def test_wrong_status(self):
self.website.update(status=STATUS_PENDING)
response = self.anon.get(self.url)
eq_(response.status_code, 404)
class TestReviewerSearch(RestOAuth, ESTestCase):
fixtures = fixture('user_2519')
def setUp(self):
self.website = website_factory(**{
'title': 'something',
'categories': json.dumps(['books-comics', 'sports']),
'status': STATUS_PENDING,
})
self.url = reverse('api-v2:reviewers-website-search-api')
self.user = UserProfile.objects.get(pk=2519)
self.grant_permission(self.user, 'Apps:Review')
super(TestReviewerSearch, self).setUp()
self.refresh('website')
def tearDown(self):
Website.get_indexer().unindexer(_all=True)
super(TestReviewerSearch, self).tearDown()
def test_access(self):
eq_(self.anon.get(self.url).status_code, 403)
self.remove_permission(self.user, 'Apps:Review')
eq_(self.client.get(self.url).status_code, 403)
def test_verbs(self):
self._allowed_verbs(self.url, ['get'])
def test_has_cors(self):
self.assertCORS(self.client.get(self.url), 'get')
def test_status_filtering(self):
res = self.client.get(self.url, data={'status': 'public'})
eq_(res.status_code, 200)
objs = res.json['objects']
eq_(len(objs), 0)
res = self.client.get(self.url, data={'status': 'pending'})
eq_(res.status_code, 200)
objs = res.json['objects']
eq_(len(objs), 1)
class TestWebsiteScrape(RestOAuth, TestCase):
def setUp(self):
self.url = reverse('api-v2:website-scrape')
super(TestWebsiteScrape, self).setUp()
def go(self, url=None):
qs = {}
if url:
qs = {'url': url}
response = self.anon.get(self.url, qs)
return response, json.loads(response.content)
def test_no_url(self):
response, content = self.go()
eq_(response.status_code, 400)
eq_(content, WebsiteMetadataScraperView.errors['no_url'])
@responses.activate
def test_site_404(self):
URL = 'https://marketplace.firefox.com/'
responses.add(responses.GET, URL, status=404)
response, content = self.go(url=URL)
eq_(response.status_code, 400)
eq_(content, WebsiteMetadataScraperView.errors['network'])
@responses.activate
def test_site_500(self):
URL = 'https://marketplace.firefox.com/'
responses.add(responses.GET, URL, status=500)
response, content = self.go(url=URL)
eq_(response.status_code, 400)
eq_(content, WebsiteMetadataScraperView.errors['network'])
@responses.activate
def test_empty_body(self):
URL = 'https://marketplace.firefox.com/'
responses.add(responses.GET, URL, '', status=200)
response, content = self.go(url=URL)
eq_(response.status_code, 400)
eq_(content, WebsiteMetadataScraperView.errors['malformed_data'])
@responses.activate
def test_valid(self):
URL = 'https://marketplace.firefox.com/'
responses.add(responses.GET, URL, '<html />', status=200)
response, content = self.go(url=URL)
eq_(response.status_code, 200)
def test_verbs(self):
self._allowed_verbs(self.url, ['get'])
def test_has_cors(self):
self.assertCORS(self.client.get(self.url), 'get')
class TestWebsiteSubmissionViewSetCreate(RestOAuth, TestCase):
def setUp(self):
self.url = reverse('api-v2:website-submit')
self.data = {
'canonical_url': 'https://www.bro.app',
'categories': ['lifestyle', 'music'],
'detected_icon': 'https://www.bro.app/apple-touch.png',
'description': 'We cannot tell you what a Bro is. But bros know.',
'keywords': ['social networking', 'Gilfoyle', 'Silicon Valley'],
'name': 'Bro',
'preferred_regions': ['us', 'ca', 'fr'],
'public_credit': False,
'url': 'https://m.bro.app',
'why_relevant': 'Ummm...bro. You know.',
'works_well': 3
}
super(TestWebsiteSubmissionViewSetCreate, self).setUp()
def go(self, anon=False):
client = self.client
if anon:
client = self.anon
response = client.post(self.url, json.dumps(self.data))
return response, json.loads(response.content)
def compare_values(self, content):
ok_('id' in content)
eq_(content['canonical_url'], self.data['canonical_url'])
eq_(content['categories'], self.data['categories'])
eq_(content['detected_icon'], self.data['detected_icon'])
eq_(content['keywords'], self.data['keywords'])
eq_(content['preferred_regions'], self.data['preferred_regions'])
eq_(content['public_credit'], self.data['public_credit'])
eq_(content['url'], self.data['url'])
eq_(content['why_relevant'], self.data['why_relevant'])
eq_(content['works_well'], self.data['works_well'])
ok_(self.data['description'] in content['description'].values())
ok_(self.data['name'] in content['name'].values())
def missing_field(self, field_name, failure=True):
self.data[field_name] = None
response, content = self.go()
eq_(response.status_code, 400 if failure else 201)
return response, content
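    # Usage sketch for the helper above: self.missing_field('canonical_url')
    # posts the payload with that field set to None and asserts a 400
    # response (or 201 when failure=False).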
def test_get(self):
self.grant_permission(self.user, 'Websites:Submit')
response = self.client.get(self.url)
eq_(response.status_code, 405)
def test_get_no_perms(self):
response = self.client.get(self.url)
eq_(response.status_code, 403)
def test_post(self):
self.grant_permission(self.user, 'Websites:Submit')
response, content = self.go()
eq_(response.status_code, 201)
self.compare_values(content)
eq_(WebsiteSubmission.objects.all()[0].submitter, self.user)
def test_post_no_perms(self):
response, content = self.go()
eq_(response.status_code, 403)
def test_post_anon(self):
response, content = self.go(anon=True)
eq_(response.status_code, 403)
def test_allow_empty_preferred_regions(self):
self.grant_permission(self.user, 'Websites:Submit')
self.data['preferred_regions'] = []
response, content = self.go()
eq_(response.status_code, 201)
eq_(content['preferred_regions'], [])
class TestWebsiteSubmissionViewSetList(RestOAuth, TestCase):
def setUp(self):
self.url = reverse('api-v2:website-submissions')
self.data = {
'canonical_url': 'https://www.bro.app',
'categories': ['lifestyle', 'music'],
'detected_icon': 'https://www.bro.app/apple-touch.png',
'description': 'We cannot tell you what a Bro is. But bros know.',
'keywords': ['social networking', 'Gilfoyle', 'Silicon Valley'],
'name': 'Bro',
'preferred_regions': ['us', 'ca', 'fr'],
'public_credit': False,
'url': 'https://m.bro.app',
'why_relevant': 'Ummm...bro. You know.',
'works_well': 3
}
super(TestWebsiteSubmissionViewSetList, self).setUp()
def test_list(self):
WebsiteSubmission.objects.create(**self.data)
WebsiteSubmission.objects.create(**self.data)
self.grant_permission(self.user, 'Websites:Submit')
response = self.client.get(self.url)
eq_(response.status_code, 200)
eq_(response.json['objects'][0]['url'], 'https://m.bro.app')
eq_(response.json['meta']['total_count'], 2)
def test_anon(self):
response = self.client.get(self.url)
eq_(response.status_code, 403)
|
|
###############################################################################
##
## Copyright (C) 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
## don't touch: must be first import!
import choosereactor
import os, json, sys, pkg_resources
from twisted.internet import reactor
from twisted.python import log, usage
from twisted.internet.defer import Deferred
## for versions
import autobahn
import autobahntestsuite
from autobahn.websocket.utf8validator import Utf8Validator
from autobahn.websocket.xormasker import XorMaskerNull
## WebSocket testing modes
import testee
import fuzzing
## WAMP testing modes
import wamptestee
import wampfuzzing
## Misc testing modes
import echo
import broadcast
import massconnect
import wsperfcontrol
import wsperfmaster
from spectemplate import SPEC_FUZZINGSERVER, \
SPEC_FUZZINGCLIENT, \
SPEC_FUZZINGWAMPSERVER, \
SPEC_FUZZINGWAMPCLIENT, \
SPEC_WSPERFCONTROL, \
SPEC_MASSCONNECT
class WsTestOptions(usage.Options):
"""
Reads options from the command-line and checks them for plausibility.
"""
# Available modes, specified with the --mode (or short: -m) flag.
MODES = ['echoserver',
'echoclient',
'broadcastclient',
'broadcastserver',
'fuzzingserver',
'fuzzingclient',
'fuzzingwampserver',
'fuzzingwampclient',
'testeeserver',
'testeeclient',
'wsperfcontrol',
'wsperfmaster',
'wampserver',
'wamptesteeserver',
'wampclient',
'massconnect',
'web',
'import',
'export']
# Modes that need a specification file
MODES_NEEDING_SPEC = ['fuzzingclient',
'fuzzingserver',
'fuzzingwampserver',
'fuzzingwampclient',
'wsperfcontrol',
'massconnect',
'import']
   # Modes that need a WebSocket URI
MODES_NEEDING_WSURI = ['echoclient',
'echoserver',
'broadcastclient',
'broadcastserver',
'testeeclient',
'testeeserver',
'wsperfcontrol',
'wampserver',
'wampclient',
'wamptesteeserver']
# Default content of specification files for various modes
DEFAULT_SPECIFICATIONS = {'fuzzingclient': SPEC_FUZZINGCLIENT,
'fuzzingserver': SPEC_FUZZINGSERVER,
'wsperfcontrol': SPEC_WSPERFCONTROL,
'massconnect': SPEC_MASSCONNECT,
'fuzzingwampclient': SPEC_FUZZINGWAMPCLIENT,
'fuzzingwampserver': SPEC_FUZZINGWAMPSERVER}
optParameters = [
['mode', 'm', None, 'Test mode, one of: %s [required]' % ', '.join(MODES)],
['testset', 't', None, 'Run a test set from an import test spec.'],
['spec', 's', None, 'Test specification file [required in some modes].'],
['wsuri', 'w', None, 'WebSocket URI [required in some modes].'],
['ident', 'i', None, ('Testee client identifier [optional for client testees].')],
['key', 'k', None, ('Server private key file for secure WebSocket (WSS) [required in server modes for WSS].')],
['cert', 'c', None, ('Server certificate file for secure WebSocket (WSS) [required in server modes for WSS].')]
]
optFlags = [
['debug', 'd', 'Debug output [default: off].'],
['autobahnversion', 'a', 'Print version information for Autobahn and AutobahnTestSuite.']
]
def postOptions(self):
"""
Process the given options. Perform plausibility checks, etc...
"""
if self['autobahnversion']:
print "Autobahn %s" % autobahn.version
print "AutobahnTestSuite %s" % autobahntestsuite.version
sys.exit(0)
if not self['mode']:
raise usage.UsageError, "a mode must be specified to run!"
if self['mode'] not in WsTestOptions.MODES:
raise usage.UsageError, (
"Mode '%s' is invalid.\nAvailable modes:\n\t- %s" % (
self['mode'], "\n\t- ".join(sorted(WsTestOptions.MODES))))
if (self['mode'] in WsTestOptions.MODES_NEEDING_WSURI and not self['wsuri']):
raise usage.UsageError, "mode needs a WebSocket URI!"
class WsTestRunner(object):
"""
Testsuite driver.
"""
def __init__(self, options, spec = None):
self.options = options
self.spec = spec
self.debug = self.options.get('debug', False)
if self.debug:
log.startLogging(sys.stdout)
self.mode = str(self.options['mode'])
def startService(self):
"""
Start mode specific services.
"""
print
print "Using Twisted reactor class %s" % str(reactor.__class__)
print "Using UTF8 Validator class %s" % str(Utf8Validator)
print "Using XOR Masker classes %s" % str(XorMaskerNull)
print "Using JSON processor module '%s'" % str(autobahn.wamp.json_lib.__name__)
print
if self.mode == "import":
return self.startImportSpec(self.options['spec'])
elif self.mode == "export":
return self.startExportSpec(self.options['testset'], self.options.get('spec', None))
elif self.mode == "fuzzingwampclient":
return self.startFuzzingWampClient(self.options['testset'])
elif self.mode == "web":
return self.startWeb(debug = self.debug)
elif self.mode == "testeeclient":
return testee.startClient(self.options['wsuri'], ident = self.options['ident'], debug = self.debug)
elif self.mode == "testeeserver":
return testee.startServer(self.options['wsuri'], debug = self.debug)
elif self.mode == "broadcastclient":
return broadcast.startClient(self.options['wsuri'], debug = self.debug)
elif self.mode == "broadcastserver":
return broadcast.startServer(self.options['wsuri'], debug = self.debug)
elif self.mode == "echoclient":
return echo.startClient(self.options['wsuri'], debug = self.debug)
elif self.mode == "echoserver":
return echo.startServer(self.options['wsuri'], debug = self.debug)
elif self.mode == "fuzzingclient":
return fuzzing.startClient(self.spec, debug = self.debug)
elif self.mode == "fuzzingserver":
return fuzzing.startServer(self.spec, debug = self.debug)
elif self.mode == "wsperfcontrol":
return wsperfcontrol.startClient(self.options['wsuri'], self.spec, debug = self.debug)
elif self.mode == "wsperfmaster":
return wsperfmaster.startServer(debug = self.debug)
elif self.mode == "massconnect":
return massconnect.startClient(self.spec, debug = self.debug)
else:
raise Exception("no mode '%s'" % self.mode)
def start(options, spec = None):
"""
   Actually start up a wstest run.
:param options: Global options controlling wstest.
:type options: dict
:param spec: Test specification needed for certain modes. If none is given, but
a spec is needed, a default spec is used.
:type spec: dict
"""
if options['mode'] in WsTestOptions.MODES_NEEDING_SPEC and spec is None:
spec = json.loads(WsTestOptions.DEFAULT_SPECIFICATIONS[options['mode']])
wstest = WsTestRunner(options, spec)
res = wstest.startService()
## only start reactor for modes needing it
##
if res:
## if mode wants to shutdown reactor after done (e.g. clients),
## hook up machinery to do so
##
if isinstance(res, Deferred):
def shutdown(_):
reactor.stop()
res.addBoth(shutdown)
reactor.run()
def run():
"""
Run wstest from command line. This parses command line args etc.
"""
## parse wstest command lines options
##
cmdOpts = WsTestOptions()
try:
cmdOpts.parseOptions()
except usage.UsageError, errortext:
print '%s %s\n' % (sys.argv[0], errortext)
print 'Try %s --help for usage details\n' % sys.argv[0]
sys.exit(1)
else:
options = cmdOpts.opts
## check if mode needs a spec ..
##
if options['mode'] in WsTestOptions.MODES_NEEDING_SPEC:
## .. if none was given ..
##
if not options['spec']:
## .. assume canonical specfile name ..
##
filename = "%s.json" % options['mode']
options['spec'] = filename
if not os.path.isfile(filename):
## .. if file does not exist, autocreate a spec file
##
content = WsTestOptions.DEFAULT_SPECIFICATIONS[options['mode']]
print "Auto-generating spec file '%s'" % filename
f = open(filename, 'w')
f.write(content)
f.close()
else:
## .. use existing one
##
print "Using implicit spec file '%s'" % filename
else:
## use explicitly given specfile
##
print "Using explicit spec file '%s'" % options['spec']
## now load the spec ..
##
spec_filename = os.path.abspath(options['spec'])
print "Loading spec from %s" % spec_filename
spec = json.loads(open(spec_filename).read())
else:
## mode does not rely on spec
##
spec = None
## now start a wstest run ..
##
start(options, spec)
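## Example invocation, assuming this module is installed as the usual
## 'wstest' console script (invocation shown for illustration only):
##
##   wstest --mode=fuzzingserver --spec=fuzzingserver.json
##
## For modes listed in MODES_NEEDING_SPEC a missing spec file is
## auto-generated from DEFAULT_SPECIFICATIONS (see run() above).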
if __name__ == '__main__':
run()
|
|
# -*- coding: utf-8 -*-
'''
Management zpool
:maintainer: Jorge Schrauwen <[email protected]>
:maturity: new
:depends: zpool
:platform: smartos, illumos, solaris, freebsd, linux
.. versionadded:: 2016.3.0
.. code-block:: yaml
oldpool:
zpool.absent:
- export: true
newpool:
zpool.present:
- config:
import: false
force: true
- properties:
comment: salty storage pool
- layout:
mirror-0:
/dev/disk0
/dev/disk1
mirror-1:
/dev/disk2
/dev/disk3
simplepool:
zpool.present:
- config:
import: false
force: true
- properties:
comment: another salty storage pool
- layout:
- /dev/disk0
- /dev/disk1
.. warning::
    The layout will never be updated; it is only used at the time of creation.
    It's a whole lot of work to figure out whether a device needs to be detached, removed, and so on; this is best done by the sysadmin on a case-by-case basis.
    Filesystem properties are also not updated; these should be managed by the zfs state module.
'''
from __future__ import absolute_import
# Import Python libs
import os
import logging
# Import Salt libs
from salt.utils.odict import OrderedDict
log = logging.getLogger(__name__)
# Define the state's virtual name
__virtualname__ = 'zpool'
def __virtual__():
'''
Provides zpool state
'''
if 'zpool.create' in __salt__:
return True
else:
return (
False,
'{0} state module can only be loaded on illumos, Solaris, SmartOS, FreeBSD, ...'.format(
__virtualname__
)
)
def present(name, properties=None, filesystem_properties=None, layout=None, config=None):
'''
ensure storage pool is present on the system
name : string
name of storage pool
properties : dict
optional set of properties to set for the storage pool
filesystem_properties : dict
optional set of filesystem properties to set for the storage pool (creation only)
layout: dict
disk layout to use if the pool does not exist (creation only)
config : dict
fine grain control over this state
.. note::
The following configuration properties can be toggled in the config parameter.
- import (true) - try to import the pool before creating it if absent
- import_dirs (None) - specify additional locations to scan for devices on import
- device_dir (None, SunOS=/dev/rdsk) - specify device directory to use if not absolute path
- force (false) - try to force the import or creation
.. note::
        Because IDs inside the layout dict must be unique, they need to have a suffix.
.. code-block:: yaml
mirror-0:
/tmp/vdisk3
/tmp/vdisk2
mirror-1:
/tmp/vdisk0
/tmp/vdisk1
The above yaml will always result in the following zpool create:
.. code-block:: bash
zpool create mypool mirror /tmp/vdisk3 /tmp/vdisk2 mirror /tmp/vdisk0 /tmp/vdisk1
.. warning::
Pay attention to the order of your dict!
.. code-block:: yaml
mirror-0:
/tmp/vdisk0
/tmp/vdisk1
/tmp/vdisk2:
The above will result in the following zpool create:
.. code-block:: bash
zpool create mypool mirror /tmp/vdisk0 /tmp/vdisk1 /tmp/vdisk2
        This creates a 3-way mirror, while you probably expected a mirror root vdev with 2 devices plus a root vdev of 1 device!
'''
name = name.lower()
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
# config defaults
state_config = config if config else {}
config = {
'import': True,
'import_dirs': None,
'device_dir': None,
'force': False
}
if __grains__['kernel'] == 'SunOS':
config['device_dir'] = '/dev/rdsk'
elif __grains__['kernel'] == 'Linux':
config['device_dir'] = '/dev'
config.update(state_config)
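    # e.g. with state_config == {'force': True} on a SunOS minion the
    # effective config becomes:
    #     {'import': True, 'import_dirs': None,
    #      'device_dir': '/dev/rdsk', 'force': True}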
log.debug('zpool.present::{0}::config - {1}'.format(name, config))
# parse layout
if layout:
for root_dev in layout:
if '-' not in root_dev:
continue
layout[root_dev] = layout[root_dev].keys() if isinstance(layout[root_dev], OrderedDict) else layout[root_dev].split(' ')
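    # e.g. a layout of {'mirror-0': '/tmp/vdisk0 /tmp/vdisk1'} is normalized
    # above to {'mirror-0': ['/tmp/vdisk0', '/tmp/vdisk1']}; root devices
    # without a '-' suffix are left untouched.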
log.debug('zpool.present::{0}::layout - {1}'.format(name, layout))
# ensure the pool is present
ret['result'] = False
if __salt__['zpool.exists'](name): # update
ret['result'] = True
# retrieve current properties
properties_current = __salt__['zpool.get'](name)[name]
# figure out if updates needed
        properties = properties if properties else {}
        properties_update = []
        for prop in properties:
if prop not in properties_current:
continue
value = properties[prop]
if isinstance(value, bool):
value = 'on' if value else 'off'
if properties_current[prop] != value:
properties_update.append(prop)
# update properties
for prop in properties_update:
value = properties[prop]
res = __salt__['zpool.set'](name, prop, value)
# also transform value so we match with the return
if isinstance(value, bool):
value = 'on' if value else 'off'
elif ' ' in value:
value = "'{0}'".format(value)
# check return
if name in res and prop in res[name] and res[name][prop] == value:
if name not in ret['changes']:
ret['changes'][name] = {}
ret['changes'][name].update(res[name])
else:
ret['result'] = False
if ret['comment'] == '':
ret['comment'] = 'The following properties were not updated:'
ret['comment'] = '{0} {1}'.format(ret['comment'], prop)
if ret['result']:
ret['comment'] = 'properties updated' if len(ret['changes']) > 0 else 'no update needed'
else: # import or create
if config['import']: # try import
log.debug('zpool.present::{0}::importing'.format(name))
ret['result'] = __salt__['zpool.import'](
name,
force=config['force'],
dir=config['import_dirs']
)
ret['result'] = ret['result'].get(name) == 'imported'
if ret['result']:
ret['changes'][name] = 'imported'
ret['comment'] = 'storage pool {0} was imported'.format(name)
if not ret['result']: # create
if not layout:
ret['comment'] = 'storage pool {0} was not imported, no layout specified for creation'.format(name)
else:
log.debug('zpool.present::{0}::creating'.format(name))
if __opts__['test']:
ret['result'] = True
else:
# construct *vdev parameter for zpool.create
params = []
params.append(name)
for root_dev in layout:
if '-' in root_dev: # special device
                            # NOTE: accommodate the non-existing 'disk' vdev type
if root_dev.split('-')[0] != 'disk':
params.append(root_dev.split('-')[0]) # add the type by stripping the ID
for sub_dev in layout[root_dev]: # add all sub devices
if '/' not in sub_dev and config['device_dir'] and os.path.exists(config['device_dir']):
sub_dev = os.path.join(config['device_dir'], sub_dev)
params.append(sub_dev)
else: # normal device
if '/' not in root_dev and config['device_dir'] and os.path.exists(config['device_dir']):
root_dev = os.path.join(config['device_dir'], root_dev)
params.append(root_dev)
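                    # e.g. for the mirrored layout from the docstring above,
                    # params ends up as ['mypool', 'mirror', '/tmp/vdisk3',
                    # '/tmp/vdisk2', 'mirror', '/tmp/vdisk0', '/tmp/vdisk1']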
# execute zpool.create
ret['result'] = __salt__['zpool.create'](*params, force=config['force'], properties=properties, filesystem_properties=filesystem_properties)
if ret['result'].get(name).startswith('created'):
ret['result'] = True
else:
if ret['result'].get(name):
ret['comment'] = ret['result'].get(name)
ret['result'] = False
if ret['result']:
ret['changes'][name] = 'created'
ret['comment'] = 'storage pool {0} was created'.format(name)
return ret
def absent(name, export=False, force=False):
'''
ensure storage pool is absent on the system
name : string
name of storage pool
export : boolean
        export instead of destroying the zpool if present
force : boolean
force destroy or export
'''
name = name.lower()
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
# config defaults
log.debug('zpool.absent::{0}::config::force = {1}'.format(name, force))
log.debug('zpool.absent::{0}::config::export = {1}'.format(name, export))
# ensure the pool is absent
if __salt__['zpool.exists'](name): # looks like we need to do some work
ret['result'] = False
if export: # try to export the zpool
if __opts__['test']:
ret['result'] = True
else:
ret['result'] = __salt__['zpool.export'](name, force=force)
ret['result'] = ret['result'].get(name) == 'exported'
else: # try to destroy the zpool
if __opts__['test']:
ret['result'] = True
else:
ret['result'] = __salt__['zpool.destroy'](name, force=force)
ret['result'] = ret['result'].get(name) == 'destroyed'
if ret['result']: # update the changes and comment
ret['changes'][name] = 'exported' if export else 'destroyed'
ret['comment'] = 'storage pool {0} was {1}'.format(name, ret['changes'][name])
else: # we are looking good
ret['result'] = True
ret['comment'] = 'storage pool {0} is absent'.format(name)
return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
|
"""
Unit tests for format checking
"""
from nose.plugins.skip import SkipTest
import os
import pylearn2
from pylearn2.devtools.tests.docscrape import docstring_errors
from pylearn2.devtools.list_files import list_files
from pylearn2.devtools.tests.pep8.pep8 import StyleGuide
whitelist_pep8 = [
"rbm_tools.py",
"training_algorithms/tests/test_learning_rule.py",
"distributions/mnd.py",
"models/sparse_autoencoder.py",
"models/tests/test_dbm.py",
"models/tests/test_autoencoder.py",
"models/tests/test_s3c_inference.py",
"models/tests/test_mnd.py",
"models/tests/test_s3c_misc.py",
"models/gsn.py",
"models/dbm/layer.py",
"models/dbm/__init__.py",
"models/dbm/ising.py",
"models/dbm/inference_procedure.py",
"models/differentiable_sparse_coding.py",
"models/local_coordinate_coding.py",
"models/mnd.py",
"models/s3c.py",
"models/autoencoder.py",
"tests/test_monitor.py",
"kmeans.py",
"packaged_dependencies/theano_linear/conv2d.py",
"packaged_dependencies/theano_linear/imaging.py",
"packaged_dependencies/theano_linear/pyramid.py",
"packaged_dependencies/theano_linear/unshared_conv/"
"test_gpu_unshared_conv.py",
"packaged_dependencies/theano_linear/unshared_conv/"
"test_localdot.py",
"packaged_dependencies/theano_linear/unshared_conv/localdot.py",
"packaged_dependencies/theano_linear/unshared_conv/"
"unshared_conv.py",
"packaged_dependencies/theano_linear/linear.py",
"packaged_dependencies/theano_linear/test_spconv.py",
"packaged_dependencies/theano_linear/test_matrixmul.py",
"packaged_dependencies/theano_linear/spconv.py",
"expr/tests/test_coding.py",
"expr/tests/test_normalize.py",
"expr/tests/test_stochastic_pool.py",
"expr/stochastic_pool.py",
"expr/sampling.py",
"expr/information_theory.py",
"expr/basic.py",
"testing/datasets.py",
"testing/cost.py",
"gui/graph_2D.py",
"sandbox/cuda_convnet/weight_acts.py",
"sandbox/cuda_convnet/filter_acts.py",
"sandbox/cuda_convnet/tests/test_filter_acts_strided.py",
"sandbox/cuda_convnet/tests/test_probabilistic_max_pooling.py",
"sandbox/cuda_convnet/tests/test_filter_acts.py",
"sandbox/cuda_convnet/tests/test_weight_acts_strided.py",
"sandbox/cuda_convnet/tests/test_image_acts_strided.py",
"sandbox/cuda_convnet/tests/test_img_acts.py",
"sandbox/cuda_convnet/tests/test_weight_acts.py",
"sandbox/cuda_convnet/tests/test_stochastic_pool.py",
"sandbox/cuda_convnet/specialized_bench.py",
"sandbox/cuda_convnet/response_norm.py",
"sandbox/cuda_convnet/__init__.py",
"sandbox/cuda_convnet/img_acts.py",
"sandbox/cuda_convnet/convnet_compile.py",
"sandbox/cuda_convnet/base_acts.py",
"sandbox/cuda_convnet/pthreads.py",
"sandbox/cuda_convnet/pool.py",
"sandbox/cuda_convnet/bench.py",
"sandbox/cuda_convnet/stochastic_pool.py",
"sandbox/cuda_convnet/probabilistic_max_pooling.py",
"sandbox/tuple_var.py",
"sandbox/lisa_rl/bandit/average_agent.py",
"sandbox/lisa_rl/bandit/classifier_bandit.py",
"sandbox/lisa_rl/bandit/classifier_agent.py",
"sandbox/lisa_rl/bandit/plot_reward.py",
"config/old_config.py",
"config/yaml_parse.py",
"datasets/utlc.py",
"datasets/mnistplus.py",
"datasets/cos_dataset.py",
"datasets/cifar10.py",
"datasets/svhn.py",
"datasets/tests/test_csv_dataset.py",
"datasets/tests/test_icml07.py",
"datasets/tests/test_utlc.py",
"datasets/preprocessing.py",
"datasets/config.py",
"datasets/icml07.py",
"datasets/filetensor.py",
"datasets/hepatitis.py",
"datasets/wiskott.py",
"datasets/mnist.py",
"datasets/csv_dataset.py",
"datasets/tl_challenge.py",
"datasets/retina.py",
"datasets/ocr.py",
"datasets/stl10.py",
"datasets/vector_spaces_dataset.py",
"datasets/debug.py",
"datasets/binarizer.py",
"utils/utlc.py",
"utils/tests/test_serial.py",
"utils/common_strings.py",
"utils/serial.py",
"utils/mem.py",
"dataset_get/dataset-get.py",
"dataset_get/helper-scripts/make-archive.py",
"dataset_get/dataset_resolver.py",
"monitor.py",
"optimization/batch_gradient_descent.py",
"optimization/minres.py",
"costs/ebm_estimation.py",
"costs/gsn.py",
"costs/mlp/missing_target_cost.py",
"costs/autoencoder.py",
"linear/conv2d.py",
"linear/local_c01b.py",
"linear/linear_transform.py",
"linear/conv2d_c01b.py",
"energy_functions/rbm_energy.py",
"scripts/lcc_tangents/make_dataset.py",
"scripts/pkl_inspector.py",
"scripts/show_binocular_greyscale_examples.py",
"scripts/jobman/tester.py",
"scripts/dbm/show_samples.py",
"scripts/dbm/show_reconstructions.py",
"scripts/dbm/dbm_metrics.py",
"scripts/dbm/top_filters.py",
"scripts/papers/maxout/svhn_preprocessing.py",
"scripts/papers/jia_huang_wkshp_11/fit_final_model.py",
"scripts/papers/jia_huang_wkshp_11/evaluate.py",
"scripts/papers/jia_huang_wkshp_11/extract_features.py",
"scripts/papers/jia_huang_wkshp_11/assemble.py",
"scripts/gpu_pkl_to_cpu_pkl.py",
"scripts/datasets/make_cifar10_whitened.py",
"scripts/datasets/make_cifar100_patches_8x8.py",
"scripts/datasets/make_cifar100_patches.py",
"scripts/datasets/make_cifar10_gcn_whitened.py",
"scripts/datasets/make_cifar100_whitened.py",
"scripts/datasets/make_stl10_patches_8x8.py",
"scripts/datasets/make_cifar100_gcn_whitened.py",
"scripts/datasets/make_stl10_whitened.py",
"scripts/datasets/make_stl10_patches.py",
"scripts/gsn_example.py",
"scripts/tutorials/deep_trainer/run_deep_trainer.py",
"scripts/tutorials/grbm_smd/make_dataset.py",
"scripts/tutorials/grbm_smd/test_grbm_smd.py",
"scripts/icml_2013_wrepl/multimodal/"
"extract_layer_2_kmeans_features.py",
"scripts/icml_2013_wrepl/multimodal/make_submission.py",
"scripts/icml_2013_wrepl/multimodal/lcn.py",
"scripts/icml_2013_wrepl/multimodal/extract_kmeans_features.py",
"scripts/icml_2013_wrepl/emotions/emotions_dataset.py",
"scripts/icml_2013_wrepl/emotions/make_submission.py",
"scripts/icml_2013_wrepl/black_box/black_box_dataset.py",
"scripts/icml_2013_wrepl/black_box/make_submission.py",
"scripts/diff_monitor.py",
"corruption.py",
"devtools/nan_guard.py",
"sandbox/lisa_rl/bandit/gaussian_bandit.py",
"config/tests/test_yaml_parse.py",
"utils/iteration.py",
"utils/track_version.py",
"scripts/get_version.py",
"blocks.py",
"training_algorithms/tests/test_bgd.py",
"training_algorithms/tests/test_default.py",
"training_algorithms/learning_rule.py",
"training_algorithms/bgd.py",
"training_algorithms/default.py",
"training_algorithms/training_algorithm.py",
"training_algorithms/sgd.py",
"distributions/tests/test_mnd.py",
"distributions/parzen.py",
"distributions/uniform_hypersphere.py",
"models/setup.py",
"models/independent_multiclass_logistic.py",
"models/softmax_regression.py",
"models/tests/test_svm.py",
"models/tests/test_reflection_clip.py",
"models/tests/test_mlp.py",
"models/tests/test_maxout.py",
"models/tests/test_convelemwise_sigm.py",
"models/mlp.py",
"models/dbm/sampling_procedure.py",
"models/svm.py",
"models/rbm.py",
"models/pca.py",
"tests/test_train.py",
"tests/test_theano.py",
"packaged_dependencies/theano_linear/unshared_conv/gpu_unshared_conv.py",
"packaged_dependencies/theano_linear/unshared_conv/test_unshared_conv.py",
"packaged_dependencies/theano_linear/linearmixin.py",
"packaged_dependencies/theano_linear/util.py",
"packaged_dependencies/theano_linear/__init__.py",
"packaged_dependencies/theano_linear/test_linear.py",
"expr/tests/test_nnet.py",
"expr/nnet.py",
"expr/image.py",
"expr/coding.py",
"expr/normalize.py",
"expr/probabilistic_max_pooling.py",
"testing/tests/test.py",
"testing/skip.py",
"testing/prereqs.py",
"testing/__init__.py",
"gui/get_weights_report.py",
"gui/patch_viewer.py",
"sandbox/cuda_convnet/tests/test_response_norm.py",
"sandbox/cuda_convnet/tests/profile_probabilistic_max_pooling.py",
"sandbox/cuda_convnet/tests/test_rop_pool.py",
"sandbox/cuda_convnet/tests/test_pool.py",
"sandbox/cuda_convnet/tests/test_common.py",
"sandbox/cuda_convnet/shared_code.py",
"sandbox/cuda_convnet/code_templates.py",
"sandbox/cuda_convnet/debug.py",
"sandbox/lisa_rl/bandit/agent.py",
"sandbox/lisa_rl/bandit/algorithm.py",
"sandbox/lisa_rl/bandit/environment.py",
"sandbox/lisa_rl/__init__.py",
"space/__init__.py",
"datasets/tests/test_preprocessing.py",
"datasets/tests/test_mnist.py",
"datasets/tests/test_cifar10.py",
"datasets/tests/test_dense_design_matrix.py",
"datasets/tests/test_vector_spaces_dataset.py",
"datasets/tests/test_npy_npz.py",
"datasets/avicenna.py",
"datasets/iris.py",
"datasets/adult.py",
"datasets/zca_dataset.py",
"datasets/npy_npz.py",
"datasets/control.py",
"datasets/cifar100.py",
"datasets/transformer_dataset.py",
"datasets/dataset.py",
"termination_criteria/__init__.py",
"__init__.py",
"utils/logger.py",
"utils/tests/test_mnist_ubyte.py",
"utils/tests/test_data_specs.py",
"utils/tests/test_bit_strings.py",
"utils/tests/test_iteration.py",
"utils/tests/test_string_utils.py",
"utils/image.py",
"utils/string_utils.py",
"utils/theano_graph.py",
"utils/__init__.py",
"utils/datasets.py",
"utils/data_specs.py",
"utils/insert_along_axis.py",
"utils/environ.py",
"utils/call_check.py",
"utils/python26.py",
"deprecated/classifier.py",
"train.py",
"classifier.py",
"dataset_get/helper-scripts/make-sources.py",
"pca.py",
"optimization/test_linesearch.py",
"optimization/test_minres.py",
"optimization/test_batch_gradient_descent.py",
"optimization/linear_cg.py",
"optimization/test_feature_sign.py",
"optimization/feature_sign.py",
"optimization/test_linear_cg.py",
"optimization/linesearch.py",
"costs/mlp/__init__.py",
"costs/mlp/dropout.py",
"costs/cost.py",
"costs/dbm.py",
"linear/tests/test_conv2d.py",
"linear/tests/test_conv2d_c01b.py",
"linear/matrixmul.py",
"energy_functions/energy_function.py",
"scripts/make_weights_image.py",
"scripts/plot_monitor.py",
"scripts/print_monitor.py",
"scripts/num_parameters.py",
"scripts/benchmark/time_relu.py",
"scripts/jobman/experiment.py",
"scripts/jobman/__init__.py",
"scripts/dbm/show_negative_chains.py",
"scripts/papers/maxout/compute_test_err.py",
"scripts/papers/jia_huang_wkshp_11/npy2mat.py",
"scripts/datasets/step_through_small_norb.py",
"scripts/datasets/step_through_norb_foveated.py",
"scripts/datasets/make_downsampled_stl10.py",
"scripts/datasets/browse_small_norb.py",
"scripts/datasets/make_mnistplus.py",
"scripts/mlp/predict_csv.py",
"scripts/find_gpu_fields.py",
"scripts/tutorials/convolutional_network/tests/test_convnet.py",
"scripts/tutorials/deep_trainer/test_deep_trainer.py",
"scripts/icml_2013_wrepl/multimodal/make_wordlist.py",
"base.py",
"devtools/tests/test_via_pyflakes.py",
"devtools/tests/test_shebangs.py",
"devtools/tests/pep8/pep8.py",
"devtools/tests/test_record.py",
"devtools/tests/docscrape.py",
"devtools/run_pyflakes.py",
"devtools/record.py",
"train_extensions/tests/test_window_flip.py",
"train_extensions/__init__.py",
"train_extensions/window_flip.py",
"train_extensions/best_params.py"
]
whitelist_docstrings = [
'scripts/datasets/step_through_norb_foveated.py',
'blocks.py',
'datasets/hdf5.py',
'rbm_tools.py',
'training_algorithms/tests/test_bgd.py',
'training_algorithms/tests/test_sgd.py',
'training_algorithms/tests/test_default.py',
'training_algorithms/learning_rule.py',
'training_algorithms/bgd.py',
'training_algorithms/default.py',
'training_algorithms/training_algorithm.py',
'training_algorithms/__init__.py',
'training_algorithms/sgd.py',
'distributions/tests/test_mnd.py',
'distributions/multinomial.py',
'distributions/parzen.py',
'distributions/__init__.py',
'distributions/mnd.py',
'distributions/uniform_hypersphere.py',
'models/setup.py',
'models/independent_multiclass_logistic.py',
'models/softmax_regression.py',
'models/sparse_autoencoder.py',
'models/tests/test_svm.py',
'models/tests/test_reflection_clip.py',
'models/tests/test_dbm.py',
'models/tests/test_gsn.py',
'models/tests/test_dropout.py',
'models/tests/test_autoencoder.py',
'models/tests/test_mlp.py',
'models/tests/test_s3c_inference.py',
'models/tests/test_maxout.py',
'models/tests/test_mnd.py',
'models/tests/test_rbm.py',
'models/tests/test_s3c_misc.py',
'models/gsn.py',
'models/dbm/sampling_procedure.py',
'models/dbm/layer.py',
'models/dbm/__init__.py',
'models/dbm/dbm.py',
'models/dbm/ising.py',
'models/dbm/inference_procedure.py',
'models/differentiable_sparse_coding.py',
'models/local_coordinate_coding.py',
'models/maxout.py',
'models/s3c.py',
'models/mnd.py',
'models/svm.py',
'models/rbm.py',
'models/autoencoder.py',
'tests/test_dbm_metrics.py',
'tests/test_monitor.py',
'tests/test_train.py',
'tests/test_theano.py',
'tests/rbm/test_ais.py',
'kmeans.py',
'packaged_dependencies/__init__.py',
'packaged_dependencies/theano_linear/imaging.py',
'packaged_dependencies/theano_linear/unshared_conv/__init__.py',
'packaged_dependencies/theano_linear/unshared_conv/unshared_conv.py',
'packaged_dependencies/theano_linear/linearmixin.py',
'packaged_dependencies/theano_linear/linear.py',
'packaged_dependencies/theano_linear/test_spconv.py',
'expr/activations.py',
'expr/tests/test_probabilistic_max_pooling.py',
'expr/tests/test_preprocessing.py',
'expr/tests/test_nnet.py',
'expr/tests/test_coding.py',
'expr/tests/test_normalize.py',
'expr/tests/test_stochastic_pool.py',
'expr/preprocessing.py',
'expr/nnet.py',
'expr/image.py',
'expr/coding.py',
'expr/__init__.py',
'expr/stochastic_pool.py',
'expr/sampling.py',
'expr/normalize.py',
'expr/probabilistic_max_pooling.py',
'expr/information_theory.py',
'expr/basic.py',
'testing/tests/test.py',
'testing/skip.py',
'testing/prereqs.py',
'testing/__init__.py',
'testing/datasets.py',
'testing/cost.py',
'gui/graph_2D.py',
'gui/get_weights_report.py',
'gui/__init__.py',
'gui/patch_viewer.py',
'scalar.py',
'sandbox/cuda_convnet/weight_acts.py',
'sandbox/cuda_convnet/filter_acts.py',
'sandbox/cuda_convnet/tests/test_filter_acts_strided.py',
'sandbox/cuda_convnet/tests/test_probabilistic_max_pooling.py',
'sandbox/cuda_convnet/tests/test_filter_acts.py',
'sandbox/cuda_convnet/tests/test_img_acts.py',
'sandbox/cuda_convnet/tests/test_response_norm.py',
'sandbox/cuda_convnet/tests/profile_probabilistic_max_pooling.py',
'sandbox/cuda_convnet/tests/test_weight_acts.py',
'sandbox/cuda_convnet/tests/test_rop_pool.py',
'sandbox/cuda_convnet/tests/test_pool.py',
'sandbox/cuda_convnet/tests/test_common.py',
'sandbox/cuda_convnet/tests/test_stochastic_pool.py',
'sandbox/cuda_convnet/shared_code.py',
'sandbox/cuda_convnet/__init__.py',
'sandbox/cuda_convnet/img_acts.py',
'sandbox/cuda_convnet/base_acts.py',
'sandbox/cuda_convnet/pool.py',
'sandbox/cuda_convnet/stochastic_pool.py',
'sandbox/cuda_convnet/code_templates.py',
'sandbox/cuda_convnet/probabilistic_max_pooling.py',
'sandbox/tuple_var.py',
'sandbox/__init__.py',
'sandbox/lisa_rl/bandit/simulator.py',
'sandbox/lisa_rl/bandit/agent.py',
'sandbox/lisa_rl/bandit/algorithm.py',
'sandbox/lisa_rl/bandit/environment.py',
'sandbox/lisa_rl/bandit/average_agent.py',
'sandbox/lisa_rl/bandit/classifier_bandit.py',
'sandbox/lisa_rl/bandit/__init__.py',
'sandbox/lisa_rl/bandit/classifier_agent.py',
'sandbox/lisa_rl/bandit/gaussian_bandit.py',
'sandbox/lisa_rl/__init__.py',
'config/old_config.py',
'config/tests/test_yaml_parse.py',
'config/yaml_parse.py',
'space/tests/test_space.py',
'space/__init__.py',
'datasets/norb.py',
'datasets/utlc.py',
'datasets/mnistplus.py',
'datasets/cos_dataset.py',
'datasets/cifar10.py',
'datasets/svhn.py',
'datasets/tests/test_preprocessing.py',
'datasets/tests/test_mnist.py',
'datasets/tests/test_imports.py',
'datasets/tests/test_cifar10.py',
'datasets/tests/test_norb.py',
'datasets/tests/test_dense_design_matrix.py',
'datasets/tests/test_vector_spaces_dataset.py',
'datasets/tests/test_four_regions.py',
'datasets/tests/test_csv_dataset.py',
'datasets/tests/test_icml07.py',
'datasets/tests/test_utlc.py',
'datasets/preprocessing.py',
'datasets/avicenna.py',
'datasets/iris.py',
'datasets/config.py',
'datasets/dense_design_matrix.py',
'datasets/adult.py',
'datasets/tfd.py',
'datasets/icml07.py',
'datasets/zca_dataset.py',
'datasets/filetensor.py',
'datasets/npy_npz.py',
'datasets/hepatitis.py',
'datasets/wiskott.py',
'datasets/control.py',
'datasets/exc.py',
'datasets/__init__.py',
'datasets/mnist.py',
'datasets/sparse_dataset.py',
'datasets/csv_dataset.py',
'datasets/cifar100.py',
'datasets/tl_challenge.py',
'datasets/transformer_dataset.py',
'datasets/norb_small.py',
'datasets/retina.py',
'datasets/dataset.py',
'datasets/ocr.py',
'datasets/stl10.py',
'datasets/matlab_dataset.py',
'datasets/vector_spaces_dataset.py',
'datasets/four_regions.py',
'datasets/debug.py',
'datasets/binarizer.py',
'termination_criteria/__init__.py',
'__init__.py',
'utils/utlc.py',
'utils/setup.py',
'utils/compile.py',
'utils/logger.py',
'utils/general.py',
'utils/one_hot.py',
'utils/testing.py',
'utils/tests/test_mnist_ubyte.py',
'utils/tests/test_data_specs.py',
'utils/tests/test_video.py',
'utils/tests/test_bit_strings.py',
'utils/tests/test_one_hot.py',
'utils/tests/test_rng.py',
'utils/tests/test_pooling.py',
'utils/tests/test_iteration.py',
'utils/tests/test_string_utils.py',
'utils/tests/test_insert_along_axis.py',
'utils/tests/test_utlc.py',
'utils/tests/test_compile.py',
'utils/tests/test_key_aware.py',
'utils/key_aware.py',
'utils/image.py',
'utils/video.py',
'utils/string_utils.py',
'utils/bit_strings.py',
'utils/iteration.py',
'utils/pooling.py',
'utils/theano_graph.py',
'utils/exc.py',
'utils/common_strings.py',
'utils/datasets.py',
'utils/serial.py',
'utils/data_specs.py',
'utils/shell.py',
'utils/rng.py',
'utils/insert_along_axis.py',
'utils/environ.py',
'utils/call_check.py',
'utils/mnist_ubyte.py',
'utils/track_version.py',
'utils/mem.py',
'utils/python26.py',
'utils/timing.py',
'deprecated/__init__.py',
'deprecated/classifier.py',
'train.py',
'format/tests/test_target_format.py',
'format/__init__.py',
'dataset_get/dataset-get.py',
'dataset_get/helper-scripts/make-sources.py',
'dataset_get/helper-scripts/make-archive.py',
'dataset_get/dataset_resolver.py',
'pca.py',
'monitor.py',
'optimization/batch_gradient_descent.py',
'optimization/__init__.py',
'optimization/test_batch_gradient_descent.py',
'optimization/linear_cg.py',
'optimization/minres.py',
'optimization/test_feature_sign.py',
'optimization/feature_sign.py',
'optimization/linesearch.py',
'costs/tests/test_lp_penalty_cost.py',
'costs/gsn.py',
'costs/__init__.py',
'costs/mlp/__init__.py',
'costs/mlp/dropout.py',
'costs/mlp/missing_target_cost.py',
'costs/dbm.py',
'costs/autoencoder.py',
'linear/conv2d.py',
'linear/tests/test_matrixmul.py',
'linear/local_c01b.py',
'linear/matrixmul.py',
'linear/__init__.py',
'linear/linear_transform.py',
'linear/conv2d_c01b.py',
'energy_functions/tests/__init__.py',
'energy_functions/rbm_energy.py',
'energy_functions/__init__.py',
'energy_functions/energy_function.py',
'scripts/plot_monitor.py',
'scripts/print_model.py',
'scripts/tests/__init__.py',
'scripts/pkl_inspector.py',
'scripts/get_version.py',
'scripts/print_monitor.py',
'scripts/show_binocular_greyscale_examples.py',
'scripts/num_parameters.py',
'scripts/jobman/tester.py',
'scripts/jobman/experiment.py',
'scripts/jobman/__init__.py',
'scripts/dbm/__init__.py',
'scripts/dbm/dbm_metrics.py',
'scripts/papers/__init__.py',
'scripts/papers/jia_huang_wkshp_11/extract_features.py',
'scripts/print_channel_doc.py',
'scripts/gpu_pkl_to_cpu_pkl.py',
'scripts/datasets/step_through_small_norb.py',
'scripts/datasets/download_mnist.py',
'scripts/datasets/browse_small_norb.py',
'scripts/datasets/make_mnistplus.py',
'scripts/__init__.py',
'scripts/gsn_example.py',
'scripts/mlp/predict_csv.py',
'scripts/mlp/__init__.py',
'scripts/find_gpu_fields.py',
'scripts/tutorials/dbm_demo/train_dbm.py',
'scripts/tutorials/dbm_demo/__init__.py',
'scripts/tutorials/tests/test_dbm.py',
'scripts/tutorials/tests/test_mlp_nested.py',
'scripts/tutorials/multilayer_perceptron/tests/test_mlp.py',
'scripts/tutorials/convolutional_network/tests/test_convnet.py',
'scripts/tutorials/softmax_regression/tests/test_softmaxreg.py',
'scripts/tutorials/deep_trainer/__init__.py',
'scripts/tutorials/deep_trainer/run_deep_trainer.py',
'scripts/tutorials/grbm_smd/make_dataset.py',
'scripts/tutorials/grbm_smd/__init__.py',
'scripts/tutorials/grbm_smd/test_grbm_smd.py',
'scripts/tutorials/__init__.py',
'scripts/tutorials/jobman_demo/utils.py',
'scripts/tutorials/jobman_demo/__init__.py',
'scripts/tutorials/stacked_autoencoders/tests/test_dae.py',
'scripts/icml_2013_wrepl/__init__.py',
'scripts/icml_2013_wrepl/multimodal/extract_layer_2_kmeans_features.py',
'scripts/icml_2013_wrepl/multimodal/make_submission.py',
'scripts/icml_2013_wrepl/multimodal/lcn.py',
'scripts/icml_2013_wrepl/multimodal/__init__.py',
'scripts/icml_2013_wrepl/multimodal/extract_kmeans_features.py',
'scripts/icml_2013_wrepl/emotions/emotions_dataset.py',
'scripts/icml_2013_wrepl/emotions/make_submission.py',
'scripts/icml_2013_wrepl/emotions/__init__.py',
'scripts/icml_2013_wrepl/black_box/black_box_dataset.py',
'scripts/icml_2013_wrepl/black_box/make_submission.py',
'scripts/icml_2013_wrepl/black_box/__init__.py',
'scripts/diff_monitor.py',
'base.py',
'devtools/tests/test_via_pyflakes.py',
'devtools/tests/test_shebangs.py',
'devtools/tests/test_record.py',
'devtools/tests/__init__.py',
'devtools/tests/docscrape.py',
'devtools/run_pyflakes.py',
'devtools/nan_guard.py',
'devtools/__init__.py',
'devtools/record.py',
'train_extensions/best_params.py',
'corruption.py',
'datasets/tests/test_tl_challenge.py',
'datasets/tests/test_tfd.py',
'datasets/tests/test_npy_npz.py',
'linear/tests/test_conv2d.py',
'devtools/tests/pep8/pep8.py',
'devtools/tests/pep8/__init__.py']
# add files which have long execution time to whitelist_docstrings
whitelist_docstrings.extend([
'sandbox/cuda_convnet/debug.py',
'energy_functions/tests/test_rbm_energy.py',
'scripts/icml_2013_wrepl/multimodal/make_wordlist.py',
'scripts/make_weights_image.py', 'costs/ebm_estimation.py',
'classifier.py', 'scripts/lcc_tangents/make_dataset.py',
'scripts/datasets/make_cifar10_whitened.py',
'scripts/datasets/make_cifar100_patches.py',
'scripts/datasets/make_cifar10_gcn_whitened.py',
'scripts/datasets/make_stl10_patches_8x8.py',
'scripts/datasets/make_cifar100_gcn_whitened.py',
'scripts/datasets/make_stl10_whitened.py',
'scripts/datasets/make_stl10_patches.py'])
# add files which fail to run to whitelist_docstrings
whitelist_docstrings.extend([
'training_algorithms/tests/test_learning_rule.py',
'models/pca.py',
'datasets/tests/test_hdf5.py',
'linear/tests/test_conv2d_c01b.py',
'packaged_dependencies/theano_linear/conv2d.py',
'packaged_dependencies/theano_linear/pyramid.py',
'packaged_dependencies/theano_linear/unshared_conv/gpu_unshared_conv.py',
'packaged_dependencies/theano_linear/unshared_conv/'
'test_gpu_unshared_conv.py',
'packaged_dependencies/theano_linear/unshared_conv/test_localdot.py',
'packaged_dependencies/theano_linear/unshared_conv/test_unshared_conv.py',
'packaged_dependencies/theano_linear/unshared_conv/localdot.py',
'packaged_dependencies/theano_linear/util.py',
'packaged_dependencies/theano_linear/__init__.py',
'packaged_dependencies/theano_linear/test_matrixmul.py',
'packaged_dependencies/theano_linear/test_linear.py',
'packaged_dependencies/theano_linear/spconv.py',
'sandbox/cuda_convnet/tests/test_weight_acts_strided.py',
'sandbox/cuda_convnet/tests/test_image_acts_strided.py',
'sandbox/cuda_convnet/specialized_bench.py',
'sandbox/cuda_convnet/response_norm.py',
'sandbox/cuda_convnet/convnet_compile.py',
'sandbox/cuda_convnet/pthreads.py',
'sandbox/cuda_convnet/bench.py',
'sandbox/lisa_rl/bandit/plot_reward.py',
'sandbox/lisa_rl/bandit/simulate.py',
'config/__init__.py',
'utils/__init__.py',
'optimization/test_linesearch.py',
'optimization/test_minres.py',
'optimization/test_linear_cg.py',
'scripts/dbm/show_samples.py',
'scripts/dbm/show_reconstructions.py',
'scripts/dbm/top_filters.py',
'scripts/dbm/show_negative_chains.py',
'scripts/papers/maxout/svhn_preprocessing.py',
'scripts/papers/maxout/compute_test_err.py',
'scripts/papers/jia_huang_wkshp_11/fit_final_model.py',
'scripts/papers/jia_huang_wkshp_11/evaluate.py',
'scripts/papers/jia_huang_wkshp_11/npy2mat.py',
'scripts/papers/jia_huang_wkshp_11/assemble.py',
'scripts/datasets/make_cifar100_patches_8x8.py',
'scripts/datasets/make_downsampled_stl10.py',
'scripts/datasets/make_cifar100_whitened.py',
'scripts/tutorials/deep_trainer/test_deep_trainer.py',
'scripts/icml_2013_wrepl/black_box/learn_zca.py',
'train_extensions/tests/test_window_flip.py',
'train_extensions/window_flip.py',
'linear/tests/test_local_c01b.py'])
def test_format_pep8():
"""
Test if pep8 is respected.
"""
pep8_checker = StyleGuide()
files_to_check = []
for path in list_files(".py"):
rel_path = os.path.relpath(path, pylearn2.__path__[0])
if rel_path in whitelist_pep8:
continue
else:
files_to_check.append(path)
report = pep8_checker.check_files(files_to_check)
if report.total_errors > 0:
raise AssertionError("PEP8 Format not respected")
def print_files_information_pep8():
"""
Print the list of files which can be removed from the whitelist and the
list of files which do not respect PEP8 formatting that aren't in the
whitelist
"""
infracting_files = []
non_infracting_files = []
pep8_checker = StyleGuide(quiet=True)
for path in list_files(".py"):
number_of_infractions = pep8_checker.input_file(path)
rel_path = os.path.relpath(path, pylearn2.__path__[0])
if number_of_infractions > 0:
if rel_path not in whitelist_pep8:
infracting_files.append(path)
else:
if rel_path in whitelist_pep8:
non_infracting_files.append(path)
print "Files that must be corrected or added to whitelist:"
for file in infracting_files:
print file
print "Files that can be removed from whitelist:"
for file in non_infracting_files:
print file
def test_format_docstrings():
"""
Test if docstrings are well formatted.
"""
try:
verify_format_docstrings()
    except SkipTest:
        import traceback
        traceback.print_exc()
raise AssertionError(
"Some file raised SkipTest on import, and inadvertently"
" canceled the documentation testing."
)
def verify_format_docstrings():
"""
Implementation of `test_format_docstrings`. The implementation is
factored out so it can be placed inside a guard against SkipTest.
"""
format_infractions = []
for path in list_files(".py"):
rel_path = os.path.relpath(path, pylearn2.__path__[0])
if rel_path in whitelist_docstrings:
continue
try:
format_infractions.extend(docstring_errors(path))
        except Exception as e:
format_infractions.append(["%s failed to run so format cannot "
"be checked. Error message:\n %s" %
(rel_path, e)])
if len(format_infractions) > 0:
msg = "\n".join(':'.join(line) for line in format_infractions)
raise AssertionError("Docstring format not respected:\n%s" % msg)
if __name__ == "__main__":
print_files_information_pep8()
|
|
'''
Nearly all of the estimations for the paper "The Distribution of Wealth and the
Marginal Propensity to Consume", by Chris Carroll, Jiri Slacalek, Kiichi Tokuoka,
and Matthew White. The micro model is a very slightly altered version of
ConsIndShockModel; the macro model is ConsAggShockModel. See SetupParamsCSTW
for parameters and execution options.
'''
# Import the HARK library. The assumption is that this code is in a folder
# contained in the HARK folder. Also import ConsumptionSavingModel
import sys
import os
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('../ConsumptionSaving'))
import numpy as np
from copy import deepcopy
from time import time
from HARKutilities import approxMeanOneLognormal, combineIndepDstns, approxUniform, calcWeightedAvg, \
getPercentiles, getLorenzShares, calcSubpopAvg
from HARKsimulation import drawDiscrete, drawMeanOneLognormal
from HARKcore import AgentType
from HARKparallel import multiThreadCommandsFake
import SetupParamsCSTW as Params
import ConsIndShockModel as Model
from ConsAggShockModel import CobbDouglasEconomy, AggShockConsumerType
from scipy.optimize import golden, brentq
import matplotlib.pyplot as plt
import csv
# =================================================================
# ====== Make an extension of the basic ConsumerType ==============
# =================================================================
class cstwMPCagent(Model.IndShockConsumerType):
'''
A consumer type in the cstwMPC model; a slight modification of base ConsumerType.
'''
def __init__(self,time_flow=True,**kwds):
'''
Make a new consumer type for the cstwMPC model.
Parameters
----------
time_flow : boolean
            Indicator for whether time is "flowing" forward for this agent.
**kwds : keyword arguments
Any number of keyword arguments of the form key=value. Each value
will be assigned to the attribute named in self.
Returns
-------
new instance of cstwMPCagent
'''
# Initialize a basic AgentType
AgentType.__init__(self,solution_terminal=deepcopy(Model.IndShockConsumerType.solution_terminal_),
time_flow=time_flow,pseudo_terminal=False,**kwds)
# Add consumer-type specific objects, copying to create independent versions
self.time_vary = deepcopy(Model.IndShockConsumerType.time_vary_)
self.time_inv = deepcopy(Model.IndShockConsumerType.time_inv_)
self.solveOnePeriod = Model.solveConsIndShock
self.update()
def simulateCSTW(self):
'''
The simulation method for the no aggregate shocks version of the model.
Initializes the agent type, simulates a history of state and control
variables, and stores the wealth history in self.W_history and the
annualized MPC history in self.kappa_history.
Parameters
----------
none
Returns
-------
none
'''
self.initializeSim()
self.simConsHistory()
self.W_history = self.pHist*self.bHist/self.Rfree
if Params.do_lifecycle:
self.W_history = self.W_history*self.cohort_scale
self.kappa_history = 1.0 - (1.0 - self.MPChist)**4
def update(self):
'''
Update the income process, the assets grid, and the terminal solution.
Parameters
----------
none
Returns
-------
none
'''
orig_flow = self.time_flow
if self.cycles == 0: # hacky fix for labor supply l_bar
self.updateIncomeProcessAlt()
else:
self.updateIncomeProcess()
self.updateAssetsGrid()
self.updateSolutionTerminal()
self.timeFwd()
self.resetRNG()
if self.cycles > 0:
self.IncomeDstn = Model.applyFlatIncomeTax(self.IncomeDstn,
tax_rate=self.tax_rate,
T_retire=self.T_retire,
unemployed_indices=range(0,(self.TranShkCount+1)*
self.PermShkCount,self.TranShkCount+1))
self.makeIncShkHist()
if not orig_flow:
self.timeRev()
def updateIncomeProcessAlt(self):
'''
An alternative method for constructing the income process in the infinite
horizon model, where the labor supply l_bar creates a small oddity.
Parameters
----------
none
Returns
-------
none
'''
tax_rate = (self.IncUnemp*self.UnempPrb)/(self.l_bar*(1.0-self.UnempPrb))
TranShkDstn = deepcopy(approxMeanOneLognormal(self.TranShkCount,sigma=self.TranShkStd[0],tail_N=0))
TranShkDstn[0] = np.insert(TranShkDstn[0]*(1.0-self.UnempPrb),0,self.UnempPrb)
TranShkDstn[1] = np.insert(self.l_bar*TranShkDstn[1]*(1.0-tax_rate),0,self.IncUnemp)
PermShkDstn = approxMeanOneLognormal(self.PermShkCount,sigma=self.PermShkStd[0],tail_N=0)
self.IncomeDstn = [combineIndepDstns(PermShkDstn,TranShkDstn)]
self.TranShkDstn = TranShkDstn
self.PermShkDstn = PermShkDstn
self.addToTimeVary('IncomeDstn')
def assignBetaDistribution(type_list,DiscFac_list):
'''
Assigns the discount factors in DiscFac_list to the types in type_list. If
there is heterogeneity beyond the discount factor, then the same DiscFac is
assigned to consecutive types.
Parameters
----------
type_list : [cstwMPCagent]
The list of types that should be assigned discount factors.
DiscFac_list : [float] or np.array
List of discount factors to assign to the types.
Returns
-------
none
'''
DiscFac_N = len(DiscFac_list)
    type_N = len(type_list)//DiscFac_N
j = 0
b = 0
while j < len(type_list):
t = 0
while t < type_N:
type_list[j](DiscFac = DiscFac_list[b])
t += 1
j += 1
b += 1
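# Illustrative sketch (comments only, not executed; hypothetical values): with
# Params.pref_type_count = 3 and a uniform "beta-dist" of half-width nabla
# around a center DiscFac, the assignment above is used roughly as
#     DiscFac_list = approxUniform(N=3, bot=0.96 - 0.01, top=0.96 + 0.01)[1]
#     assignBetaDistribution(est_type_list, DiscFac_list)
# so each of the three discount factors is given to len(est_type_list)//3
# consecutive agent types.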
# =================================================================
# ====== Make some data analysis and reporting tools ==============
# =================================================================
def calculateKYratioDifference(sim_wealth,weights,total_output,target_KY):
'''
Calculates the absolute distance between the simulated capital-to-output
ratio and the true U.S. level.
Parameters
----------
sim_wealth : numpy.array
Array with simulated wealth values.
weights : numpy.array
List of weights for each row of sim_wealth.
total_output : float
Denominator for the simulated K/Y ratio.
target_KY : float
Actual U.S. K/Y ratio to match.
Returns
-------
distance : float
Absolute distance between simulated and actual K/Y ratios.
'''
sim_K = calcWeightedAvg(sim_wealth,weights)/(Params.l_bar)
sim_KY = sim_K/total_output
distance = (sim_KY - target_KY)**1.0
return distance
def calculateLorenzDifference(sim_wealth,weights,percentiles,target_levels):
'''
    Calculates the sum of squared differences between the simulated Lorenz curve
at the specified percentile levels and the target Lorenz levels.
Parameters
----------
sim_wealth : numpy.array
Array with simulated wealth values.
weights : numpy.array
List of weights for each row of sim_wealth.
percentiles : [float]
Points in the distribution of wealth to match.
target_levels : np.array
Actual U.S. Lorenz curve levels at the specified percentiles.
Returns
-------
distance : float
Sum of squared distances between simulated and target Lorenz curves.
'''
sim_lorenz = getLorenzShares(sim_wealth,weights=weights,percentiles=percentiles)
distance = sum((100*sim_lorenz-100*target_levels)**2)
return distance
# Define the main simulation process for matching the K/Y ratio
def simulateKYratioDifference(DiscFac,nabla,N,type_list,weights,total_output,target):
'''
Assigns a uniform distribution over DiscFac with width 2*nabla and N points, then
    solves and simulates all agent types in type_list and compares the simulated
K/Y ratio to the target K/Y ratio.
Parameters
----------
DiscFac : float
Center of the uniform distribution of discount factors.
nabla : float
Width of the uniform distribution of discount factors.
N : int
Number of discrete consumer types.
type_list : [cstwMPCagent]
List of agent types to solve and simulate after assigning discount factors.
weights : np.array
Age-conditional array of population weights.
total_output : float
Total output of the economy, denominator for the K/Y calculation.
target : float
Target level of capital-to-output ratio.
Returns
-------
my_diff : float
Difference between simulated and target capital-to-output ratios.
'''
    if isinstance(DiscFac,(list,np.ndarray)):
DiscFac = DiscFac[0]
DiscFac_list = approxUniform(N,DiscFac-nabla,DiscFac+nabla)[1] # only take values, not probs
assignBetaDistribution(type_list,DiscFac_list)
multiThreadCommandsFake(type_list,beta_point_commands)
my_diff = calculateKYratioDifference(np.vstack((this_type.W_history for this_type in type_list)),
np.tile(weights/float(N),N),total_output,target)
return my_diff
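# Worked example of the objective above (hypothetical numbers, not executed):
# if calcWeightedAvg(sim_wealth,weights)/Params.l_bar/total_output gives a
# simulated K/Y ratio of 9.90 while the target is 10.26, the function returns
# 9.90 - 10.26 = -0.36; brentq later searches over DiscFac for the root of
# this difference.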
def mystr(number):
    '''
    Formats a float with exactly three decimal places for display as a string.
    '''
    return "{:.3f}".format(number)
def makeCSTWresults(DiscFac,nabla,save_name=None):
'''
Produces a variety of results for the cstwMPC paper (usually after estimating).
Parameters
----------
DiscFac : float
Center of the uniform distribution of discount factors
nabla : float
Width of the uniform distribution of discount factors
save_name : string
Name to save the calculated results, for later use in producing figures
and tables, etc.
Returns
-------
none
'''
DiscFac_list = approxUniform(N=Params.pref_type_count,bot=DiscFac-nabla,top=DiscFac+nabla)[1]
assignBetaDistribution(est_type_list,DiscFac_list)
multiThreadCommandsFake(est_type_list,beta_point_commands)
lorenz_distance = np.sqrt(betaDistObjective(nabla))
makeCSTWstats(DiscFac,nabla,est_type_list,Params.age_weight_all,lorenz_distance,save_name)
def makeCSTWstats(DiscFac,nabla,this_type_list,age_weight,lorenz_distance=0.0,save_name=None):
'''
Displays (and saves) a bunch of statistics. Separate from makeCSTWresults()
for compatibility with the aggregate shock model.
Parameters
----------
DiscFac : float
Center of the uniform distribution of discount factors
nabla : float
Width of the uniform distribution of discount factors
this_type_list : [cstwMPCagent]
List of agent types in the economy.
age_weight : np.array
Age-conditional array of weights for the wealth data.
lorenz_distance : float
Distance between simulated and actual Lorenz curves, for display.
save_name : string
Name to save the calculated results, for later use in producing figures
and tables, etc.
Returns
-------
none
'''
sim_length = this_type_list[0].sim_periods
sim_wealth = (np.vstack((this_type.W_history for this_type in this_type_list))).flatten()
sim_wealth_short = (np.vstack((this_type.W_history[0:sim_length,:] for this_type in this_type_list))).flatten()
sim_kappa = (np.vstack((this_type.kappa_history for this_type in this_type_list))).flatten()
sim_income = (np.vstack((this_type.pHist[0:sim_length,:]*np.asarray(this_type.TranShkHist[0:sim_length,:]) for this_type in this_type_list))).flatten()
sim_ratio = (np.vstack((this_type.W_history[0:sim_length,:]/this_type.pHist[0:sim_length,:] for this_type in this_type_list))).flatten()
if Params.do_lifecycle:
sim_unemp = (np.vstack((np.vstack((this_type.IncUnemp == this_type.TranShkHist[0:Params.working_T,:],np.zeros((Params.retired_T+1,this_type_list[0].Nagents),dtype=bool))) for this_type in this_type_list))).flatten()
sim_emp = (np.vstack((np.vstack((this_type.IncUnemp != this_type.TranShkHist[0:Params.working_T,:],np.zeros((Params.retired_T+1,this_type_list[0].Nagents),dtype=bool))) for this_type in this_type_list))).flatten()
sim_ret = (np.vstack((np.vstack((np.zeros((Params.working_T,this_type_list[0].Nagents),dtype=bool),np.ones((Params.retired_T+1,this_type_list[0].Nagents),dtype=bool))) for this_type in this_type_list))).flatten()
else:
sim_unemp = np.vstack((this_type.IncUnemp == this_type.TranShkHist[0:sim_length,:] for this_type in this_type_list)).flatten()
sim_emp = np.vstack((this_type.IncUnemp != this_type.TranShkHist[0:sim_length,:] for this_type in this_type_list)).flatten()
sim_ret = np.zeros(sim_emp.size,dtype=bool)
sim_weight_all = np.tile(np.repeat(age_weight,this_type_list[0].Nagents),Params.pref_type_count)
if Params.do_beta_dist and Params.do_lifecycle:
kappa_mean_by_age_type = (np.mean(np.vstack((this_type.kappa_history for this_type in this_type_list)),axis=1)).reshape((Params.pref_type_count*3,DropoutType.T_total+1))
kappa_mean_by_age_pref = np.zeros((Params.pref_type_count,DropoutType.T_total+1)) + np.nan
for j in range(Params.pref_type_count):
kappa_mean_by_age_pref[j,] = Params.d_pct*kappa_mean_by_age_type[3*j+0,] + Params.h_pct*kappa_mean_by_age_type[3*j+1,] + Params.c_pct*kappa_mean_by_age_type[3*j+2,]
kappa_mean_by_age = np.mean(kappa_mean_by_age_pref,axis=0)
kappa_lo_beta_by_age = kappa_mean_by_age_pref[0,:]
kappa_hi_beta_by_age = kappa_mean_by_age_pref[Params.pref_type_count-1,:]
lorenz_fig_data = makeLorenzFig(Params.SCF_wealth,Params.SCF_weights,sim_wealth,sim_weight_all)
mpc_fig_data = makeMPCfig(sim_kappa,sim_weight_all)
kappa_all = calcWeightedAvg(np.vstack((this_type.kappa_history for this_type in this_type_list)),np.tile(age_weight/float(Params.pref_type_count),Params.pref_type_count))
kappa_unemp = np.sum(sim_kappa[sim_unemp]*sim_weight_all[sim_unemp])/np.sum(sim_weight_all[sim_unemp])
kappa_emp = np.sum(sim_kappa[sim_emp]*sim_weight_all[sim_emp])/np.sum(sim_weight_all[sim_emp])
kappa_ret = np.sum(sim_kappa[sim_ret]*sim_weight_all[sim_ret])/np.sum(sim_weight_all[sim_ret])
my_cutoffs = [(0.99,1),(0.9,1),(0.8,1),(0.6,0.8),(0.4,0.6),(0.2,0.4),(0.0,0.2)]
kappa_by_ratio_groups = calcSubpopAvg(sim_kappa,sim_ratio,my_cutoffs,sim_weight_all)
kappa_by_income_groups = calcSubpopAvg(sim_kappa,sim_income,my_cutoffs,sim_weight_all)
quintile_points = getPercentiles(sim_wealth_short,weights=sim_weight_all,percentiles=[0.2, 0.4, 0.6, 0.8])
wealth_quintiles = np.ones(sim_wealth_short.size,dtype=int)
wealth_quintiles[sim_wealth_short > quintile_points[0]] = 2
wealth_quintiles[sim_wealth_short > quintile_points[1]] = 3
wealth_quintiles[sim_wealth_short > quintile_points[2]] = 4
wealth_quintiles[sim_wealth_short > quintile_points[3]] = 5
MPC_cutoff = getPercentiles(sim_kappa,weights=sim_weight_all,percentiles=[2.0/3.0])
these_quintiles = wealth_quintiles[sim_kappa > MPC_cutoff]
these_weights = sim_weight_all[sim_kappa > MPC_cutoff]
hand_to_mouth_total = np.sum(these_weights)
hand_to_mouth_pct = []
for q in range(5):
hand_to_mouth_pct.append(np.sum(these_weights[these_quintiles == (q+1)])/hand_to_mouth_total)
results_string = 'Estimate is DiscFac=' + str(DiscFac) + ', nabla=' + str(nabla) + '\n'
results_string += 'Lorenz distance is ' + str(lorenz_distance) + '\n'
results_string += 'Average MPC for all consumers is ' + mystr(kappa_all) + '\n'
results_string += 'Average MPC in the top percentile of W/Y is ' + mystr(kappa_by_ratio_groups[0]) + '\n'
results_string += 'Average MPC in the top decile of W/Y is ' + mystr(kappa_by_ratio_groups[1]) + '\n'
results_string += 'Average MPC in the top quintile of W/Y is ' + mystr(kappa_by_ratio_groups[2]) + '\n'
results_string += 'Average MPC in the second quintile of W/Y is ' + mystr(kappa_by_ratio_groups[3]) + '\n'
results_string += 'Average MPC in the middle quintile of W/Y is ' + mystr(kappa_by_ratio_groups[4]) + '\n'
results_string += 'Average MPC in the fourth quintile of W/Y is ' + mystr(kappa_by_ratio_groups[5]) + '\n'
results_string += 'Average MPC in the bottom quintile of W/Y is ' + mystr(kappa_by_ratio_groups[6]) + '\n'
results_string += 'Average MPC in the top percentile of y is ' + mystr(kappa_by_income_groups[0]) + '\n'
results_string += 'Average MPC in the top decile of y is ' + mystr(kappa_by_income_groups[1]) + '\n'
results_string += 'Average MPC in the top quintile of y is ' + mystr(kappa_by_income_groups[2]) + '\n'
results_string += 'Average MPC in the second quintile of y is ' + mystr(kappa_by_income_groups[3]) + '\n'
results_string += 'Average MPC in the middle quintile of y is ' + mystr(kappa_by_income_groups[4]) + '\n'
results_string += 'Average MPC in the fourth quintile of y is ' + mystr(kappa_by_income_groups[5]) + '\n'
results_string += 'Average MPC in the bottom quintile of y is ' + mystr(kappa_by_income_groups[6]) + '\n'
results_string += 'Average MPC for the employed is ' + mystr(kappa_emp) + '\n'
results_string += 'Average MPC for the unemployed is ' + mystr(kappa_unemp) + '\n'
results_string += 'Average MPC for the retired is ' + mystr(kappa_ret) + '\n'
results_string += 'Of the population with the 1/3 highest MPCs...' + '\n'
results_string += mystr(hand_to_mouth_pct[0]*100) + '% are in the bottom wealth quintile,' + '\n'
results_string += mystr(hand_to_mouth_pct[1]*100) + '% are in the second wealth quintile,' + '\n'
results_string += mystr(hand_to_mouth_pct[2]*100) + '% are in the third wealth quintile,' + '\n'
results_string += mystr(hand_to_mouth_pct[3]*100) + '% are in the fourth wealth quintile,' + '\n'
results_string += 'and ' + mystr(hand_to_mouth_pct[4]*100) + '% are in the top wealth quintile.' + '\n'
print(results_string)
if save_name is not None:
with open('./Results/' + save_name + 'LorenzFig.txt','w') as f:
my_writer = csv.writer(f, delimiter='\t',)
for j in range(len(lorenz_fig_data[0])):
my_writer.writerow([lorenz_fig_data[0][j], lorenz_fig_data[1][j], lorenz_fig_data[2][j]])
f.close()
with open('./Results/' + save_name + 'MPCfig.txt','w') as f:
my_writer = csv.writer(f, delimiter='\t')
for j in range(len(mpc_fig_data[0])):
my_writer.writerow([lorenz_fig_data[0][j], mpc_fig_data[1][j]])
f.close()
if Params.do_beta_dist and Params.do_lifecycle:
with open('./Results/' + save_name + 'KappaByAge.txt','w') as f:
my_writer = csv.writer(f, delimiter='\t')
for j in range(len(kappa_mean_by_age)):
my_writer.writerow([kappa_mean_by_age[j], kappa_lo_beta_by_age[j], kappa_hi_beta_by_age[j]])
f.close()
with open('./Results/' + save_name + 'Results.txt','w') as f:
f.write(results_string)
f.close()
def makeLorenzFig(real_wealth,real_weights,sim_wealth,sim_weights):
'''
Produces a Lorenz curve for the distribution of wealth, comparing simulated
to actual data. A sub-function of makeCSTWresults().
Parameters
----------
real_wealth : np.array
Data on household wealth.
real_weights : np.array
Weighting array of the same size as real_wealth.
sim_wealth : np.array
Simulated wealth holdings of many households.
sim_weights :np.array
Weighting array of the same size as sim_wealth.
Returns
-------
these_percents : np.array
An array of percentiles of households, by wealth.
real_lorenz : np.array
Lorenz shares for real_wealth corresponding to these_percents.
sim_lorenz : np.array
Lorenz shares for sim_wealth corresponding to these_percents.
'''
these_percents = np.linspace(0.0001,0.9999,201)
real_lorenz = getLorenzShares(real_wealth,weights=real_weights,percentiles=these_percents)
sim_lorenz = getLorenzShares(sim_wealth,weights=sim_weights,percentiles=these_percents)
plt.plot(100*these_percents,real_lorenz,'-k',linewidth=1.5)
plt.plot(100*these_percents,sim_lorenz,'--k',linewidth=1.5)
plt.xlabel('Wealth percentile',fontsize=14)
plt.ylabel('Cumulative wealth ownership',fontsize=14)
plt.title('Simulated vs Actual Lorenz Curves',fontsize=16)
plt.legend(('Actual','Simulated'),loc=2,fontsize=12)
plt.ylim(-0.01,1)
plt.show()
return (these_percents,real_lorenz,sim_lorenz)
def makeMPCfig(kappa,weights):
'''
Plot the CDF of the marginal propensity to consume. A sub-function of makeCSTWresults().
Parameters
----------
kappa : np.array
Array of (annualized) marginal propensities to consume for the economy.
weights : np.array
Age-conditional weight array for the data in kappa.
Returns
-------
these_percents : np.array
Array of percentiles of the marginal propensity to consume.
kappa_percentiles : np.array
Array of MPCs corresponding to the percentiles in these_percents.
'''
these_percents = np.linspace(0.0001,0.9999,201)
kappa_percentiles = getPercentiles(kappa,weights,percentiles=these_percents)
plt.plot(kappa_percentiles,these_percents,'-k',linewidth=1.5)
plt.xlabel('Marginal propensity to consume',fontsize=14)
plt.ylabel('Cumulative probability',fontsize=14)
plt.title('CDF of the MPC',fontsize=16)
plt.show()
return (these_percents,kappa_percentiles)
def calcKappaMean(DiscFac,nabla):
'''
Calculates the average MPC for the given parameters. This is a very small
sub-function of sensitivityAnalysis.
Parameters
----------
DiscFac : float
Center of the uniform distribution of discount factors
nabla : float
Width of the uniform distribution of discount factors
Returns
-------
kappa_all : float
Average marginal propensity to consume in the population.
'''
DiscFac_list = approxUniform(N=Params.pref_type_count,bot=DiscFac-nabla,top=DiscFac+nabla)[1]
assignBetaDistribution(est_type_list,DiscFac_list)
multiThreadCommandsFake(est_type_list,beta_point_commands)
kappa_all = calcWeightedAvg(np.vstack((this_type.kappa_history for this_type in est_type_list)),
np.tile(Params.age_weight_all/float(Params.pref_type_count),
Params.pref_type_count))
return kappa_all
def sensitivityAnalysis(parameter,values,is_time_vary):
'''
Perform a sensitivity analysis by varying a chosen parameter over given values
and re-estimating the model at each. Only works for perpetual youth version.
Saves numeric results in a file named SensitivityPARAMETER.txt.
Parameters
----------
parameter : string
Name of an attribute/parameter of cstwMPCagent on which to perform a
sensitivity analysis. The attribute should be a single float.
values : [np.array]
Array of values that the parameter should take on in the analysis.
is_time_vary : boolean
Indicator for whether the parameter of analysis is time_varying (i.e.
is an element of cstwMPCagent.time_vary). While the sensitivity analysis
should only be used for the perpetual youth model, some parameters are
still considered "time varying" in the consumption-saving model and
are encapsulated in a (length=1) list.
Returns
-------
none
'''
fit_list = []
DiscFac_list = []
nabla_list = []
kappa_list = []
for value in values:
print('Now estimating model with ' + parameter + ' = ' + str(value))
Params.diff_save = 1000000.0
old_value_storage = []
for this_type in est_type_list:
old_value_storage.append(getattr(this_type,parameter))
if is_time_vary:
setattr(this_type,parameter,[value])
else:
setattr(this_type,parameter,value)
this_type.update()
output = golden(betaDistObjective,brack=bracket,tol=10**(-4),full_output=True)
nabla = output[0]
fit = output[1]
DiscFac = Params.DiscFac_save
kappa = calcKappaMean(DiscFac,nabla)
DiscFac_list.append(DiscFac)
nabla_list.append(nabla)
fit_list.append(fit)
kappa_list.append(kappa)
with open('./Results/Sensitivity' + parameter + '.txt','w') as f:
my_writer = csv.writer(f, delimiter='\t',)
for j in range(len(DiscFac_list)):
my_writer.writerow([values[j], kappa_list[j], DiscFac_list[j], nabla_list[j], fit_list[j]])
f.close()
j = 0
for this_type in est_type_list:
setattr(this_type,parameter,old_value_storage[j])
this_type.update()
j += 1
# Only run below this line if module is run rather than imported:
if __name__ == "__main__":
# =================================================================
# ====== Make the list of consumer types for estimation ===========
#==================================================================
# Set target Lorenz points and K/Y ratio (MOVE THIS TO SetupParams)
if Params.do_liquid:
lorenz_target = np.array([0.0, 0.004, 0.025,0.117])
KY_target = 6.60
else: # This is hacky until I can find the liquid wealth data and import it
lorenz_target = getLorenzShares(Params.SCF_wealth,weights=Params.SCF_weights,percentiles=Params.percentiles_to_match)
#lorenz_target = np.array([-0.002, 0.01, 0.053,0.171])
KY_target = 10.26
# Make a vector of initial wealth-to-permanent income ratios
a_init = drawDiscrete(N=Params.sim_pop_size,P=Params.a0_probs,X=Params.a0_values,seed=Params.a0_seed)
# Make the list of types for this run, whether infinite or lifecycle
if Params.do_lifecycle:
# Make cohort scaling array
cohort_scale = Params.TFP_growth**(-np.arange(Params.total_T+1))
cohort_scale_array = np.tile(np.reshape(cohort_scale,(Params.total_T+1,1)),(1,Params.sim_pop_size))
# Make base consumer types for each education level
DropoutType = cstwMPCagent(**Params.init_dropout)
DropoutType.a_init = a_init
DropoutType.cohort_scale = cohort_scale_array
HighschoolType = deepcopy(DropoutType)
HighschoolType(**Params.adj_highschool)
CollegeType = deepcopy(DropoutType)
CollegeType(**Params.adj_college)
DropoutType.update()
HighschoolType.update()
CollegeType.update()
# Make initial distributions of permanent income for each education level
p_init_base = drawMeanOneLognormal(N=Params.sim_pop_size, sigma=Params.P0_sigma, seed=Params.P0_seed)
DropoutType.p_init = Params.P0_d*p_init_base
HighschoolType.p_init = Params.P0_h*p_init_base
CollegeType.p_init = Params.P0_c*p_init_base
# Set the type list for the lifecycle estimation
short_type_list = [DropoutType, HighschoolType, CollegeType]
spec_add = 'LC'
else:
# Make the base infinite horizon type and assign income shocks
InfiniteType = cstwMPCagent(**Params.init_infinite)
InfiniteType.tolerance = 0.0001
InfiniteType.a_init = 0*np.ones_like(a_init)
# Make histories of permanent income levels for the infinite horizon type
p_init_base = np.ones(Params.sim_pop_size,dtype=float)
InfiniteType.p_init = p_init_base
# Use a "tractable consumer" instead if desired.
# If you want this to work, you must edit TractableBufferStockModel slightly.
# See comments around line 34 in that module for instructions.
if Params.do_tractable:
from TractableBufferStockModel import TractableConsumerType
TractableInfType = TractableConsumerType(DiscFac=0.99, # will be overwritten
UnempPrb=1-InfiniteType.LivPrb[0],
Rfree=InfiniteType.Rfree,
PermGroFac=InfiniteType.PermGroFac[0],
CRRA=InfiniteType.CRRA,
sim_periods=InfiniteType.sim_periods,
IncUnemp=InfiniteType.IncUnemp,
Nagents=InfiniteType.Nagents)
TractableInfType.p_init = InfiniteType.p_init
TractableInfType.timeFwd()
TractableInfType.TranShkHist = InfiniteType.TranShkHist
TractableInfType.PermShkHist = InfiniteType.PermShkHist
TractableInfType.a_init = InfiniteType.a_init
# Set the type list for the infinite horizon estimation
if Params.do_tractable:
short_type_list = [TractableInfType]
spec_add = 'TC'
else:
short_type_list = [InfiniteType]
spec_add = 'IH'
# Expand the estimation type list if doing beta-dist
if Params.do_beta_dist:
long_type_list = []
for j in range(Params.pref_type_count):
long_type_list += deepcopy(short_type_list)
est_type_list = long_type_list
else:
est_type_list = short_type_list
if Params.do_liquid:
wealth_measure = 'Liquid'
else:
wealth_measure = 'NetWorth'
# =================================================================
# ====== Define estimation objectives =============================
#==================================================================
# Set commands for the beta-point estimation
beta_point_commands = ['solve()','unpackcFunc()','timeFwd()','simulateCSTW()']
# Make the objective function for the beta-point estimation
betaPointObjective = lambda DiscFac : simulateKYratioDifference(DiscFac,
nabla=0,
N=1,
type_list=est_type_list,
weights=Params.age_weight_all,
total_output=Params.total_output,
target=KY_target)
# Make the objective function for the beta-dist estimation
def betaDistObjective(nabla):
# Make the "intermediate objective function" for the beta-dist estimation
#print('Trying nabla=' + str(nabla))
intermediateObjective = lambda DiscFac : simulateKYratioDifference(DiscFac,
nabla=nabla,
N=Params.pref_type_count,
type_list=est_type_list,
weights=Params.age_weight_all,
total_output=Params.total_output,
target=KY_target)
if Params.do_tractable:
top = 0.98
else:
top = 0.998
DiscFac_new = brentq(intermediateObjective,0.90,top,xtol=10**(-8))
N=Params.pref_type_count
sim_wealth = (np.vstack((this_type.W_history for this_type in est_type_list))).flatten()
sim_weights = np.tile(np.repeat(Params.age_weight_all,Params.sim_pop_size),N)
my_diff = calculateLorenzDifference(sim_wealth,sim_weights,Params.percentiles_to_match,lorenz_target)
print('DiscFac=' + str(DiscFac_new) + ', nabla=' + str(nabla) + ', diff=' + str(my_diff))
if my_diff < Params.diff_save:
Params.DiscFac_save = DiscFac_new
return my_diff
# =================================================================
# ========= Estimating the model ==================================
#==================================================================
if Params.run_estimation:
# Estimate the model and time it
t_start = time()
if Params.do_beta_dist:
bracket = (0,0.015) # large nablas break IH version
nabla = golden(betaDistObjective,brack=bracket,tol=10**(-4))
DiscFac = Params.DiscFac_save
spec_name = spec_add + 'betaDist' + wealth_measure
else:
nabla = 0
if Params.do_tractable:
bot = 0.9
top = 0.98
else:
bot = 0.9
top = 1.0
DiscFac = brentq(betaPointObjective,bot,top,xtol=10**(-8))
spec_name = spec_add + 'betaPoint' + wealth_measure
t_end = time()
print('Estimate is DiscFac=' + str(DiscFac) + ', nabla=' + str(nabla) + ', took ' + str(t_end-t_start) + ' seconds.')
#spec_name=None
makeCSTWresults(DiscFac,nabla,spec_name)
# =================================================================
# ========= Relationship between DiscFac and K/Y ratio ===============
#==================================================================
if Params.find_beta_vs_KY:
t_start = time()
DiscFac_list = np.linspace(0.95,1.01,201)
KY_ratio_list = []
for DiscFac in DiscFac_list:
KY_ratio_list.append(betaPointObjective(DiscFac) + KY_target)
KY_ratio_list = np.array(KY_ratio_list)
t_end = time()
plt.plot(DiscFac_list,KY_ratio_list,'-k',linewidth=1.5)
plt.xlabel(r'Discount factor $\beta$',fontsize=14)
plt.ylabel('Capital to output ratio',fontsize=14)
print('That took ' + str(t_end-t_start) + ' seconds.')
plt.show()
with open('./Results/' + spec_add + '_KYbyBeta' + '.txt','w') as f:
my_writer = csv.writer(f, delimiter='\t',)
for j in range(len(DiscFac_list)):
my_writer.writerow([DiscFac_list[j], KY_ratio_list[j]])
f.close()
# =================================================================
# ========= Sensitivity analysis ==================================
#==================================================================
# Sensitivity analysis only set up for infinite horizon model!
if Params.do_lifecycle:
bracket = (0,0.015)
else:
bracket = (0,0.015) # large nablas break IH version
spec_name = None
if Params.do_sensitivity[0]: # coefficient of relative risk aversion sensitivity analysis
CRRA_list = np.linspace(0.5,4.0,15).tolist() #15
sensitivityAnalysis('CRRA',CRRA_list,False)
if Params.do_sensitivity[1]: # transitory income stdev sensitivity analysis
TranShkStd_list = [0.01] + np.linspace(0.05,0.8,16).tolist() #16
sensitivityAnalysis('TranShkStd',TranShkStd_list,True)
if Params.do_sensitivity[2]: # permanent income stdev sensitivity analysis
PermShkStd_list = np.linspace(0.02,0.18,17).tolist() #17
sensitivityAnalysis('PermShkStd',PermShkStd_list,True)
if Params.do_sensitivity[3]: # unemployment benefits sensitivity analysis
IncUnemp_list = np.linspace(0.0,0.8,17).tolist() #17
sensitivityAnalysis('IncUnemp',IncUnemp_list,False)
if Params.do_sensitivity[4]: # unemployment rate sensitivity analysis
UnempPrb_list = np.linspace(0.02,0.12,16).tolist() #16
sensitivityAnalysis('UnempPrb',UnempPrb_list,False)
if Params.do_sensitivity[5]: # mortality rate sensitivity analysis
        LivPrb_list = (1.0 - np.linspace(0.003,0.0125,16)).tolist() #16
sensitivityAnalysis('LivPrb',LivPrb_list,True)
if Params.do_sensitivity[6]: # permanent income growth rate sensitivity analysis
PermGroFac_list = np.linspace(0.00,0.04,17).tolist() #17
sensitivityAnalysis('PermGroFac',PermGroFac_list,True)
if Params.do_sensitivity[7]: # interest rate sensitivity analysis
Rfree_list = (np.linspace(1.0,1.04,17)/InfiniteType.survival_prob[0]).tolist()
sensitivityAnalysis('Rfree',Rfree_list,False)
# =======================================================================
# ========= FBS aggregate shocks model ==================================
#========================================================================
if Params.do_agg_shocks:
# These are the perpetual youth estimates in case we want to skip estimation (and we do)
beta_point_estimate = 0.989142
beta_dist_estimate = 0.985773
nabla_estimate = 0.0077
# Make a set of consumer types for the FBS aggregate shocks model
BaseAggShksType = AggShockConsumerType(**Params.init_agg_shocks)
agg_shocks_type_list = []
for j in range(Params.pref_type_count):
new_type = deepcopy(BaseAggShksType)
new_type.seed = j
new_type.resetRNG()
new_type.makeIncShkHist()
agg_shocks_type_list.append(new_type)
if Params.do_beta_dist:
beta_agg = beta_dist_estimate
nabla_agg = nabla_estimate
else:
beta_agg = beta_point_estimate
nabla_agg = 0.0
DiscFac_list_agg = approxUniform(N=Params.pref_type_count,bot=beta_agg-nabla_agg,top=beta_agg+nabla_agg)[1]
assignBetaDistribution(agg_shocks_type_list,DiscFac_list_agg)
# Make a market for solving the FBS aggregate shocks model
agg_shocks_market = CobbDouglasEconomy(agents = agg_shocks_type_list,
act_T = Params.sim_periods_agg_shocks,
tolerance = 0.0001,
**Params.aggregate_params)
agg_shocks_market.makeAggShkHist()
# Edit the consumer types so they have the right data
for this_type in agg_shocks_market.agents:
this_type.p_init = drawMeanOneLognormal(N=this_type.Nagents,sigma=0.9,seed=0)
this_type.getEconomyData(agg_shocks_market)
# Solve the aggregate shocks version of the model
t_start = time()
agg_shocks_market.solve()
t_end = time()
print('Solving the aggregate shocks model took ' + str(t_end - t_start) + ' seconds.')
for this_type in agg_shocks_type_list:
this_type.W_history = this_type.pHist*this_type.bHist
this_type.kappa_history = 1.0 - (1.0 - this_type.MPChist)**4
agg_shock_weights = np.concatenate((np.zeros(200),np.ones(Params.sim_periods_agg_shocks-200)))
agg_shock_weights = agg_shock_weights/np.sum(agg_shock_weights)
makeCSTWstats(beta_agg,nabla_agg,agg_shocks_type_list,agg_shock_weights)
|
|
import boto3
import time
import pprint
import socket
import traceback
from retrying import retry
import logging
from logging.handlers import SysLogHandler
import memcache
import task_fns as tf
import config as conf
RUN_TASK_RETRIES = 3
RUN_TASK_WAIT_SECS = 2
TASK_INFO_RETRIES = 7
TASK_INFO_WAIT_SECS = 1
DESCRIBE_INSTANCE_WAIT_SECS = 1
DESCRIBE_INSTANCE_RETRIES = 3
CONNECT_RETRIES = 15
CONNECT_WAIT_SECS = 1
FIND_TASK_RETRIES = 5
FIND_TASK_WAIT_SECS = 1
KILL_TASK_RETRIES = 5
KILL_TASK_WAIT_SECS = 1
MEMORY_PER_TASK = 768
TASKS_AVAILABLE = 10
MAX_TASK_AGE = 259200
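# Sizing arithmetic behind check_utilization() below: with MEMORY_PER_TASK =
# 768 (MiB, the unit ECS reports) and TASKS_AVAILABLE = 10, the cluster scales
# up once total remaining memory drops below 768 * 10 = 7680, and only
# considers terminating an idle instance once remaining memory exceeds
# 2 * 7680 = 15360 (and the termination would still leave more than 7680
# free). MAX_TASK_AGE = 259200 seconds means update_task_list() kills tasks
# older than three days.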
ECS_CLUSTER_NAME = conf.get_config('ECS_CLUSTER_NAME')
ECS_AUTO_SCALING_GROUP_NAME = conf.get_config('ECS_AUTO_SCALING_GROUP_NAME')
pp = pprint.PrettyPrinter(indent=4)
class ContextFilter(logging.Filter):
hostname = socket.gethostname()
def filter(self, record):
record.hostname = ContextFilter.hostname
return True
f = ContextFilter()
syslog = SysLogHandler(address=(conf.get_config('SYSLOG_HOST'), conf.get_config('SYSLOG_PORT')))
formatter = logging.Formatter('%(asctime)s twitter.dockerexec: %(message).60s', datefmt='%b %d %H:%M:%S')
syslog.setFormatter(formatter)
tn_logger = logging.getLogger('neo4j.twitter')
tn_logger.setLevel(logging.INFO)
tn_logger.addFilter(f)
syslog.setFormatter(formatter)
tn_logger.addHandler(syslog)
@retry(stop_max_attempt_number=KILL_TASK_RETRIES, wait_fixed=(KILL_TASK_WAIT_SECS * 1000))
def kill_task(ecs, arn, user):
tn_logger.info('Kill: tw:%s %s' % (user, arn))
ecs.stop_task(
cluster=ECS_CLUSTER_NAME,
task=arn)
@retry(stop_max_attempt_number=FIND_TASK_RETRIES, wait_fixed=(FIND_TASK_WAIT_SECS * 1000))
def find_task_set(ecs, next_token=None):
task_ids = []
task_descs = []
if next_token:
response = ecs.list_tasks(
cluster=ECS_CLUSTER_NAME,
maxResults=10,
nextToken=next_token)
else:
response = ecs.list_tasks(
cluster=ECS_CLUSTER_NAME,
maxResults=10)
if 'taskArns' in response:
for arn in response['taskArns']:
task_ids.append(arn)
if len(task_ids) > 0:
td = ecs.describe_tasks(
cluster=ECS_CLUSTER_NAME,
tasks=task_ids)
task_descs.extend(td['tasks'])
if 'nextToken' in response:
task_descs.extend(find_task_set(ecs, response['nextToken']))
return task_descs
def update_task_list():
ecs = boto3.client('ecs', region_name=conf.get_config('AWS_REGION_NAME'))
ec2 = boto3.client('ec2', region_name=conf.get_config('AWS_REGION_NAME'))
mc = memcache.Client([ conf.get_config('MEMCACHE_HOST_PORT') ], debug=0)
task_descs = find_task_set(ecs)
tasksd = {}
current_time = time.time()
for task in task_descs:
cos = task['overrides']['containerOverrides']
env_vars = {}
for co in cos:
if 'environment' in co:
for env_var in co['environment']:
env_vars[ env_var['name'] ] = env_var['value']
if 'TWITTER_USER' in env_vars:
task_info = {}
task_info['conn_string'] = tf.get_all_ti(ecs, ec2, task['taskArn'])
task_info['task_arn'] = task['taskArn']
if 'TIME_STARTED' in env_vars:
task_info['time_started'] = int(float(env_vars['TIME_STARTED']))
if 'NEO4J_PASSWORD' in env_vars:
task_info['n4j_password'] = env_vars['NEO4J_PASSWORD']
if current_time > (task_info['time_started'] + MAX_TASK_AGE):
kill_task(ecs, task['taskArn'], env_vars['TWITTER_USER'])
elif env_vars['TWITTER_USER'] in tasksd:
if 'time_started' in tasksd[ env_vars['TWITTER_USER'] ]:
if int(float(env_vars['TIME_STARTED'])) > tasksd[ env_vars['TWITTER_USER'] ]['time_started']:
kill_task(ecs, tasksd[ env_vars['TWITTER_USER'] ]['task_arn'], env_vars['TWITTER_USER'] )
tasksd[ env_vars['TWITTER_USER'] ] = task_info
else:
kill_task(ecs, task['taskArn'], env_vars['TWITTER_USER'])
else:
tasksd[ env_vars['TWITTER_USER'] ] = task_info
mc.set("task_list", tasksd)
def check_utilization():
instances = []
ecs = boto3.client('ecs', region_name=conf.get_config('AWS_REGION_NAME'))
autos = boto3.client('autoscaling', region_name=conf.get_config('AWS_REGION_NAME'))
response = ecs.list_container_instances(
cluster=ECS_CLUSTER_NAME,
maxResults=100)
container_instances = response['containerInstanceArns']
response = ecs.describe_container_instances(
cluster=ECS_CLUSTER_NAME,
containerInstances=container_instances)
for instance in response['containerInstances']:
remaining_memory = 0
registered_memory = 0
for resource in instance['remainingResources']:
if resource['name'] == 'MEMORY':
remaining_memory = remaining_memory + resource['integerValue']
for resource in instance['registeredResources']:
if resource['name'] == 'MEMORY':
registered_memory = registered_memory + resource['integerValue']
instance_description = {
'arn': instance['containerInstanceArn'],
'ec2instance': instance['ec2InstanceId'],
'remaining_memory': remaining_memory,
'registered_memory': registered_memory,
'status': instance['status'],
'runningTasks': instance['runningTasksCount'] }
instances.append(instance_description)
total_remaining_memory = 0
pending_instances = False
for instance in instances:
total_remaining_memory = total_remaining_memory + instance['remaining_memory']
    print('TOTAL REMAINING MEMORY: %d' % total_remaining_memory)
if total_remaining_memory < (MEMORY_PER_TASK * TASKS_AVAILABLE):
        print('NEED MORE INSTANCES')
asg = autos.describe_auto_scaling_groups(
AutoScalingGroupNames=[ECS_AUTO_SCALING_GROUP_NAME]
)
capacity = asg['AutoScalingGroups'][0]['DesiredCapacity']
pp.pprint(capacity)
autos.set_desired_capacity(
AutoScalingGroupName=ECS_AUTO_SCALING_GROUP_NAME,
DesiredCapacity = capacity + 1,
HonorCooldown = True
)
asg = autos.describe_auto_scaling_groups(
AutoScalingGroupNames=[ECS_AUTO_SCALING_GROUP_NAME]
)
capacity = asg['AutoScalingGroups'][0]['DesiredCapacity']
pp.pprint(capacity)
elif total_remaining_memory > (2 * (MEMORY_PER_TASK * TASKS_AVAILABLE)):
        print('ATTEMPTING TO TERMINATE INSTANCES')
terminated_instance = False
for instance in instances:
if instance['runningTasks'] == 0 and not terminated_instance and (total_remaining_memory - instance['registered_memory']) > (MEMORY_PER_TASK * TASKS_AVAILABLE):
                print('TERMINATING INSTANCE: %s' % instance['ec2instance'])
autos.terminate_instance_in_auto_scaling_group(
InstanceId=instance['ec2instance'],
ShouldDecrementDesiredCapacity=True)
terminated_instance = True
else:
        print('DO NOT NEED MORE INSTANCES')
update_task_list()
check_utilization()
|
|
# Copyright 2013 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from hyperv.neutron import utils
class HyperVUtilsV2(utils.HyperVUtils):
_EXTERNAL_PORT = 'Msvm_ExternalEthernetPort'
_ETHERNET_SWITCH_PORT = 'Msvm_EthernetSwitchPort'
_PORT_ALLOC_SET_DATA = 'Msvm_EthernetPortAllocationSettingData'
_PORT_VLAN_SET_DATA = 'Msvm_EthernetSwitchPortVlanSettingData'
_PORT_SECURITY_SET_DATA = 'Msvm_EthernetSwitchPortSecuritySettingData'
_PORT_ALLOC_ACL_SET_DATA = 'Msvm_EthernetSwitchPortAclSettingData'
_PORT_EXT_ACL_SET_DATA = _PORT_ALLOC_ACL_SET_DATA
_LAN_ENDPOINT = 'Msvm_LANEndpoint'
_STATE_DISABLED = 3
_OPERATION_MODE_ACCESS = 1
_VIRTUAL_SYSTEM_SETTING_DATA = 'Msvm_VirtualSystemSettingData'
_VM_SUMMARY_ENABLED_STATE = 100
_HYPERV_VM_STATE_ENABLED = 2
_ACL_DIR_IN = 1
_ACL_DIR_OUT = 2
_ACL_TYPE_IPV4 = 2
_ACL_TYPE_IPV6 = 3
_ACL_ACTION_ALLOW = 1
_ACL_ACTION_DENY = 2
_ACL_ACTION_METER = 3
_METRIC_ENABLED = 2
_NET_IN_METRIC_NAME = 'Filtered Incoming Network Traffic'
_NET_OUT_METRIC_NAME = 'Filtered Outgoing Network Traffic'
_ACL_APPLICABILITY_LOCAL = 1
_ACL_APPLICABILITY_REMOTE = 2
_ACL_DEFAULT = 'ANY'
_IPV4_ANY = '0.0.0.0/0'
_IPV6_ANY = '::/0'
_TCP_PROTOCOL = 'tcp'
_UDP_PROTOCOL = 'udp'
_ICMP_PROTOCOL = '1'
_MAX_WEIGHT = 65500
# 2 directions x 2 address types = 4 ACLs
_REJECT_ACLS_COUNT = 4
_wmi_namespace = '//./root/virtualization/v2'
def __init__(self):
super(HyperVUtilsV2, self).__init__()
def connect_vnic_to_vswitch(self, vswitch_name, switch_port_name):
vnic = self._get_vnic_settings(switch_port_name)
vswitch = self._get_vswitch(vswitch_name)
port, found = self._get_switch_port_allocation(switch_port_name, True)
port.HostResource = [vswitch.path_()]
port.Parent = vnic.path_()
if not found:
vm = self._get_vm_from_res_setting_data(vnic)
self._add_virt_resource(vm, port)
else:
self._modify_virt_resource(port)
def _modify_virt_resource(self, res_setting_data):
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, out_set_data, ret_val) = vs_man_svc.ModifyResourceSettings(
ResourceSettings=[res_setting_data.GetText_(1)])
self._check_job_status(ret_val, job_path)
def _add_virt_resource(self, vm, res_setting_data):
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, out_set_data, ret_val) = vs_man_svc.AddResourceSettings(
vm.path_(), [res_setting_data.GetText_(1)])
self._check_job_status(ret_val, job_path)
def _remove_virt_resource(self, res_setting_data):
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job, ret_val) = vs_man_svc.RemoveResourceSettings(
ResourceSettings=[res_setting_data.path_()])
self._check_job_status(ret_val, job)
def _add_virt_feature(self, element, res_setting_data):
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, out_set_data, ret_val) = vs_man_svc.AddFeatureSettings(
element.path_(), [res_setting_data.GetText_(1)])
self._check_job_status(ret_val, job_path)
def _remove_virt_feature(self, feature_resource):
self._remove_multiple_virt_features([feature_resource])
def _remove_multiple_virt_features(self, feature_resources):
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.RemoveFeatureSettings(
FeatureSettings=[f.path_() for f in feature_resources])
self._check_job_status(ret_val, job_path)
def disconnect_switch_port(
self, vswitch_name, switch_port_name, vnic_deleted, delete_port):
"""Disconnects the switch port."""
sw_port, found = self._get_switch_port_allocation(switch_port_name)
if not sw_port:
# Port not found. It happens when the VM was already deleted.
return
if delete_port:
self._remove_virt_resource(sw_port)
else:
sw_port.EnabledState = self._STATE_DISABLED
self._modify_virt_resource(sw_port)
def _get_vswitch(self, vswitch_name):
vswitch = self._conn.Msvm_VirtualEthernetSwitch(
ElementName=vswitch_name)
if not len(vswitch):
raise utils.HyperVException(msg=_('VSwitch not found: %s') %
vswitch_name)
return vswitch[0]
def set_switch_external_port_trunk_vlan(self, vswitch_name, vlan_id,
desired_endpoint_mode):
pass
def set_vswitch_port_vlan_id(self, vlan_id, switch_port_name):
port_alloc, found = self._get_switch_port_allocation(switch_port_name)
if not found:
raise utils.HyperVException(
msg=_('Port Allocation not found: %s') % switch_port_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vlan_settings = self._get_vlan_setting_data_from_port_alloc(port_alloc)
if vlan_settings:
# Removing the feature because it cannot be modified
# due to a wmi exception.
(job_path, ret_val) = vs_man_svc.RemoveFeatureSettings(
FeatureSettings=[vlan_settings.path_()])
self._check_job_status(ret_val, job_path)
(vlan_settings, found) = self._get_vlan_setting_data(switch_port_name)
vlan_settings.AccessVlanId = vlan_id
vlan_settings.OperationMode = self._OPERATION_MODE_ACCESS
(job_path, out, ret_val) = vs_man_svc.AddFeatureSettings(
port_alloc.path_(), [vlan_settings.GetText_(1)])
self._check_job_status(ret_val, job_path)
def _get_vlan_setting_data_from_port_alloc(self, port_alloc):
return self._get_first_item(port_alloc.associators(
wmi_result_class=self._PORT_VLAN_SET_DATA))
def _get_vlan_setting_data(self, switch_port_name, create=True):
return self._get_setting_data(
self._PORT_VLAN_SET_DATA,
switch_port_name, create)
def _get_switch_port_allocation(self, switch_port_name, create=False):
return self._get_setting_data(
self._PORT_ALLOC_SET_DATA,
switch_port_name, create)
def _get_setting_data(self, class_name, element_name, create=True):
element_name = element_name.replace("'", '"')
q = self._conn.query("SELECT * FROM %(class_name)s WHERE "
"ElementName = '%(element_name)s'" %
{"class_name": class_name,
"element_name": element_name})
data = self._get_first_item(q)
found = data is not None
if not data and create:
data = self._get_default_setting_data(class_name)
data.ElementName = element_name
return data, found
def _get_default_setting_data(self, class_name):
return self._conn.query("SELECT * FROM %s WHERE InstanceID "
"LIKE '%%\\Default'" % class_name)[0]
def _get_first_item(self, obj):
if obj:
return obj[0]
def enable_port_metrics_collection(self, switch_port_name):
port, found = self._get_switch_port_allocation(switch_port_name, False)
if not found:
return
# Add the ACLs only if they don't already exist
acls = port.associators(wmi_result_class=self._PORT_ALLOC_ACL_SET_DATA)
for acl_type in [self._ACL_TYPE_IPV4, self._ACL_TYPE_IPV6]:
for acl_dir in [self._ACL_DIR_IN, self._ACL_DIR_OUT]:
_acls = self._filter_acls(
acls, self._ACL_ACTION_METER, acl_dir, acl_type)
if not _acls:
acl = self._create_acl(
acl_dir, acl_type, self._ACL_ACTION_METER)
self._add_virt_feature(port, acl)
def enable_control_metrics(self, switch_port_name):
port, found = self._get_switch_port_allocation(switch_port_name, False)
if not found:
return
metric_svc = self._conn.Msvm_MetricService()[0]
metric_names = [self._NET_IN_METRIC_NAME, self._NET_OUT_METRIC_NAME]
for metric_name in metric_names:
metric_def = self._conn.CIM_BaseMetricDefinition(Name=metric_name)
if metric_def:
metric_svc.ControlMetrics(
Subject=port.path_(),
Definition=metric_def[0].path_(),
MetricCollectionEnabled=self._METRIC_ENABLED)
def can_enable_control_metrics(self, switch_port_name):
port, found = self._get_switch_port_allocation(switch_port_name, False)
if not found:
return False
if not self._is_port_vm_started(port):
return False
# all 4 meter ACLs must be existent first. (2 x direction)
acls = port.associators(wmi_result_class=self._PORT_ALLOC_ACL_SET_DATA)
acls = [a for a in acls if a.Action == self._ACL_ACTION_METER]
if len(acls) < 2:
return False
return True
def _is_port_vm_started(self, port):
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vmsettings = port.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA)
#See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
(ret_val, summary_info) = vs_man_svc.GetSummaryInformation(
[self._VM_SUMMARY_ENABLED_STATE],
[v.path_() for v in vmsettings])
if ret_val or not summary_info:
raise utils.HyperVException(msg=_('Cannot get VM summary data '
'for: %s') % port.ElementName)
        return summary_info[0].EnabledState == self._HYPERV_VM_STATE_ENABLED
def create_security_rule(self, switch_port_name, direction, acl_type,
local_port, protocol, remote_address):
port, found = self._get_switch_port_allocation(switch_port_name, False)
if not found:
return
# Add the ACLs only if they don't already exist
acls = port.associators(wmi_result_class=self._PORT_EXT_ACL_SET_DATA)
weight = self._get_new_weight(acls)
self._bind_security_rule(
port, direction, acl_type, self._ACL_ACTION_ALLOW, local_port,
protocol, remote_address, weight)
def remove_security_rule(self, switch_port_name, direction, acl_type,
local_port, protocol, remote_address):
port, found = self._get_switch_port_allocation(switch_port_name, False)
if not found:
# Port not found. It happens when the VM was already deleted.
return
acls = port.associators(wmi_result_class=self._PORT_EXT_ACL_SET_DATA)
filtered_acls = self._filter_security_acls(
acls, self._ACL_ACTION_ALLOW, direction, acl_type, local_port,
protocol, remote_address)
for acl in filtered_acls:
self._remove_virt_feature(acl)
def remove_all_security_rules(self, switch_port_name):
port, found = self._get_switch_port_allocation(switch_port_name, False)
if not found:
# Port not found. It happens when the VM was already deleted.
return
acls = port.associators(wmi_result_class=self._PORT_EXT_ACL_SET_DATA)
filtered_acls = [a for a in acls if
                         a.Action != self._ACL_ACTION_METER]
if filtered_acls:
self._remove_multiple_virt_features(filtered_acls)
def create_default_reject_all_rules(self, switch_port_name):
port, found = self._get_switch_port_allocation(switch_port_name, False)
if not found:
raise utils.HyperVException(
msg=_('Port Allocation not found: %s') % switch_port_name)
acls = port.associators(wmi_result_class=self._PORT_EXT_ACL_SET_DATA)
filtered_acls = [v for v in acls if v.Action == self._ACL_ACTION_DENY]
if len(filtered_acls) >= self._REJECT_ACLS_COUNT:
return
for acl in filtered_acls:
self._remove_virt_feature(acl)
weight = 0
ipv4_pair = (self._ACL_TYPE_IPV4, self._IPV4_ANY)
ipv6_pair = (self._ACL_TYPE_IPV6, self._IPV6_ANY)
for direction in [self._ACL_DIR_IN, self._ACL_DIR_OUT]:
for acl_type, address in [ipv4_pair, ipv6_pair]:
for protocol in [self._TCP_PROTOCOL,
self._UDP_PROTOCOL,
self._ICMP_PROTOCOL]:
self._bind_security_rule(
port, direction, acl_type, self._ACL_ACTION_DENY,
self._ACL_DEFAULT, protocol, address, weight)
weight += 1
def _bind_security_rule(self, port, direction, acl_type, action,
local_port, protocol, remote_address, weight):
acls = port.associators(wmi_result_class=self._PORT_EXT_ACL_SET_DATA)
filtered_acls = self._filter_security_acls(
acls, action, direction, acl_type, local_port, protocol,
remote_address)
for acl in filtered_acls:
self._remove_virt_feature(acl)
acl = self._create_security_acl(
direction, acl_type, action, local_port, protocol, remote_address,
weight)
self._add_virt_feature(port, acl)
def _create_acl(self, direction, acl_type, action):
acl = self._get_default_setting_data(self._PORT_ALLOC_ACL_SET_DATA)
acl.set(Direction=direction,
AclType=acl_type,
Action=action,
Applicability=self._ACL_APPLICABILITY_LOCAL)
return acl
def _create_security_acl(self, direction, acl_type, action, local_port,
protocol, remote_ip_address, weight):
acl = self._create_acl(direction, acl_type, action)
(remote_address, remote_prefix_length) = remote_ip_address.split('/')
acl.set(Applicability=self._ACL_APPLICABILITY_REMOTE,
RemoteAddress=remote_address,
RemoteAddressPrefixLength=remote_prefix_length)
return acl
def _filter_acls(self, acls, action, direction, acl_type, remote_addr=""):
return [v for v in acls
if v.Action == action and
v.Direction == direction and
v.AclType == acl_type and
v.RemoteAddress == remote_addr]
def _filter_security_acls(self, acls, acl_action, direction, acl_type,
local_port, protocol, remote_addr=""):
(remote_address, remote_prefix_length) = remote_addr.split('/')
remote_prefix_length = int(remote_prefix_length)
return [v for v in acls
if v.Direction == direction and
v.Action in [self._ACL_ACTION_ALLOW, self._ACL_ACTION_DENY] and
v.AclType == acl_type and
v.RemoteAddress == remote_address and
v.RemoteAddressPrefixLength == remote_prefix_length]
def _get_new_weight(self, acls):
return 0
class HyperVUtilsV2R2(HyperVUtilsV2):
_PORT_EXT_ACL_SET_DATA = 'Msvm_EthernetSwitchPortExtendedAclSettingData'
_MAX_WEIGHT = 65500
# 2 directions x 2 address types x 3 protocols = 12 ACLs
_REJECT_ACLS_COUNT = 12
def _create_security_acl(self, direction, acl_type, action, local_port,
protocol, remote_addr, weight):
acl = self._get_default_setting_data(self._PORT_EXT_ACL_SET_DATA)
acl.set(Direction=direction,
Action=action,
LocalPort=str(local_port),
Protocol=protocol,
RemoteIPAddress=remote_addr,
IdleSessionTimeout=0,
Weight=weight)
return acl
def _filter_security_acls(self, acls, action, direction, acl_type,
local_port, protocol, remote_addr=""):
return [v for v in acls
if v.Action == action and
v.Direction == direction and
v.LocalPort == str(local_port) and
v.Protocol == protocol and
v.RemoteIPAddress == remote_addr]
def _get_new_weight(self, acls):
        acls = [a for a in acls if a.Action != self._ACL_ACTION_DENY]
if not acls:
return self._MAX_WEIGHT - 1
weights = [a.Weight for a in acls]
min_weight = min(weights)
for weight in range(min_weight, self._MAX_WEIGHT):
if weight not in weights:
return weight
return min_weight - 1
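# Worked example of _get_new_weight() above (hypothetical ACL weights): with
# existing non-deny ACLs carrying Weights [65499, 65497], min_weight is 65497
# and the first unused value, 65498, is returned; with no non-deny ACLs at all
# the method returns _MAX_WEIGHT - 1 = 65499.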
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Simple speech recognition to spot a limited number of keywords.
This is a self-contained example script that will train a very basic audio
recognition model in TensorFlow. It downloads the necessary training data and
runs with reasonable defaults to train within a few hours, even using only a CPU.
For more information, please see
https://www.tensorflow.org/tutorials/audio_recognition.
It is intended as an introduction to using neural networks for audio
recognition, and is not a full speech recognition system. For more advanced
speech systems, I recommend looking into Kaldi. This network uses a keyword
detection style to spot discrete words from a small vocabulary, consisting of
"yes", "no", "up", "down", "left", "right", "on", "off", "stop", and "go".
To run the training process, use:
bazel run tensorflow/examples/speech_commands:train
This will write out checkpoints to /tmp/speech_commands_train/, and will
download over 1GB of open source training data, so you'll need enough free space
and a good internet connection. The default data is a collection of thousands of
one-second .wav files, each containing one spoken word. This data set is
collected from https://aiyprojects.withgoogle.com/open_speech_recording; please
consider contributing to help improve this and other models!
As training progresses, it will print out its accuracy metrics, which should
rise above 90% by the end. Once it's complete, you can run the freeze script to
get a binary GraphDef that you can easily deploy on mobile applications.
If you want to train on your own data, you'll need to create .wavs with your
recordings, all at a consistent length, and then arrange them into subfolders
organized by label. For example, here's a possible file structure:
my_wavs >
  up >
    audio_0.wav
    audio_1.wav
  down >
    audio_2.wav
    audio_3.wav
  other >
    audio_4.wav
    audio_5.wav
You'll also need to tell the script what labels to look for, using the
`--wanted_words` argument. In this case, 'up,down' might be what you want, and
the audio in the 'other' folder would be used to train an 'unknown' category.
To pull this all together, you'd run:
bazel run tensorflow/examples/speech_commands:train -- \
--data_dir=my_wavs --wanted_words=up,down
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import numpy as np
import tensorflow as tf
import input_data
import models
from tensorflow.python.platform import gfile
FLAGS = None
def main(_):
# We want to see all the logging messages for this tutorial.
tf.logging.set_verbosity(tf.logging.INFO)
# Start a new TensorFlow session.
sess = tf.InteractiveSession()
# Begin by making sure we have the training data we need. If you already have
# training data of your own, use `--data_url= ` on the command line to avoid
# downloading.
model_settings = models.prepare_model_settings(
len(input_data.prepare_words_list(FLAGS.wanted_words.split(','))),
FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
FLAGS.window_stride_ms, FLAGS.dct_coefficient_count)
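# Rough sanity check (illustrative; the exact layout is decided by
# models.prepare_model_settings): the fingerprint is the flattened MFCC
# spectrogram of one clip. With the default flags below (16000 Hz sample
# rate, 1000 ms clips, 30 ms windows, 10 ms stride, 40 DCT coefficients)
# that is typically 1 + (16000 - 480) // 160 = 98 time slices of 40
# coefficients each, i.e. a fingerprint_size of roughly 3920 values.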
audio_processor = input_data.AudioProcessor(
FLAGS.data_url, FLAGS.data_dir, FLAGS.silence_percentage,
FLAGS.unknown_percentage,
FLAGS.wanted_words.split(','), FLAGS.validation_percentage,
FLAGS.testing_percentage, model_settings)
fingerprint_size = model_settings['fingerprint_size']
label_count = model_settings['label_count']
time_shift_samples = int((FLAGS.time_shift_ms * FLAGS.sample_rate) / 1000)
# Figure out the learning rates for each training phase. Since it's often
# effective to have high learning rates at the start of training, followed by
# lower levels towards the end, the number of steps and learning rates can be
# specified as comma-separated lists to define the rate at each stage. For
# example --how_many_training_steps=10000,3000 --learning_rate=0.001,0.0001
# will run 13,000 training loops in total, with a rate of 0.001 for the first
# 10,000, and 0.0001 for the final 3,000.
training_steps_list = map(int, FLAGS.how_many_training_steps.split(','))
learning_rates_list = map(float, FLAGS.learning_rate.split(','))
if len(training_steps_list) != len(learning_rates_list):
raise Exception(
'--how_many_training_steps and --learning_rate must be equal length '
'lists, but are %d and %d long instead' % (len(training_steps_list),
len(learning_rates_list)))
fingerprint_input = tf.placeholder(
tf.float32, [None, fingerprint_size], name='fingerprint_input')
logits, dropout_prob = models.create_model(
fingerprint_input,
model_settings,
FLAGS.model_architecture,
is_training=True)
# Define loss and optimizer
ground_truth_input = tf.placeholder(
tf.float32, [None, label_count], name='groundtruth_input')
# Optionally we can add runtime checks to spot when NaNs or other symptoms of
# numerical errors start occurring during training.
control_dependencies = []
if FLAGS.check_nans:
checks = tf.add_check_numerics_ops()
control_dependencies = [checks]
# Create the back propagation and training evaluation machinery in the graph.
with tf.name_scope('cross_entropy'):
cross_entropy_mean = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
labels=ground_truth_input, logits=logits))
tf.summary.scalar('cross_entropy', cross_entropy_mean)
with tf.name_scope('train'), tf.control_dependencies(control_dependencies):
learning_rate_input = tf.placeholder(
tf.float32, [], name='learning_rate_input')
train_step = tf.train.GradientDescentOptimizer(
learning_rate_input).minimize(cross_entropy_mean)
predicted_indices = tf.argmax(logits, 1)
expected_indices = tf.argmax(ground_truth_input, 1)
correct_prediction = tf.equal(predicted_indices, expected_indices)
confusion_matrix = tf.confusion_matrix(expected_indices, predicted_indices)
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
global_step = tf.contrib.framework.get_or_create_global_step()
increment_global_step = tf.assign(global_step, global_step + 1)
saver = tf.train.Saver(tf.global_variables())
# Merge all the summaries and write them out to /tmp/retrain_logs (by default)
merged_summaries = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation')
tf.global_variables_initializer().run()
start_step = 1
if FLAGS.start_checkpoint:
models.load_variables_from_checkpoint(sess, FLAGS.start_checkpoint)
start_step = global_step.eval(session=sess)
tf.logging.info('Training from step: %d ', start_step)
# Save graph.pbtxt.
tf.train.write_graph(sess.graph_def, FLAGS.train_dir,
FLAGS.model_architecture + '.pbtxt')
# Save list of words.
with gfile.GFile(
os.path.join(FLAGS.train_dir, FLAGS.model_architecture + '_labels.txt'),
'w') as f:
f.write('\n'.join(audio_processor.words_list))
# Training loop.
training_steps_max = np.sum(training_steps_list)
for training_step in xrange(start_step, training_steps_max + 1):
# Figure out what the current learning rate is.
training_steps_sum = 0
for i in range(len(training_steps_list)):
training_steps_sum += training_steps_list[i]
if training_step <= training_steps_sum:
learning_rate_value = learning_rates_list[i]
break
# Pull the audio samples we'll use for training.
train_fingerprints, train_ground_truth = audio_processor.get_data(
FLAGS.batch_size, 0, model_settings, FLAGS.background_frequency,
FLAGS.background_volume, time_shift_samples, 'training', sess)
# Run the graph with this batch of training data.
train_summary, train_accuracy, cross_entropy_value, _, _ = sess.run(
[
merged_summaries, evaluation_step, cross_entropy_mean, train_step,
increment_global_step
],
feed_dict={
fingerprint_input: train_fingerprints,
ground_truth_input: train_ground_truth,
learning_rate_input: learning_rate_value,
dropout_prob: 0.5
})
train_writer.add_summary(train_summary, training_step)
tf.logging.info('Step #%d: rate %f, accuracy %.1f%%, cross entropy %f' %
(training_step, learning_rate_value, train_accuracy * 100,
cross_entropy_value))
is_last_step = (training_step == training_steps_max)
if (training_step % FLAGS.eval_step_interval) == 0 or is_last_step:
set_size = audio_processor.set_size('validation')
total_accuracy = 0
total_conf_matrix = None
for i in xrange(0, set_size, FLAGS.batch_size):
validation_fingerprints, validation_ground_truth = (
audio_processor.get_data(FLAGS.batch_size, i, model_settings, 0.0,
0.0, 0, 'validation', sess))
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
validation_summary, validation_accuracy, conf_matrix = sess.run(
[merged_summaries, evaluation_step, confusion_matrix],
feed_dict={
fingerprint_input: validation_fingerprints,
ground_truth_input: validation_ground_truth,
dropout_prob: 1.0
})
validation_writer.add_summary(validation_summary, training_step)
batch_size = min(FLAGS.batch_size, set_size - i)
total_accuracy += (validation_accuracy * batch_size) / set_size
if total_conf_matrix is None:
total_conf_matrix = conf_matrix
else:
total_conf_matrix += conf_matrix
tf.logging.info('Confusion Matrix:\n %s' % (total_conf_matrix))
tf.logging.info('Step %d: Validation accuracy = %.1f%% (N=%d)' %
(training_step, total_accuracy * 100, set_size))
# Save the model checkpoint periodically.
if (training_step % FLAGS.save_step_interval == 0 or
training_step == training_steps_max):
checkpoint_path = os.path.join(FLAGS.train_dir,
FLAGS.model_architecture + '.ckpt')
tf.logging.info('Saving to "%s-%d"', checkpoint_path, training_step)
saver.save(sess, checkpoint_path, global_step=training_step)
set_size = audio_processor.set_size('testing')
tf.logging.info('set_size=%d', set_size)
total_accuracy = 0
total_conf_matrix = None
for i in xrange(0, set_size, FLAGS.batch_size):
test_fingerprints, test_ground_truth = audio_processor.get_data(
FLAGS.batch_size, i, model_settings, 0.0, 0.0, 0, 'testing', sess)
test_accuracy, conf_matrix = sess.run(
[evaluation_step, confusion_matrix],
feed_dict={
fingerprint_input: test_fingerprints,
ground_truth_input: test_ground_truth,
dropout_prob: 1.0
})
batch_size = min(FLAGS.batch_size, set_size - i)
total_accuracy += (test_accuracy * batch_size) / set_size
if total_conf_matrix is None:
total_conf_matrix = conf_matrix
else:
total_conf_matrix += conf_matrix
tf.logging.info('Confusion Matrix:\n %s' % (total_conf_matrix))
tf.logging.info('Final test accuracy = %.1f%% (N=%d)' % (total_accuracy * 100,
set_size))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_url',
type=str,
# pylint: disable=line-too-long
default='http://download.tensorflow.org/data/speech_commands_v0.01.tar.gz',
# pylint: enable=line-too-long
help='Location of speech training data archive on the web.')
parser.add_argument(
'--data_dir',
type=str,
default='/tmp/speech_dataset/',
help="""\
Where to download the speech training data to.
""")
parser.add_argument(
'--background_volume',
type=float,
default=0.1,
help="""\
How loud the background noise should be, between 0 and 1.
""")
parser.add_argument(
'--background_frequency',
type=float,
default=0.8,
help="""\
How many of the training samples have background noise mixed in.
""")
parser.add_argument(
'--silence_percentage',
type=float,
default=10.0,
help="""\
How much of the training data should be silence.
""")
parser.add_argument(
'--unknown_percentage',
type=float,
default=10.0,
help="""\
How much of the training data should be unknown words.
""")
parser.add_argument(
'--time_shift_ms',
type=float,
default=100.0,
help="""\
Range to randomly shift the training audio by in time.
""")
parser.add_argument(
'--testing_percentage',
type=int,
default=10,
help='What percentage of wavs to use as a test set.')
parser.add_argument(
'--validation_percentage',
type=int,
default=10,
help='What percentage of wavs to use as a validation set.')
parser.add_argument(
'--sample_rate',
type=int,
default=16000,
help='Expected sample rate of the wavs',)
parser.add_argument(
'--clip_duration_ms',
type=int,
default=1000,
help='Expected duration in milliseconds of the wavs',)
parser.add_argument(
'--window_size_ms',
type=float,
default=30.0,
help='How long each spectrogram timeslice is',)
parser.add_argument(
'--window_stride_ms',
type=float,
default=10.0,
help='How far to move in time between spectrogram timeslices',)
parser.add_argument(
'--dct_coefficient_count',
type=int,
default=40,
help='How many bins to use for the MFCC fingerprint',)
parser.add_argument(
'--how_many_training_steps',
type=str,
default='15000,3000',
help='How many training loops to run',)
parser.add_argument(
'--eval_step_interval',
type=int,
default=400,
help='How often to evaluate the training results.')
parser.add_argument(
'--learning_rate',
type=str,
default='0.001,0.0001',
help='How large a learning rate to use when training.')
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='How many items to train with at once',)
parser.add_argument(
'--summaries_dir',
type=str,
default='/tmp/retrain_logs',
help='Where to save summary logs for TensorBoard.')
parser.add_argument(
'--wanted_words',
type=str,
default='yes,no,up,down,left,right,on,off,stop,go',
help='Words to use (others will be added to an unknown label)',)
parser.add_argument(
'--train_dir',
type=str,
default='/tmp/speech_commands_train',
help='Directory to write event logs and checkpoint.')
parser.add_argument(
'--save_step_interval',
type=int,
default=100,
help='Save model checkpoint every save_steps.')
parser.add_argument(
'--start_checkpoint',
type=str,
default='',
help='If specified, restore this pretrained model before any training.')
parser.add_argument(
'--model_architecture',
type=str,
default='conv',
help='What model architecture to use')
parser.add_argument(
'--check_nans',
type=bool,
default=False,
help='Whether to check for invalid numbers during processing')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import functools
import errno
import os
import resource
import signal
import time
import subprocess
import re
from swift import gettext_ as _
from swift.common.utils import search_tree, remove_file, write_file
from swift.common.exceptions import InvalidPidFileException
SWIFT_DIR = '/etc/swift'
RUN_DIR = '/var/run/swift'
PROC_DIR = '/proc'
# auth-server has been removed from ALL_SERVERS, start it explicitly
ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor',
'container-replicator', 'container-reconciler',
'container-server', 'container-sync',
'container-updater', 'object-auditor', 'object-server',
'object-expirer', 'object-replicator',
'object-reconstructor', 'object-updater',
'proxy-server', 'account-replicator', 'account-reaper']
MAIN_SERVERS = ['proxy-server', 'account-server', 'container-server',
'object-server']
REST_SERVERS = [s for s in ALL_SERVERS if s not in MAIN_SERVERS]
GRACEFUL_SHUTDOWN_SERVERS = MAIN_SERVERS + ['auth-server']
START_ONCE_SERVERS = REST_SERVERS
# These are servers that match a type (account-*, container-*, object-*) but
# don't use that type-server.conf file and instead use their own.
STANDALONE_SERVERS = ['object-expirer', 'container-reconciler']
KILL_WAIT = 15 # seconds to wait for servers to die (by default)
WARNING_WAIT = 3 # seconds to wait after message that may just be a warning
MAX_DESCRIPTORS = 32768
MAX_MEMORY = (1024 * 1024 * 1024) * 2 # 2 GB
MAX_PROCS = 8192 # workers * disks * threads_per_disk, can get high
def setup_env():
"""Try to increase resource limits of the OS. Move PYTHON_EGG_CACHE to /tmp
"""
try:
resource.setrlimit(resource.RLIMIT_NOFILE,
(MAX_DESCRIPTORS, MAX_DESCRIPTORS))
except ValueError:
print(_("WARNING: Unable to modify file descriptor limit. "
"Running as non-root?"))
try:
resource.setrlimit(resource.RLIMIT_DATA,
(MAX_MEMORY, MAX_MEMORY))
except ValueError:
print(_("WARNING: Unable to modify memory limit. "
"Running as non-root?"))
try:
resource.setrlimit(resource.RLIMIT_NPROC,
(MAX_PROCS, MAX_PROCS))
except ValueError:
print(_("WARNING: Unable to modify max process limit. "
"Running as non-root?"))
# Set PYTHON_EGG_CACHE if it isn't already set
os.environ.setdefault('PYTHON_EGG_CACHE', '/tmp')
def command(func):
"""
Decorator to declare which methods are accessible as commands. Commands
always return 1 or 0, where 0 indicates success.
:param func: function to make public
"""
func.publicly_accessible = True
@functools.wraps(func)
def wrapped(*a, **kw):
rv = func(*a, **kw)
return 1 if rv else 0
return wrapped
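# Example (illustrative only): any method decorated with @command is marked
# publicly accessible (see Manager.get_command below) and its return value is
# normalized to a shell-style exit code, 0 for success and 1 for failure:
#
#     class Example(object):
#         @command
#         def frobnicate(self):
#             """frobnicate the widgets"""
#             return []            # falsey result -> wrapped call returns 0
#
#     Example().frobnicate()       # -> 0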
def watch_server_pids(server_pids, interval=1, **kwargs):
"""Monitor a collection of server pids yielding back those pids that
aren't responding to signals.
:param server_pids: a dict, lists of pids [int,...] keyed on
Server objects
"""
status = {}
start = time.time()
end = start + interval
server_pids = dict(server_pids) # make a copy
while True:
for server, pids in server_pids.items():
for pid in pids:
try:
# let pid stop if it wants to
os.waitpid(pid, os.WNOHANG)
except OSError as e:
if e.errno not in (errno.ECHILD, errno.ESRCH):
raise # else no such child/process
# check running pids for server
status[server] = server.get_running_pids(**kwargs)
for pid in pids:
# original pids no longer in running pids!
if pid not in status[server]:
yield server, pid
# update active pids list using running_pids
server_pids[server] = status[server]
if not [p for server, pids in status.items() for p in pids]:
# no more running pids
break
if time.time() > end:
break
else:
time.sleep(0.1)
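# Example (illustrative; mirrors Manager.stop below): callers pass a dict of
# Server objects to the lists of pids they just signalled, then consume the
# generator until every pid has died or the interval has elapsed:
#
#     for server, killed_pid in watch_server_pids(server_pids,
#                                                 interval=KILL_WAIT):
#         print('%s (%s) appears to have stopped' % (server, killed_pid))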
def safe_kill(pid, sig, name):
"""Send signal to process and check process name
:param pid: process id
:param sig: signal to send
:param name: name to ensure target process
"""
# check process name for SIG_DFL
if sig == signal.SIG_DFL:
try:
proc_file = '%s/%d/cmdline' % (PROC_DIR, pid)
if os.path.exists(proc_file):
with open(proc_file, 'r') as fd:
if name not in fd.read():
# unknown process is using the pid
raise InvalidPidFileException()
except IOError:
pass
os.kill(pid, sig)
class UnknownCommandError(Exception):
pass
class Manager(object):
"""Main class for performing commands on groups of servers.
:param servers: list of server names as strings
"""
def __init__(self, servers, run_dir=RUN_DIR):
self.server_names = set()
for server in servers:
if server == 'all':
self.server_names.update(ALL_SERVERS)
elif server == 'main':
self.server_names.update(MAIN_SERVERS)
elif server == 'rest':
self.server_names.update(REST_SERVERS)
elif '*' in server:
# convert glob to regex
self.server_names.update([
s for s in ALL_SERVERS if
re.match(server.replace('*', '.*'), s)])
else:
self.server_names.add(server)
self.servers = set()
for name in self.server_names:
self.servers.add(Server(name, run_dir))
def __iter__(self):
return iter(self.servers)
@command
def status(self, **kwargs):
"""display status of tracked pids for server
"""
status = 0
for server in self.servers:
status += server.status(**kwargs)
return status
@command
def start(self, **kwargs):
"""starts a server
"""
setup_env()
status = 0
for server in self.servers:
server.launch(**kwargs)
if not kwargs.get('daemon', True):
for server in self.servers:
try:
status += server.interact(**kwargs)
except KeyboardInterrupt:
print(_('\nuser quit'))
self.stop(**kwargs)
break
elif kwargs.get('wait', True):
for server in self.servers:
status += server.wait(**kwargs)
return status
@command
def no_wait(self, **kwargs):
"""spawn server and return immediately
"""
kwargs['wait'] = False
return self.start(**kwargs)
@command
def no_daemon(self, **kwargs):
"""start a server interactively
"""
kwargs['daemon'] = False
return self.start(**kwargs)
@command
def once(self, **kwargs):
"""start server and run one pass on supporting daemons
"""
kwargs['once'] = True
return self.start(**kwargs)
@command
def stop(self, **kwargs):
"""stops a server
"""
server_pids = {}
for server in self.servers:
signaled_pids = server.stop(**kwargs)
if not signaled_pids:
print(_('No %s running') % server)
else:
server_pids[server] = signaled_pids
# all signaled_pids, i.e. list(itertools.chain(*server_pids.values()))
signaled_pids = [p for server, pids in server_pids.items()
for p in pids]
# keep track of the pids yielded back as killed for all servers
killed_pids = set()
kill_wait = kwargs.get('kill_wait', KILL_WAIT)
for server, killed_pid in watch_server_pids(server_pids,
interval=kill_wait,
**kwargs):
print(_("%s (%s) appears to have stopped") % (server, killed_pid))
killed_pids.add(killed_pid)
if not killed_pids.symmetric_difference(signaled_pids):
# all processes have been stopped
return 0
# reached the end of the watch interval without killing all server pids
for server, pids in server_pids.items():
if not killed_pids.issuperset(pids):
# some pids of this server were not killed
print(_('Waited %s seconds for %s to die; giving up') % (
kill_wait, server))
return 1
@command
def kill(self, **kwargs):
"""stop a server (no error if not running)
"""
status = self.stop(**kwargs)
kwargs['quiet'] = True
if status and not self.status(**kwargs):
# only exit error if the server is still running
return status
return 0
@command
def shutdown(self, **kwargs):
"""allow current requests to finish on supporting servers
"""
kwargs['graceful'] = True
status = 0
status += self.stop(**kwargs)
return status
@command
def restart(self, **kwargs):
"""stops then restarts server
"""
status = 0
status += self.stop(**kwargs)
status += self.start(**kwargs)
return status
@command
def reload(self, **kwargs):
"""graceful shutdown then restart on supporting servers
"""
kwargs['graceful'] = True
status = 0
for server in self.server_names:
m = Manager([server])
status += m.stop(**kwargs)
status += m.start(**kwargs)
return status
@command
def force_reload(self, **kwargs):
"""alias for reload
"""
return self.reload(**kwargs)
def get_command(self, cmd):
"""Find and return the decorated method named like cmd
:param cmd: the command to get, a string, if not found raises
UnknownCommandError
"""
cmd = cmd.lower().replace('-', '_')
try:
f = getattr(self, cmd)
except AttributeError:
raise UnknownCommandError(cmd)
if not hasattr(f, 'publicly_accessible'):
raise UnknownCommandError(cmd)
return f
@classmethod
def list_commands(cls):
"""Get all publicly accessible commands
:returns: a list of string tuples (cmd, help), the method names who are
decorated as commands
"""
get_method = lambda cmd: getattr(cls, cmd)
return sorted([(x.replace('_', '-'), get_method(x).__doc__.strip())
for x in dir(cls) if
getattr(get_method(x), 'publicly_accessible', False)])
def run_command(self, cmd, **kwargs):
"""Find the named command and run it
:param cmd: the command name to run
"""
f = self.get_command(cmd)
return f(**kwargs)
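# Example (illustrative only): swift-init style usage builds a Manager from
# server names or globs and dispatches one of the @command methods by name:
#
#     manager = Manager(['object-*', 'proxy-server'])
#     if manager.run_command('status') != 0:
#         print('some swift services are not running')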
class Server(object):
"""Manage operations on a server or group of servers of similar type
:param server: name of server
"""
def __init__(self, server, run_dir=RUN_DIR):
self.server = server.lower()
if '.' in self.server:
self.server, self.conf = self.server.rsplit('.', 1)
else:
self.conf = None
if '-' not in self.server:
self.server = '%s-server' % self.server
self.type = self.server.rsplit('-', 1)[0]
self.cmd = 'swift-%s' % self.server
self.procs = []
self.run_dir = run_dir
def __str__(self):
return self.server
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(str(self)))
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
try:
return self.server == other.server
except AttributeError:
return False
def get_pid_file_name(self, conf_file):
"""Translate conf_file to a corresponding pid_file
:param conf_file: a conf_file for this server, a string
:returns: the pid_file for this conf_file
"""
return conf_file.replace(
os.path.normpath(SWIFT_DIR), self.run_dir, 1).replace(
'%s-server' % self.type, self.server, 1).replace(
'.conf', '.pid', 1)
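# Worked example (illustrative): for a Server('object-replicator') with the
# default directories, get_pid_file_name('/etc/swift/object-server/1.conf')
# returns '/var/run/swift/object-replicator/1.pid' -- SWIFT_DIR is swapped
# for run_dir, '<type>-server' for the full server name, and '.conf' for
# '.pid'.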
def get_conf_file_name(self, pid_file):
"""Translate pid_file to a corresponding conf_file
:param pid_file: a pid_file for this server, a string
:returns: the conf_file for this pid_file
"""
if self.server in STANDALONE_SERVERS:
return pid_file.replace(
os.path.normpath(self.run_dir), SWIFT_DIR, 1).replace(
'.pid', '.conf', 1)
else:
return pid_file.replace(
os.path.normpath(self.run_dir), SWIFT_DIR, 1).replace(
self.server, '%s-server' % self.type, 1).replace(
'.pid', '.conf', 1)
def conf_files(self, **kwargs):
"""Get conf files for this server
:param: number, if supplied will only lookup the nth server
:returns: list of conf files
"""
if self.server in STANDALONE_SERVERS:
server_search = self.server
else:
server_search = "%s-server" % self.type
if self.conf is not None:
found_conf_files = search_tree(SWIFT_DIR, server_search,
self.conf + '.conf',
dir_ext=self.conf + '.conf.d')
else:
found_conf_files = search_tree(SWIFT_DIR, server_search + '*',
'.conf', dir_ext='.conf.d')
number = kwargs.get('number')
if number:
try:
conf_files = [found_conf_files[number - 1]]
except IndexError:
conf_files = []
else:
conf_files = found_conf_files
if not conf_files:
# maybe there's a config file(s) out there, but I couldn't find it!
if not kwargs.get('quiet'):
if number:
print(_('Unable to locate config number %s for %s')
% (number, self.server))
else:
print(_('Unable to locate config for %s') % self.server)
if kwargs.get('verbose') and not kwargs.get('quiet'):
if found_conf_files:
print(_('Found configs:'))
for i, conf_file in enumerate(found_conf_files):
print(' %d) %s' % (i + 1, conf_file))
return conf_files
def pid_files(self, **kwargs):
"""Get pid files for this server
:param: number, if supplied will only lookup the nth server
:returns: list of pid files
"""
if self.conf is not None:
pid_files = search_tree(self.run_dir, '%s*' % self.server,
exts=[self.conf + '.pid',
self.conf + '.pid.d'])
else:
pid_files = search_tree(self.run_dir, '%s*' % self.server)
if kwargs.get('number', 0):
conf_files = self.conf_files(**kwargs)
# filter pid_files to match the index of numbered conf_file
pid_files = [pid_file for pid_file in pid_files if
self.get_conf_file_name(pid_file) in conf_files]
return pid_files
def iter_pid_files(self, **kwargs):
"""Generator, yields (pid_file, pid)
"""
for pid_file in self.pid_files(**kwargs):
try:
pid = int(open(pid_file).read().strip())
except ValueError:
pid = None
yield pid_file, pid
def signal_pids(self, sig, **kwargs):
"""Send a signal to pids for this server
:param sig: signal to send
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
pids = {}
for pid_file, pid in self.iter_pid_files(**kwargs):
if not pid: # Catches None and 0
print (_('Removing pid file %s with invalid pid') % pid_file)
remove_file(pid_file)
continue
try:
if sig != signal.SIG_DFL:
print(_('Signal %s pid: %s signal: %s') % (self.server,
pid, sig))
safe_kill(pid, sig, 'swift-%s' % self.server)
except InvalidPidFileException as e:
if kwargs.get('verbose'):
print(_('Removing pid file %s with wrong pid %d') % (
pid_file, pid))
remove_file(pid_file)
except OSError as e:
if e.errno == errno.ESRCH:
# pid does not exist
if kwargs.get('verbose'):
print(_("Removing stale pid file %s") % pid_file)
remove_file(pid_file)
elif e.errno == errno.EPERM:
print(_("No permission to signal PID %d") % pid)
else:
# process exists
pids[pid] = pid_file
return pids
def get_running_pids(self, **kwargs):
"""Get running pids
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
return self.signal_pids(signal.SIG_DFL, **kwargs) # send noop
def kill_running_pids(self, **kwargs):
"""Kill running pids
:param graceful: if True, attempt SIGHUP on supporting servers
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
graceful = kwargs.get('graceful')
if graceful and self.server in GRACEFUL_SHUTDOWN_SERVERS:
sig = signal.SIGHUP
else:
sig = signal.SIGTERM
return self.signal_pids(sig, **kwargs)
def status(self, pids=None, **kwargs):
"""Display status of server
:param: pids, if not supplied pids will be populated automatically
:param: number, if supplied will only lookup the nth server
:returns: 1 if server is not running, 0 otherwise
"""
if pids is None:
pids = self.get_running_pids(**kwargs)
if not pids:
number = kwargs.get('number', 0)
if number:
kwargs['quiet'] = True
conf_files = self.conf_files(**kwargs)
if conf_files:
print(_("%s #%d not running (%s)") % (self.server, number,
conf_files[0]))
else:
print(_("No %s running") % self.server)
return 1
for pid, pid_file in pids.items():
conf_file = self.get_conf_file_name(pid_file)
print(_("%s running (%s - %s)") % (self.server, pid, conf_file))
return 0
def spawn(self, conf_file, once=False, wait=True, daemon=True, **kwargs):
"""Launch a subprocess for this server.
:param conf_file: path to conf_file to use as first arg
:param once: boolean, add once argument to command
:param wait: boolean, if true capture stdout with a pipe
:param daemon: boolean, if false ask server to log to console
:returns: the pid of the spawned process
"""
args = [self.cmd, conf_file]
if once:
args.append('once')
if not daemon:
# ask the server to log to console
args.append('verbose')
# figure out what we're going to do with stdio
if not daemon:
# do nothing, this process is open until the spawns close anyway
re_out = None
re_err = None
else:
re_err = subprocess.STDOUT
if wait:
# we're going to need to block on this...
re_out = subprocess.PIPE
else:
re_out = open(os.devnull, 'w+b')
proc = subprocess.Popen(args, stdout=re_out, stderr=re_err)
pid_file = self.get_pid_file_name(conf_file)
write_file(pid_file, proc.pid)
self.procs.append(proc)
return proc.pid
def wait(self, **kwargs):
"""
wait on spawned procs to start
"""
status = 0
for proc in self.procs:
# wait for process to close its stdout
output = proc.stdout.read()
if kwargs.get('once', False):
# if you don't want once to wait you can send it to the
# background on the command line, I generally just run with
# no-daemon anyway, but this is quieter
proc.wait()
if output:
print(output)
start = time.time()
# wait for process to die (output may just be a warning)
while time.time() - start < WARNING_WAIT:
time.sleep(0.1)
if proc.poll() is not None:
status += proc.returncode
break
return status
def interact(self, **kwargs):
"""
wait on spawned procs to terminate
"""
status = 0
for proc in self.procs:
# wait for process to terminate
proc.communicate()
if proc.returncode:
status += 1
return status
def launch(self, **kwargs):
"""
Collect conf files and attempt to spawn the processes for this server
"""
conf_files = self.conf_files(**kwargs)
if not conf_files:
return {}
pids = self.get_running_pids(**kwargs)
already_started = False
for pid, pid_file in pids.items():
conf_file = self.get_conf_file_name(pid_file)
# for legacy compat you can't start other servers if one server is
# already running (unless -n specifies which one you want), this
# restriction could potentially be lifted, and launch could start
# any unstarted instances
if conf_file in conf_files:
already_started = True
print(_("%s running (%s - %s)") %
(self.server, pid, conf_file))
elif not kwargs.get('number', 0):
already_started = True
print(_("%s running (%s - %s)") % (self.server, pid, pid_file))
if already_started:
print(_("%s already started...") % self.server)
return {}
if self.server not in START_ONCE_SERVERS:
kwargs['once'] = False
pids = {}
for conf_file in conf_files:
if kwargs.get('once'):
msg = _('Running %s once') % self.server
else:
msg = _('Starting %s') % self.server
print('%s...(%s)' % (msg, conf_file))
try:
pid = self.spawn(conf_file, **kwargs)
except OSError as e:
if e.errno == errno.ENOENT:
# TODO(clayg): should I check if self.cmd exists earlier?
print(_("%s does not exist") % self.cmd)
break
else:
raise
pids[pid] = conf_file
return pids
def stop(self, **kwargs):
"""Send stop signals to pids for this server
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
return self.kill_running_pids(**kwargs)
|
|
''' This module contains class ParamsFrame which is responsible for
all operations with parameters.
Attributes:
MAX_FILE_LBL_LENGTH (int): maximum length of the label with
file name.
TEXT_FOR_PARAMS_LBL (str): text displayed on one of the labels.
'''
import os
from tkinter import W, E, S, N, LEFT, IntVar
from tkinter.ttk import Frame, Label, Button, Notebook
from tkinter.ttk import Checkbutton
from tkinter.filedialog import askopenfilename, asksaveasfilename
from tkinter.messagebox import showwarning
from pyDEA.core.gui_modules.categories_checkbox_gui import CategoriesCheckBox
from pyDEA.core.gui_modules.scrollable_frame_gui import VerticalScrolledFrame
from pyDEA.core.data_processing.parameters import parse_parameters_from_file, Parameters
from pyDEA.core.data_processing.parameters import CATEGORICAL_AND_DATA_FIELDS, write_parameters_to_file
from pyDEA.core.utils.dea_utils import XPAD_VALUE, YPAD_VALUE
from pyDEA.core.gui_modules.weight_frame_gui import WeightFrame
from pyDEA.core.gui_modules.options_frame_gui import OptionsFrame
MAX_FILE_LBL_LENGTH = 400
TEXT_FOR_PARAMS_LBL = 'Parameters loaded from file: '
class ParamsFrame(Notebook):
''' This class implements various operations with parameters, such as
loading them from and saving them to a file, and modifying parameter values.
Attributes:
parent (Tk object): parent of this widget.
params (Parameters): Parameters object with values of all
parameters.
current_categories (list of str): list of current valid categories.
input_categories (CategoriesCheckBox): frame for displaying
input categories.
output_categories (CategoriesCheckBox): frame for displaying
output categories.
params_from_file_lbl (Label): label for displaying file name if
parameters were loaded from file.
data_from_params_file(StringVar): StringVar object used for
communication of this widget with DataFrame. Changing the
value of data_from_params_file
triggers changes in DataFrame (like clearing all data and
loading data from file).
str_var_for_input_output_boxes (ObserverStringVar):
ObserverStringVar
object used for storing input and output categories and for
tracking changes in input and output categories.
weight_tab (WeightFrame): widget used for displaying and
editing weights.
load_without_data (IntVar): IntVar object used for Checkbutton;
if its value is 1, parameters will be loaded from file without data,
if its value is 0, parameters will be loaded from file with data.
options_frame (OptionsFrame): widget used for displaying and
modifying some of the parameters.
Args:
parent (Tk object): parent of this widget.
current_categories (list of str): list of current valid categories.
data_from_params_file(StringVar): StringVar object used for
communication of this widget with DataFrame. Changing the value
of data_from_params_file
triggers changes in DataFrame (like clearing all data and
loading data from file).
str_var_for_input_output_boxes (ObserverStringVar):
ObserverStringVar object used for
storing input and output categories and for tracking changes
in input and output categories.
weights_status_str (StringVar): StringVar object used for changing
label of weights editor, for details see WeightFrame.
'''
def __init__(self, parent, current_categories, data_from_params_file,
str_var_for_input_output_boxes, weights_status_str,
*args, **kw):
Notebook.__init__(self, parent, *args, **kw)
self.parent = parent
self.params = Parameters()
self.current_categories = current_categories
self.input_categories_frame = None
self.output_categories_frame = None
self.params_from_file_lbl = None
self.data_from_params_file = data_from_params_file
self.str_var_for_input_output_boxes = str_var_for_input_output_boxes
self.weight_tab = None
self.load_without_data = IntVar()
self.options_frame = None
self.create_widgets(weights_status_str)
def create_widgets(self, weights_status_str):
''' Creates all widgets.
'''
self.enable_traversal()
self._create_params_tab()
self.weight_tab = WeightFrame(self, self.current_categories,
self.params, weights_status_str)
self.add(self.weight_tab, text='Weights editor')
def change_weight_tab_name(self, new_name):
''' Changes name of weights editor tab.
Args:
new_name (str): new name for weights editor tab.
'''
self.tab(1, text=new_name)
def _create_params_tab(self):
''' Creates all widgets of the parameters tab.
'''
frame_for_all_objects = VerticalScrolledFrame(self)
frame_for_all_objects.columnconfigure(0, weight=1)
frame_for_all_objects.rowconfigure(0, weight=1)
params_tab = frame_for_all_objects.interior
params_tab.columnconfigure(0, weight=1, pad=5)
frame_for_save_btns = Frame(params_tab)
frame_for_save_btns.columnconfigure(0, weight=1)
frame_for_save_btns.columnconfigure(1, weight=1)
load_btn = Button(frame_for_save_btns, text='Load parameters',
command=self.load_file)
load_btn.grid(row=0, column=0, sticky=W+N, pady=2)
load_wo_data_box = Checkbutton(frame_for_save_btns,
text='Load without data',
variable=self.load_without_data)
load_wo_data_box.grid(row=1, column=0, sticky=W+N, pady=2)
save_btn = Button(frame_for_save_btns, text='Save parameters',
command=self.on_save_params)
save_btn.grid(row=0, column=1, sticky=E+N, pady=2)
save_btn = Button(frame_for_save_btns, text='Save parameters as...',
command=self.on_save_params_as)
save_btn.grid(row=1, column=1, sticky=E+N, pady=2)
frame_for_save_btns.grid(row=0, column=0, sticky=E+W,
padx=XPAD_VALUE, pady=YPAD_VALUE)
self.params_from_file_lbl = Label(params_tab, text=TEXT_FOR_PARAMS_LBL,
anchor=W, justify=LEFT,
wraplength=MAX_FILE_LBL_LENGTH)
self.params_from_file_lbl.grid(row=1, column=0, columnspan=3,
sticky=W+N, padx=XPAD_VALUE,
pady=YPAD_VALUE)
input_categories_list = CategoriesCheckBox(
params_tab, 'Input categories:', True, self.params,
'INPUT_CATEGORIES')
self.input_categories_frame = input_categories_list
input_categories_list.grid(row=4, column=0, sticky=W+N+S+E,
padx=XPAD_VALUE,
pady=YPAD_VALUE, columnspan=2)
output_categories_list = CategoriesCheckBox(
params_tab, 'Output categories:', False, self.params,
'OUTPUT_CATEGORIES')
self.output_categories_frame = output_categories_list
output_categories_list.grid(row=5, column=0, sticky=W+N+S+E,
padx=XPAD_VALUE,
pady=YPAD_VALUE, columnspan=2)
self.options_frame = OptionsFrame(params_tab, self.params,
self.current_categories,
self.input_categories_frame,
self.output_categories_frame)
self.options_frame.grid(row=6, column=0, columnspan=2,
sticky=N+S+W+E, padx=XPAD_VALUE,
pady=YPAD_VALUE)
self.add(frame_for_all_objects, text='Parameters')
def on_save_params(self):
''' Saves current parameter values to the file from which the
parameters were loaded. This file name is displayed. If no file name
is displayed (i.e. parameters were not previously loaded from a file),
then the asksaveasfilename dialogue is called.
'''
file_name = self.params_from_file_lbl.cget('text')
if TEXT_FOR_PARAMS_LBL in file_name:
file_name = file_name[len(TEXT_FOR_PARAMS_LBL):]
if file_name:
write_parameters_to_file(self.params, file_name)
else:
self.on_save_params_as()
def on_save_params_as(self):
''' Calls asksaveasfilename dialogue and saves current values of
parameters to
the specified file.
'''
file_name = self._get_file_name_to_save()
if file_name:
write_parameters_to_file(self.params, file_name)
def _get_file_name_to_save(self):
''' Calls asksaveasfilename dialogue. This method is overridden
in unit tests.
Returns:
(str): file name.
'''
return asksaveasfilename(filetypes=[('Text files', '*.txt')],
defaultextension='.txt')
def load_file(self):
''' Loads parameters from file specified by the user.
'''
file_name = self._get_filename_for_load()
if file_name:
self.str_var_for_input_output_boxes.input_categories.clear()
self.str_var_for_input_output_boxes.output_categories.clear()
# save previous params
params_to_restore = dict()
for param_name in CATEGORICAL_AND_DATA_FIELDS:
params_to_restore[param_name] = self.params.get_parameter_value(
param_name)
self.params.copy_all_params(parse_parameters_from_file(file_name))
if self.load_without_data.get() == 0:
self.load_data_file_and_related_params(file_name,
params_to_restore)
else:
self.data_from_params_file.set('')
# restore previous parameters
for param_name, value in params_to_restore.items():
self.params.update_parameter(param_name, value)
self.options_frame.set_params_values()
def _get_filename_for_load(self):
''' Calls askopenfilename dialogue. This method is overridden
in unit tests.
Returns:
(str): file name.
'''
file_types = [('Text files', '*.txt'), ('All files', '*.*')]
file_name = askopenfilename(title='Choose a file', filetypes=file_types)
return file_name
def load_data_file_and_related_params(self, file_name, params_to_restore):
''' Loads data if possible and sets widgets to proper values
depending on parameters.
Args:
file_name (str): file name of file with parameters. It is needed
to display it on parameters frame.
params_to_restore (dict of str to str): dictionary of
previous values of parameters. They are used in order
to restore
previous values if loading of data from file fails.
'''
data_file = self.params.get_parameter_value('DATA_FILE')
norm_data_path = os.path.normpath(data_file)
if os.path.isfile(norm_data_path):
params_to_restore = dict()
# I have to store this here, because when I clean all data
# from data tab, it deletes these values from params
for param_name in CATEGORICAL_AND_DATA_FIELDS:
params_to_restore[param_name] = self.params.get_parameter_value(
param_name)
# this line calls clear all from data_tab
self.data_from_params_file.set(norm_data_path)
self.params_from_file_lbl.config(
text=TEXT_FOR_PARAMS_LBL + file_name)
for param_name, value in params_to_restore.items():
self.params.update_parameter(param_name, value)
self.add_categories(
'INPUT_CATEGORIES', self.input_categories_frame,
self.str_var_for_input_output_boxes.input_categories)
self.add_categories(
'OUTPUT_CATEGORIES', self.output_categories_frame,
self.str_var_for_input_output_boxes.output_categories)
self.str_var_for_input_output_boxes.set('notify')
self.weight_tab.add_weights()
else:
self._show_warning(norm_data_path)
for param_name, value in params_to_restore.items():
self.params.update_parameter(param_name, value)
def _show_warning(self, norm_data_path):
''' Shows warning that data cannot be loaded from file.
This method is overridden in unit tests.
'''
showwarning('Warning', 'Cannot load data file: ' + norm_data_path +
'. Parameters will be loaded without data.')
def change_category_name(self, old_name, new_name):
''' Changes category name in parameters and all widgets to a new name.
If new name is empty string, then some of the parameters might
be lost (for example, weight restrictions will be lost).
Args:
old_name (str): old name of the category.
new_name (str): new name of the category.
'''
if old_name != new_name:
self.input_categories_frame.change_category_name(old_name, new_name)
self.output_categories_frame.change_category_name(old_name,
new_name)
self.weight_tab.add_weights()
if self.options_frame.combobox_text_var.get() == old_name:
self.options_frame.change_categorical_box()
self.options_frame.set_categorical_box(new_name)
def add_categories(self, name, frame, categories_container):
''' Adds input or output categories to a specified widget
with categories from parameters.
Args:
name (str): name of the parameter where categories come from,
possible values INPUT_CATEGORIES, OUTPUT_CATEGORIES.
frame (CategoriesCheckBox): widget where categories will
be added.
categories_container (list of str): list of categories where
categories from parameters will be added.
'''
categories = self.params.get_set_of_parameters(name)
for category in categories:
# we add only categories that are
# present in data file
if category in self.current_categories:
frame.add_category(category)
categories_container.append(category)
else:
self.params.remove_category_from_params(name, category)
def clear_all(self):
''' Clears all parameters and corresponding widgets.
'''
self.input_categories_frame.remove_all_categories()
self.output_categories_frame.remove_all_categories()
self.options_frame.combobox_text_var.set('')
self.weight_tab.remove_all_weights()
self.params.clear_all_categorical_and_data_fields()
self.params_from_file_lbl.config(text='')
self.str_var_for_input_output_boxes.input_categories.clear()
self.str_var_for_input_output_boxes.output_categories.clear()
|
|
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Window')
from data_400ms import Fmat_original
# Returns mu,sigma for 10 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models
def feature_to_mu_sigma(fvec):
index = 0
m,n = np.shape(fvec)
#print m,n
mu = np.matrix(np.zeros((10,1)))
sigma = np.matrix(np.zeros((10,1)))
DIVS = m/10
while (index < 10):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),0:]
#if index == 1:
#print temp_fvec
mu[index] = scp.mean(temp_fvec)
sigma[index] = scp.std(temp_fvec)
index = index+1
return mu,sigma
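# Example (illustrative): the Fmat slices used below have m = 41 rows, so
# DIVS = 41 / 10 = 4 under Python 2 integer division; state 0 gets the
# mean/std of rows 0-3 taken over all columns, state 1 of rows 4-7, ...,
# state 9 of rows 36-39, and the final row is simply ignored.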
# Returns sequence given raw data
def create_seq(fvec):
m,n = np.shape(fvec)
#print m,n
seq = np.matrix(np.zeros((10,n)))
DIVS = m/10
for i in range(n):
index = 0
while (index < 10):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),i]
#if index == 1:
#print temp_fvec
seq[index,i] = scp.mean(temp_fvec)
index = index+1
return seq
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
#print " "
#print 'Total_Matrix_Shape:',m_tot,n_tot
mu_rf,sigma_rf = feature_to_mu_sigma(Fmat[0:41,0:35])
mu_rm,sigma_rm = feature_to_mu_sigma(Fmat[0:41,35:70])
mu_sf,sigma_sf = feature_to_mu_sigma(Fmat[0:41,70:105])
mu_sm,sigma_sm = feature_to_mu_sigma(Fmat[0:41,105:140])
mu_obj1,sigma_obj1 = feature_to_mu_sigma(Fmat[0:41,140:141])
mu_obj2,sigma_obj2 = feature_to_mu_sigma(Fmat[0:41,141:142])
#print [mu_rf, sigma_rf]
# HMM - Implementation:
# 10 Hidden States
# Max. Force(For now), Contact Area(Not now), and Contact Motion(Not Now) as Continuous Gaussian Observations from each hidden state
# Four HMM-Models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable
# Transition probabilities given as an upper-triangular matrix (to be trained using Baum-Welch)
# A new object is classified according to which model represents it most closely.
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf = np.zeros((10,2))
B_rm = np.zeros((10,2))
B_sf = np.zeros((10,2))
B_sm = np.zeros((10,2))
for num_states in range(10):
B_rf[num_states,0] = mu_rf[num_states]
B_rf[num_states,1] = sigma_rf[num_states]
B_rm[num_states,0] = mu_rm[num_states]
B_rm[num_states,1] = sigma_rm[num_states]
B_sf[num_states,0] = mu_sf[num_states]
B_sf[num_states,1] = sigma_sf[num_states]
B_sm[num_states,0] = mu_sm[num_states]
B_sm[num_states,1] = sigma_sm[num_states]
B_rf = B_rf.tolist()
B_rm = B_rm.tolist()
B_sf = B_sf.tolist()
B_sm = B_sm.tolist()
# pi - initial probabilities per state
pi = [0.1] * 10
# generate RF, RM, SF, SM models from parameters
model_rf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf, pi) # Will be Trained
model_rm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm, pi) # Will be Trained
model_sf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf, pi) # Will be Trained
model_sm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm, pi) # Will be Trained
trial_number = 1
rf_final = np.matrix(np.zeros((28,1)))
rm_final = np.matrix(np.zeros((28,1)))
sf_final = np.matrix(np.zeros((28,1)))
sm_final = np.matrix(np.zeros((28,1)))
while (trial_number < 6):
# For Training
total_seq = Fmat[0:41,:]
m_total, n_total = np.shape(total_seq)
#print 'Total_Sequence_Shape:', m_total, n_total
if (trial_number == 1):
j = 5
total_seq_rf = total_seq[0:41,1:5]
total_seq_rm = total_seq[0:41,36:40]
total_seq_sf = total_seq[0:41,71:75]
total_seq_sm = total_seq[0:41,106:110]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:41,j+1:j+5]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:41,j+36:j+40]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:41,j+71:j+75]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:41,j+106:j+110]))
j = j+5
if (trial_number == 2):
j = 5
total_seq_rf = np.column_stack((total_seq[0:41,0],total_seq[0:41,2:5]))
total_seq_rm = np.column_stack((total_seq[0:41,35],total_seq[0:41,37:40]))
total_seq_sf = np.column_stack((total_seq[0:41,70],total_seq[0:41,72:75]))
total_seq_sm = np.column_stack((total_seq[0:41,105],total_seq[0:41,107:110]))
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:41,j+0],total_seq[0:41,j+2:j+5]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:41,j+35],total_seq[0:41,j+37:j+40]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:41,j+70],total_seq[0:41,j+72:j+75]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:41,j+105],total_seq[0:41,j+107:j+110]))
j = j+5
if (trial_number == 3):
j = 5
total_seq_rf = np.column_stack((total_seq[0:41,0:2],total_seq[0:41,3:5]))
total_seq_rm = np.column_stack((total_seq[0:41,35:37],total_seq[0:41,38:40]))
total_seq_sf = np.column_stack((total_seq[0:41,70:72],total_seq[0:41,73:75]))
total_seq_sm = np.column_stack((total_seq[0:41,105:107],total_seq[0:41,108:110]))
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:41,j+0:j+2],total_seq[0:41,j+3:j+5]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:41,j+35:j+37],total_seq[0:41,j+38:j+40]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:41,j+70:j+72],total_seq[0:41,j+73:j+75]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:41,j+105:j+107],total_seq[0:41,j+108:j+110]))
j = j+5
if (trial_number == 4):
j = 5
total_seq_rf = np.column_stack((total_seq[0:41,0:3],total_seq[0:41,4:5]))
total_seq_rm = np.column_stack((total_seq[0:41,35:38],total_seq[0:41,39:40]))
total_seq_sf = np.column_stack((total_seq[0:41,70:73],total_seq[0:41,74:75]))
total_seq_sm = np.column_stack((total_seq[0:41,105:108],total_seq[0:41,109:110]))
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:41,j+0:j+3],total_seq[0:41,j+4:j+5]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:41,j+35:j+38],total_seq[0:41,j+39:j+40]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:41,j+70:j+73],total_seq[0:41,j+74:j+75]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:41,j+105:j+108],total_seq[0:41,j+109:j+110]))
j = j+5
if (trial_number == 5):
j = 5
total_seq_rf = total_seq[0:41,0:4]
total_seq_rm = total_seq[0:41,35:39]
total_seq_sf = total_seq[0:41,70:74]
total_seq_sm = total_seq[0:41,105:109]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:41,j+0:j+4]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:41,j+35:j+39]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:41,j+70:j+74]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:41,j+105:j+109]))
j = j+5
train_seq_rf = (np.array(total_seq_rf).T).tolist()
train_seq_rm = (np.array(total_seq_rm).T).tolist()
train_seq_sf = (np.array(total_seq_sf).T).tolist()
train_seq_sm = (np.array(total_seq_sm).T).tolist()
#print train_seq_rf
final_ts_rf = ghmm.SequenceSet(F,train_seq_rf)
final_ts_rm = ghmm.SequenceSet(F,train_seq_rm)
final_ts_sf = ghmm.SequenceSet(F,train_seq_sf)
final_ts_sm = ghmm.SequenceSet(F,train_seq_sm)
model_rf.baumWelch(final_ts_rf)
model_rm.baumWelch(final_ts_rm)
model_sf.baumWelch(final_ts_sf)
model_sm.baumWelch(final_ts_sm)
# For Testing
if (trial_number == 1):
j = 5
total_seq_rf = total_seq[0:41,0]
total_seq_rm = total_seq[0:41,35]
total_seq_sf = total_seq[0:41,70]
total_seq_sm = total_seq[0:41,105]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:41,j]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:41,j+35]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:41,j+70]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:41,j+105]))
j = j+5
if (trial_number == 2):
j = 5
total_seq_rf = total_seq[0:41,1]
total_seq_rm = total_seq[0:41,36]
total_seq_sf = total_seq[0:41,71]
total_seq_sm = total_seq[0:41,106]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:41,j+1]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:41,j+36]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:41,j+71]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:41,j+106]))
j = j+5
if (trial_number == 3):
j = 5
total_seq_rf = total_seq[0:41,2]
total_seq_rm = total_seq[0:41,37]
total_seq_sf = total_seq[0:41,72]
total_seq_sm = total_seq[0:41,107]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:41,j+2]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:41,j+37]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:41,j+72]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:41,j+107]))
j = j+5
if (trial_number == 4):
j = 5
total_seq_rf = total_seq[0:41,3]
total_seq_rm = total_seq[0:41,38]
total_seq_sf = total_seq[0:41,73]
total_seq_sm = total_seq[0:41,108]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:41,j+3]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:41,j+38]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:41,j+73]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:41,j+108]))
j = j+5
if (trial_number == 5):
j = 5
total_seq_rf = total_seq[0:41,4]
total_seq_rm = total_seq[0:41,39]
total_seq_sf = total_seq[0:41,74]
total_seq_sm = total_seq[0:41,109]
while (j < 35):
total_seq_rf = np.column_stack((total_seq_rf,total_seq[0:41,j+4]))
total_seq_rm = np.column_stack((total_seq_rm,total_seq[0:41,j+39]))
total_seq_sf = np.column_stack((total_seq_sf,total_seq[0:41,j+74]))
total_seq_sm = np.column_stack((total_seq_sm,total_seq[0:41,j+109]))
j = j+5
total_seq_obj = np.matrix(np.column_stack((total_seq_rf,total_seq_rm,total_seq_sf,total_seq_sm)))
rf = np.matrix(np.zeros(np.size(total_seq_obj,1)))
rm = np.matrix(np.zeros(np.size(total_seq_obj,1)))
sf = np.matrix(np.zeros(np.size(total_seq_obj,1)))
sm = np.matrix(np.zeros(np.size(total_seq_obj,1)))
k = 0
while (k < np.size(total_seq_obj,1)):
test_seq_obj = (np.array(total_seq_obj[0:41,k]).T).tolist()
new_test_seq_obj = np.array(sum(test_seq_obj,[]))
ts_obj = new_test_seq_obj
final_ts_obj = ghmm.EmissionSequence(F,ts_obj.tolist())
# Find Viterbi Path
path_rf_obj = model_rf.viterbi(final_ts_obj)
path_rm_obj = model_rm.viterbi(final_ts_obj)
path_sf_obj = model_sf.viterbi(final_ts_obj)
path_sm_obj = model_sm.viterbi(final_ts_obj)
obj = max(path_rf_obj[1],path_rm_obj[1],path_sf_obj[1],path_sm_obj[1])
if obj == path_rf_obj[1]:
rf[0,k] = 1
elif obj == path_rm_obj[1]:
rm[0,k] = 1
elif obj == path_sf_obj[1]:
sf[0,k] = 1
else:
sm[0,k] = 1
k = k+1
#print rf.T
rf_final = rf_final + rf.T
rm_final = rm_final + rm.T
sf_final = sf_final + sf.T
sm_final = sm_final + sm.T
trial_number = trial_number + 1
#print rf_final
#print rm_final
#print sf_final
#print sm_final
# Confusion Matrix
cmat = np.zeros((4,4))
arrsum_rf = np.zeros((4,1))
arrsum_rm = np.zeros((4,1))
arrsum_sf = np.zeros((4,1))
arrsum_sm = np.zeros((4,1))
k = 7
i = 0
while (k < 29):
arrsum_rf[i] = np.sum(rf_final[k-7:k,0])
arrsum_rm[i] = np.sum(rm_final[k-7:k,0])
arrsum_sf[i] = np.sum(sf_final[k-7:k,0])
arrsum_sm[i] = np.sum(sm_final[k-7:k,0])
i = i+1
k = k+7
i=0
while (i < 4):
j=0
while (j < 4):
if (i == 0):
cmat[i][j] = arrsum_rf[j]
elif (i == 1):
cmat[i][j] = arrsum_rm[j]
elif (i == 2):
cmat[i][j] = arrsum_sf[j]
else:
cmat[i][j] = arrsum_sm[j]
j = j+1
i = i+1
#print cmat
# Plot Confusion Matrix
Nlabels = 4
fig = pp.figure()
ax = fig.add_subplot(111)
figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels])
ax.set_title('Performance of HMM Models')
pp.xlabel("Targets")
pp.ylabel("Predictions")
ax.set_xticks([0.5,1.5,2.5,3.5])
ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
ax.set_yticks([3.5,2.5,1.5,0.5])
ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
figbar = fig.colorbar(figplot)
i = 0
while (i < 4):
j = 0
while (j < 4):
pp.text(j+0.5,3.5-i,cmat[i][j])
j = j+1
i = i+1
pp.show()
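# A possible follow-up step (illustrative sketch, not part of the original
# script): normalise each column of the confusion matrix so every target class
# sums to 1.0, turning raw counts into per-class classification rates.
# Assumes each target column received at least one prediction.
#
#   col_sums = cmat.sum(axis=0)
#   cmat_norm = cmat / col_sums
#   print cmat_norm.diagonal()   # per-class accuracy for RF, RM, SF, SM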
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Functions and classes dealing with rate limiting of requests.
"""
import collections
import copy
import httplib
import math
import re
import time
from oslo_serialization import jsonutils
from oslo_utils import importutils
import webob.dec
import webob.exc
from cinder.api.openstack import wsgi
from cinder.api.views import limits as limits_views
from cinder.api import xmlutil
from cinder.i18n import _
from cinder import quota
from cinder import wsgi as base_wsgi
QUOTAS = quota.QUOTAS
LIMITS_PREFIX = "limits."
# Convenience constants for the limits dictionary passed to Limiter().
PER_SECOND = 1
PER_MINUTE = 60
PER_HOUR = 60 * 60
PER_DAY = 60 * 60 * 24
limits_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
class LimitsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('limits', selector='limits')
rates = xmlutil.SubTemplateElement(root, 'rates')
rate = xmlutil.SubTemplateElement(rates, 'rate', selector='rate')
rate.set('uri', 'uri')
rate.set('regex', 'regex')
limit = xmlutil.SubTemplateElement(rate, 'limit', selector='limit')
limit.set('value', 'value')
limit.set('verb', 'verb')
limit.set('remaining', 'remaining')
limit.set('unit', 'unit')
limit.set('next-available', 'next-available')
absolute = xmlutil.SubTemplateElement(root, 'absolute',
selector='absolute')
limit = xmlutil.SubTemplateElement(absolute, 'limit',
selector=xmlutil.get_items)
limit.set('name', 0)
limit.set('value', 1)
return xmlutil.MasterTemplate(root, 1, nsmap=limits_nsmap)
class LimitsController(wsgi.Controller):
"""Controller for accessing limits in the OpenStack API."""
@wsgi.serializers(xml=LimitsTemplate)
def index(self, req):
"""Return all global and rate limit information."""
context = req.environ['cinder.context']
quotas = QUOTAS.get_project_quotas(context, context.project_id,
usages=False)
abs_limits = dict((k, v['limit']) for k, v in quotas.items())
rate_limits = req.environ.get("cinder.limits", [])
builder = self._get_view_builder(req)
return builder.build(rate_limits, abs_limits)
def _get_view_builder(self, req):
return limits_views.ViewBuilder()
def create_resource():
return wsgi.Resource(LimitsController())
class Limit(object):
"""Stores information about a limit for HTTP requests."""
UNITS = {
1: "SECOND",
60: "MINUTE",
60 * 60: "HOUR",
60 * 60 * 24: "DAY",
}
UNIT_MAP = dict([(v, k) for k, v in UNITS.items()])
def __init__(self, verb, uri, regex, value, unit):
"""Initialize a new `Limit`.
@param verb: HTTP verb (POST, PUT, etc.)
@param uri: Human-readable URI
@param regex: Regular expression format for this limit
@param value: Integer number of requests which can be made
@param unit: Unit of measure for the value parameter
"""
self.verb = verb
self.uri = uri
self.regex = regex
self.value = int(value)
self.unit = unit
self.unit_string = self.display_unit().lower()
self.remaining = int(value)
if value <= 0:
raise ValueError("Limit value must be > 0")
self.last_request = None
self.next_request = None
self.water_level = 0
self.capacity = self.unit
self.request_value = float(self.capacity) / float(self.value)
msg = (_("Only %(value)s %(verb)s request(s) can be "
"made to %(uri)s every %(unit_string)s.") %
{'value': self.value, 'verb': self.verb,
'uri': self.uri, 'unit_string': self.unit_string})
self.error_message = msg
def __call__(self, verb, url):
"""Represent a call to this limit from a relevant request.
@param verb: string http verb (POST, GET, etc.)
@param url: string URL
"""
if self.verb != verb or not re.match(self.regex, url):
return
now = self._get_time()
if self.last_request is None:
self.last_request = now
leak_value = now - self.last_request
self.water_level -= leak_value
self.water_level = max(self.water_level, 0)
self.water_level += self.request_value
difference = self.water_level - self.capacity
self.last_request = now
if difference > 0:
self.water_level -= self.request_value
self.next_request = now + difference
return difference
cap = self.capacity
water = self.water_level
val = self.value
self.remaining = math.floor(((cap - water) / cap) * val)
self.next_request = now
def _get_time(self):
"""Retrieve the current time. Broken out for testability."""
return time.time()
def display_unit(self):
"""Display the string name of the unit."""
return self.UNITS.get(self.unit, "UNKNOWN")
def display(self):
"""Return a useful representation of this class."""
return {
"verb": self.verb,
"URI": self.uri,
"regex": self.regex,
"value": self.value,
"remaining": int(self.remaining),
"unit": self.display_unit(),
"resetTime": int(self.next_request or self._get_time()),
}
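# How the leaky bucket above behaves in practice (illustrative numbers, not
# part of the original module): for Limit("POST", "*", ".*", 10, PER_MINUTE)
# the capacity is 60 (the unit in seconds) and every matching request adds
# request_value = 60 / 10 = 6.0 to the water level, while the seconds elapsed
# since the previous request leak back out.  Ten instantaneous POSTs fill the
# bucket exactly (water_level == 60); the eleventh overflows by 6.0, so
# __call__ returns a delay of 6 seconds and next_request moves 6 seconds ahead.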
# "Limit" format is a dictionary with the HTTP verb, human-readable URI,
# a regular-expression to match, value and unit of measure (PER_DAY, etc.)
DEFAULT_LIMITS = [
Limit("POST", "*", ".*", 10, PER_MINUTE),
Limit("POST", "*/servers", "^/servers", 50, PER_DAY),
Limit("PUT", "*", ".*", 10, PER_MINUTE),
Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE),
Limit("DELETE", "*", ".*", 100, PER_MINUTE),
]
class RateLimitingMiddleware(base_wsgi.Middleware):
"""Rate-limits requests passing through this middleware.
All limit information is stored in memory for this implementation.
"""
def __init__(self, application, limits=None, limiter=None, **kwargs):
"""Initialize new `RateLimitingMiddleware`
This wraps the given WSGI application and sets up the given limits.
@param application: WSGI application to wrap
@param limits: String describing limits
@param limiter: String identifying class for representing limits
Other parameters are passed to the constructor for the limiter.
"""
base_wsgi.Middleware.__init__(self, application)
# Select the limiter class
if limiter is None:
limiter = Limiter
else:
limiter = importutils.import_class(limiter)
# Parse the limits, if any are provided
if limits is not None:
limits = limiter.parse_limits(limits)
self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""Represent a single call through this middleware.
We should record the request if we have a limit relevant to it.
If no limit is relevant to the request, ignore it.
If the request should be rate limited, return a fault telling the user
they are over the limit and need to retry later.
"""
verb = req.method
url = req.url
context = req.environ.get("cinder.context")
if context:
username = context.user_id
else:
username = None
delay, error = self._limiter.check_for_delay(verb, url, username)
if delay:
msg = _("This request was rate-limited.")
retry = time.time() + delay
return wsgi.OverLimitFault(msg, error, retry)
req.environ["cinder.limits"] = self._limiter.get_limits(username)
return self.application
class Limiter(object):
"""Rate-limit checking class which handles limits in memory."""
def __init__(self, limits, **kwargs):
"""Initialize the new `Limiter`.
@param limits: List of `Limit` objects
"""
self.limits = copy.deepcopy(limits)
self.levels = collections.defaultdict(lambda: copy.deepcopy(limits))
# Pick up any per-user limit information
for key, value in kwargs.items():
if key.startswith(LIMITS_PREFIX):
username = key[len(LIMITS_PREFIX):]
self.levels[username] = self.parse_limits(value)
def get_limits(self, username=None):
"""Return the limits for a given user."""
return [limit.display() for limit in self.levels[username]]
    def check_for_delay(self, verb, url, username=None):
        """Check the given verb/url/username triplet for limits.
@return: Tuple of delay (in seconds) and error message (or None, None)
"""
delays = []
for limit in self.levels[username]:
delay = limit(verb, url)
if delay:
delays.append((delay, limit.error_message))
if delays:
delays.sort()
return delays[0]
return None, None
# Note: This method gets called before the class is instantiated,
# so this must be either a static method or a class method. It is
# used to develop a list of limits to feed to the constructor. We
# put this in the class so that subclasses can override the
# default limit parsing.
@staticmethod
def parse_limits(limits):
"""Convert a string into a list of Limit instances.
This implementation expects a semicolon-separated sequence of
parenthesized groups, where each group contains a
comma-separated sequence consisting of HTTP method,
user-readable URI, a URI reg-exp, an integer number of
requests which can be made, and a unit of measure. Valid
values for the latter are "SECOND", "MINUTE", "HOUR", and
"DAY".
@return: List of Limit instances.
"""
# Handle empty limit strings
limits = limits.strip()
if not limits:
return []
# Split up the limits by semicolon
result = []
for group in limits.split(';'):
group = group.strip()
if group[:1] != '(' or group[-1:] != ')':
raise ValueError("Limit rules must be surrounded by "
"parentheses")
group = group[1:-1]
# Extract the Limit arguments
args = [a.strip() for a in group.split(',')]
if len(args) != 5:
raise ValueError("Limit rules must contain the following "
"arguments: verb, uri, regex, value, unit")
# Pull out the arguments
verb, uri, regex, value, unit = args
# Upper-case the verb
verb = verb.upper()
# Convert value--raises ValueError if it's not integer
value = int(value)
# Convert unit
unit = unit.upper()
if unit not in Limit.UNIT_MAP:
raise ValueError("Invalid units specified")
unit = Limit.UNIT_MAP[unit]
# Build a limit
result.append(Limit(verb, uri, regex, value, unit))
return result
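# Example of the string format parse_limits() accepts (illustrative only):
#
#   "(POST, *, .*, 10, MINUTE);(GET, *changes-since*, .*changes-since.*, 3, MINUTE)"
#
# parses into two Limit instances equivalent to the first and fourth entries of
# DEFAULT_LIMITS above; a malformed group (missing parentheses, wrong number of
# fields or an unknown unit) raises ValueError instead.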
class WsgiLimiter(object):
"""Rate-limit checking from a WSGI application.
Uses an in-memory `Limiter`.
To use, POST ``/<username>`` with JSON data such as::
        {
            "verb": "GET",
            "path": "/servers"
        }
and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds
header containing the number of seconds to wait before the action would
succeed.
"""
def __init__(self, limits=None):
"""Initialize the new `WsgiLimiter`.
@param limits: List of `Limit` objects
"""
self._limiter = Limiter(limits or DEFAULT_LIMITS)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, request):
"""Handles a call to this application.
Returns 204 if the request is acceptable to the limiter, else a 403
is returned with a relevant header indicating when the request
*will* succeed.
"""
if request.method != "POST":
raise webob.exc.HTTPMethodNotAllowed()
try:
info = dict(jsonutils.loads(request.body))
except ValueError:
raise webob.exc.HTTPBadRequest()
username = request.path_info_pop()
verb = info.get("verb")
path = info.get("path")
delay, error = self._limiter.check_for_delay(verb, path, username)
if delay:
headers = {"X-Wait-Seconds": "%.2f" % delay}
return webob.exc.HTTPForbidden(headers=headers, explanation=error)
else:
return webob.exc.HTTPNoContent()
class WsgiLimiterProxy(object):
"""Rate-limit requests based on answers from a remote source."""
def __init__(self, limiter_address):
"""Initialize the new `WsgiLimiterProxy`.
@param limiter_address: IP/port combination of where to request limit
"""
self.limiter_address = limiter_address
def check_for_delay(self, verb, path, username=None):
body = jsonutils.dumps({"verb": verb, "path": path})
headers = {"Content-Type": "application/json"}
conn = httplib.HTTPConnection(self.limiter_address)
if username:
conn.request("POST", "/%s" % (username), body, headers)
else:
conn.request("POST", "/", body, headers)
resp = conn.getresponse()
        if 200 <= resp.status < 300:
return None, None
return resp.getheader("X-Wait-Seconds"), resp.read() or None
# Note: This method gets called before the class is instantiated,
# so this must be either a static method or a class method. It is
# used to develop a list of limits to feed to the constructor.
# This implementation returns an empty list, since all limit
# decisions are made by a remote server.
@staticmethod
def parse_limits(limits):
"""Ignore a limits string--simply doesn't apply for the limit proxy.
@return: Empty list.
"""
return []
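if __name__ == "__main__":
    # Illustrative sketch only (not part of the upstream module): drive the
    # stand-alone WsgiLimiter with synthetic requests.  Assumes webob is
    # importable and that DEFAULT_LIMITS (10 POSTs per minute) is in effect.
    import webob
    app = WsgiLimiter()
    payload = jsonutils.dumps({"verb": "POST", "path": "/volumes"})
    for i in range(12):
        req = webob.Request.blank("/alice", method="POST", body=payload)
        resp = req.get_response(app)
        # The first ten requests come back "204 No Content"; after that the
        # limiter answers "403 Forbidden" with an X-Wait-Seconds header.
        print i + 1, resp.status, resp.headers.get("X-Wait-Seconds")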
|
|
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.db.models.sql.constants import JOIN_TYPE, LHS_ALIAS, LHS_JOIN_COL, \
TABLE_NAME, RHS_JOIN_COL
from django.utils.tree import Node
from djangotoolbox.fields import ListField
from .lookups import StandardLookup
OR = 'OR'
# TODO: optimize code
class BaseResolver(object):
def __init__(self):
# mapping from lookups to indexes
self.index_map = {}
# mapping from column names to field names
self.column_to_name = {}
''' API called by resolver'''
def create_index(self, lookup):
field_to_index = self.get_field_to_index(lookup.model, lookup.field_name)
        # backend doesn't know how to handle this index definition
if not field_to_index:
return
index_field = lookup.get_field_to_add(field_to_index)
config_field = index_field.item_field if \
isinstance(index_field, ListField) else index_field
if field_to_index.max_length is not None and \
isinstance(config_field, models.CharField):
config_field.max_length = field_to_index.max_length
# don't install a field if it already exists
try:
lookup.model._meta.get_field(self.index_name(lookup))
        except FieldDoesNotExist:
lookup.model.add_to_class(self.index_name(lookup), index_field)
self.index_map[lookup] = index_field
self.add_column_to_name(lookup.model, lookup.field_name)
else:
# makes dbindexer unit test compatible
if lookup not in self.index_map:
self.index_map[lookup] = lookup.model._meta.get_field(
self.index_name(lookup))
self.add_column_to_name(lookup.model, lookup.field_name)
def convert_insert_query(self, query):
'''Converts a database saving query.'''
for lookup in self.index_map.keys():
self._convert_insert_query(query, lookup)
def _convert_insert_query(self, query, lookup):
if not lookup.model == query.model:
return
position = self.get_query_position(query, lookup)
if position is None:
return
value = self.get_value(lookup.model, lookup.field_name, query)
if isinstance(value, list):
for i in range(0,len(value)):
setattr(query.objs[i], lookup.index_name, lookup.convert_value(value[i]))
else:
try:
setattr(query.objs[0], lookup.index_name, lookup.convert_value(value))
except Exception, e:
'''
TODO: If lookup.index_name is a foreign key field, we need to set the actual
referenced object, not just the id. When we try to set the id, we get an
exception.
'''
pass
def convert_filters(self, query):
self._convert_filters(query, query.where)
''' helper methods '''
def _convert_filters(self, query, filters):
for index, child in enumerate(filters.children[:]):
if isinstance(child, Node):
self._convert_filters(query, child)
continue
self.convert_filter(query, filters, child, index)
def convert_filter(self, query, filters, child, index):
constraint, lookup_type, annotation, value = child
if constraint.field is None:
return
field_name = self.column_to_name.get(constraint.field.column)
if field_name and constraint.alias == \
query.table_map[query.model._meta.db_table][0]:
for lookup in self.index_map.keys():
if lookup.matches_filter(query.model, field_name, lookup_type,
value):
new_lookup_type, new_value = lookup.convert_lookup(value,
lookup_type)
index_name = self.index_name(lookup)
self._convert_filter(query, filters, child, index,
new_lookup_type, new_value, index_name)
def _convert_filter(self, query, filters, child, index, new_lookup_type,
new_value, index_name):
constraint, lookup_type, annotation, value = child
lookup_type, value = new_lookup_type, new_value
constraint.field = query.get_meta().get_field(index_name)
constraint.col = constraint.field.column
child = constraint, lookup_type, annotation, value
filters.children[index] = child
def index_name(self, lookup):
return lookup.index_name
def get_field_to_index(self, model, field_name):
try:
return model._meta.get_field(field_name)
        except FieldDoesNotExist:
return None
def get_value(self, model, field_name, query):
field_to_index = self.get_field_to_index(model, field_name)
if field_to_index in query.fields:
values = []
for obj in query.objs:
value = field_to_index.value_from_object(obj)
values.append(value)
if len(values):
return values
raise FieldDoesNotExist('Cannot find field in query.')
def add_column_to_name(self, model, field_name):
column_name = model._meta.get_field(field_name).column
self.column_to_name[column_name] = field_name
def get_index(self, lookup):
return self.index_map[lookup]
def get_query_position(self, query, lookup):
for index, field in enumerate(query.fields):
if field is self.get_index(lookup):
return index
return None
def unref_alias(query, alias):
table_name = query.alias_map[alias][TABLE_NAME]
query.alias_refcount[alias] -= 1
if query.alias_refcount[alias] < 1:
# Remove all information about the join
del query.alias_refcount[alias]
del query.join_map[query.rev_join_map[alias]]
del query.rev_join_map[alias]
del query.alias_map[alias]
query.tables.remove(alias)
query.table_map[table_name].remove(alias)
if len(query.table_map[table_name]) == 0:
del query.table_map[table_name]
query.used_aliases.discard(alias)
class FKNullFix(BaseResolver):
'''
Django doesn't generate correct code for ForeignKey__isnull.
It becomes a JOIN with pk__isnull which won't work on nonrel DBs,
so we rewrite the JOIN here.
'''
def create_index(self, lookup):
pass
def convert_insert_query(self, query):
pass
def convert_filter(self, query, filters, child, index):
constraint, lookup_type, annotation, value = child
if constraint.field is not None and lookup_type == 'isnull' and \
isinstance(constraint.field, models.ForeignKey):
self.fix_fk_null_filter(query, constraint)
def unref_alias(self, query, alias):
unref_alias(query, alias)
def fix_fk_null_filter(self, query, constraint):
alias = constraint.alias
table_name = query.alias_map[alias][TABLE_NAME]
lhs_join_col = query.alias_map[alias][LHS_JOIN_COL]
rhs_join_col = query.alias_map[alias][RHS_JOIN_COL]
if table_name != constraint.field.rel.to._meta.db_table or \
rhs_join_col != constraint.field.rel.to._meta.pk.column or \
lhs_join_col != constraint.field.column:
return
next_alias = query.alias_map[alias][LHS_ALIAS]
if not next_alias:
return
self.unref_alias(query, alias)
alias = next_alias
constraint.col = constraint.field.column
constraint.alias = alias
class ConstantFieldJOINResolver(BaseResolver):
def create_index(self, lookup):
if '__' in lookup.field_name:
super(ConstantFieldJOINResolver, self).create_index(lookup)
def convert_insert_query(self, query):
'''Converts a database saving query.'''
for lookup in self.index_map.keys():
if '__' in lookup.field_name:
self._convert_insert_query(query, lookup)
def convert_filter(self, query, filters, child, index):
constraint, lookup_type, annotation, value = child
field_chain = self.get_field_chain(query, constraint)
if field_chain is None:
return
for lookup in self.index_map.keys():
if lookup.matches_filter(query.model, field_chain, lookup_type,
value):
self.resolve_join(query, child)
new_lookup_type, new_value = lookup.convert_lookup(value,
lookup_type)
index_name = self.index_name(lookup)
self._convert_filter(query, filters, child, index,
new_lookup_type, new_value, index_name)
def get_field_to_index(self, model, field_name):
model = self.get_model_chain(model, field_name)[-1]
field_name = field_name.split('__')[-1]
return super(ConstantFieldJOINResolver, self).get_field_to_index(model,
field_name)
def get_value(self, model, field_name, query):
value = super(ConstantFieldJOINResolver, self).get_value(model,
field_name.split('__')[0],
query)
if isinstance(value, list):
value = value[0]
if value is not None:
value = self.get_target_value(model, field_name, value)
return value
def get_field_chain(self, query, constraint):
if constraint.field is None:
return
column_index = self.get_column_index(query, constraint)
return self.column_to_name.get(column_index)
def get_model_chain(self, model, field_chain):
model_chain = [model, ]
for value in field_chain.split('__')[:-1]:
model = model._meta.get_field(value).rel.to
model_chain.append(model)
return model_chain
def get_target_value(self, start_model, field_chain, pk):
fields = field_chain.split('__')
foreign_key = start_model._meta.get_field(fields[0])
if not foreign_key.rel:
# field isn't a related one, so return the value itself
return pk
target_model = foreign_key.rel.to
foreignkey = target_model.objects.all().get(pk=pk)
for value in fields[1:-1]:
foreignkey = getattr(foreignkey, value)
if isinstance(foreignkey._meta.get_field(fields[-1]), models.ForeignKey):
return getattr(foreignkey, '%s_id' % fields[-1])
else:
return getattr(foreignkey, fields[-1])
def add_column_to_name(self, model, field_name):
model_chain = self.get_model_chain(model, field_name)
column_chain = ''
field_names = field_name.split('__')
for model, name in zip(model_chain, field_names):
column_chain += model._meta.get_field(name).column + '__'
self.column_to_name[column_chain[:-2]] = field_name
def unref_alias(self, query, alias):
unref_alias(query, alias)
def get_column_index(self, query, constraint):
if constraint.field:
column_chain = constraint.field.column
alias = constraint.alias
while alias:
join = query.alias_map.get(alias)
if join and join[JOIN_TYPE] == 'INNER JOIN':
column_chain += '__' + join[LHS_JOIN_COL]
alias = query.alias_map[alias][LHS_ALIAS]
else:
alias = None
return '__'.join(reversed(column_chain.split('__')))
def resolve_join(self, query, child):
constraint, lookup_type, annotation, value = child
if not constraint.field:
return
alias = constraint.alias
while True:
next_alias = query.alias_map[alias][LHS_ALIAS]
if not next_alias:
break
self.unref_alias(query, alias)
alias = next_alias
constraint.alias = alias
# TODO: distinguish in memory joins from standard joins somehow
class InMemoryJOINResolver(ConstantFieldJOINResolver):
def __init__(self):
self.field_chains = []
super(InMemoryJOINResolver, self).__init__()
def create_index(self, lookup):
if '__' in lookup.field_name:
field_to_index = self.get_field_to_index(lookup.model, lookup.field_name)
if not field_to_index:
return
# save old column_to_name so we can make in memory queries later on
self.add_column_to_name(lookup.model, lookup.field_name)
# don't add an extra field for standard lookups!
if isinstance(lookup, StandardLookup):
return
# install lookup on target model
model = self.get_model_chain(lookup.model, lookup.field_name)[-1]
lookup.model = model
lookup.field_name = lookup.field_name.split('__')[-1]
super(ConstantFieldJOINResolver, self).create_index(lookup)
def convert_insert_query(self, query):
super(ConstantFieldJOINResolver, self).convert_insert_query(query)
def _convert_filters(self, query, filters):
# or queries are not supported for in-memory-JOINs
if self.contains_OR(query.where, OR):
return
# start with the deepest JOIN level filter!
all_filters = self.get_all_filters(filters)
all_filters.sort(key=lambda item: self.get_field_chain(query, item[1][0]) and \
-len(self.get_field_chain(query, item[1][0])) or 0)
for filters, child, index in all_filters:
# check if convert_filter removed a given child from the where-tree
if not self.contains_child(query.where, child):
continue
self.convert_filter(query, filters, child, index)
def convert_filter(self, query, filters, child, index):
constraint, lookup_type, annotation, value = child
field_chain = self.get_field_chain(query, constraint)
if field_chain is None:
return
if '__' not in field_chain:
return super(ConstantFieldJOINResolver, self).convert_filter(query,
filters, child, index)
pks = self.get_pks(query, field_chain, lookup_type, value)
self.resolve_join(query, child)
self._convert_filter(query, filters, child, index, 'in',
(pk for pk in pks), field_chain.split('__')[0])
def tree_contains(self, filters, to_find, func):
result = False
for child in filters.children[:]:
if func(child, to_find):
result = True
break
if isinstance(child, Node):
result = self.tree_contains(child, to_find, func)
if result:
break
return result
def contains_OR(self, filters, or_):
return self.tree_contains(filters, or_,
lambda c, f: isinstance(c, Node) and c.connector == f)
def contains_child(self, filters, to_find):
return self.tree_contains(filters, to_find, lambda c, f: c is f)
def get_all_filters(self, filters):
all_filters = []
for index, child in enumerate(filters.children[:]):
if isinstance(child, Node):
all_filters.extend(self.get_all_filters(child))
continue
all_filters.append((filters, child, index))
return all_filters
def index_name(self, lookup):
# use another index_name to avoid conflicts with lookups defined on the
# target model which are handled by the BaseBackend
return lookup.index_name + '_in_memory_join'
def get_pks(self, query, field_chain, lookup_type, value):
model_chain = self.get_model_chain(query.model, field_chain)
first_lookup = {'%s__%s' %(field_chain.rsplit('__', 1)[-1],
lookup_type): value}
self.combine_with_same_level_filter(first_lookup, query, field_chain)
pks = model_chain[-1].objects.all().filter(**first_lookup).values_list(
'id', flat=True)
chains = [field_chain.rsplit('__', i+1)[0]
for i in range(field_chain.count('__'))]
lookup = {}
for model, chain in reversed(zip(model_chain[1:-1], chains[:-1])):
lookup.update({'%s__%s' %(chain.rsplit('__', 1)[-1], 'in'):
(pk for pk in pks)})
self.combine_with_same_level_filter(lookup, query, chain)
pks = model.objects.all().filter(**lookup).values_list('id', flat=True)
return pks
def combine_with_same_level_filter(self, lookup, query, field_chain):
lookup_updates = {}
field_chains = self.get_all_field_chains(query, query.where)
for chain, child in field_chains.items():
if chain == field_chain:
continue
if field_chain.rsplit('__', 1)[0] == chain.rsplit('__', 1)[0]:
lookup_updates ['%s__%s' %(chain.rsplit('__', 1)[1], child[1])] \
= child[3]
self.remove_child(query.where, child)
self.resolve_join(query, child)
# TODO: update query.alias_refcount correctly!
lookup.update(lookup_updates)
def remove_child(self, filters, to_remove):
''' Removes a child object from filters. If filters doesn't contain
        children afterwards, filters will be removed from its parent. '''
for child in filters.children[:]:
if child is to_remove:
self._remove_child(filters, to_remove)
return
elif isinstance(child, Node):
self.remove_child(child, to_remove)
if hasattr(child, 'children') and not child.children:
self.remove_child(filters, child)
def _remove_child(self, filters, to_remove):
result = []
for child in filters.children[:]:
if child is to_remove:
continue
result.append(child)
filters.children = result
def get_all_field_chains(self, query, filters):
''' Returns a dict mapping from field_chains to the corresponding child.'''
field_chains = {}
all_filters = self.get_all_filters(filters)
for filters, child, index in all_filters:
field_chain = self.get_field_chain(query, child[0])
            # field_chain can be None if the user didn't specify an index for it
if field_chain:
field_chains[field_chain] = child
return field_chains
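# Rough illustration of what InMemoryJOINResolver does for a chained filter
# (hypothetical models, not part of the original module): for
# Post.objects.filter(author__name='joe') with an index registered on
# 'author__name', get_pks() first runs Author.objects.filter(name__exact='joe')
# to collect primary keys, and convert_filter() then rewrites the original
# condition into Post.objects.filter(author__in=<those pks>), replacing the
# JOIN that a non-relational backend cannot execute.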
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import shutil
import logging
from migrate import exceptions
from migrate.versioning import pathed, script
log = logging.getLogger(__name__)
class VerNum(object):
"""A version number that behaves like a string and int at the same time"""
_instances = dict()
def __new__(cls, value):
val = str(value)
if val not in cls._instances:
cls._instances[val] = super(VerNum, cls).__new__(cls)
ret = cls._instances[val]
return ret
def __init__(self,value):
self.value = str(int(value))
if self < 0:
raise ValueError("Version number cannot be negative")
def __add__(self, value):
ret = int(self) + int(value)
return VerNum(ret)
def __sub__(self, value):
return self + (int(value) * -1)
def __cmp__(self, value):
return int(self) - int(value)
def __repr__(self):
return "<VerNum(%s)>" % self.value
def __str__(self):
return str(self.value)
def __int__(self):
return int(self.value)
class Collection(pathed.Pathed):
"""A collection of versioning scripts in a repository"""
FILENAME_WITH_VERSION = re.compile(r'^(\d{3,}).*')
def __init__(self, path):
"""Collect current version scripts in repository
and store them in self.versions
"""
super(Collection, self).__init__(path)
# Create temporary list of files, allowing skipped version numbers.
files = os.listdir(path)
if '1' in files:
# deprecation
raise Exception('It looks like you have a repository in the old '
'format (with directories for each version). '
'Please convert repository before proceeding.')
tempVersions = dict()
for filename in files:
match = self.FILENAME_WITH_VERSION.match(filename)
if match:
num = int(match.group(1))
tempVersions.setdefault(num, []).append(filename)
else:
pass # Must be a helper file or something, let's ignore it.
# Create the versions member where the keys
# are VerNum's and the values are Version's.
self.versions = dict()
for num, files in tempVersions.items():
self.versions[VerNum(num)] = Version(num, path, files)
@property
def latest(self):
""":returns: Latest version in Collection"""
return max([VerNum(0)] + self.versions.keys())
def create_new_python_version(self, description, **k):
"""Create Python files for new version"""
ver = self.latest + 1
extra = str_to_filename(description)
if extra:
if extra == '_':
extra = ''
elif not extra.startswith('_'):
extra = '_%s' % extra
filename = '%03d%s.py' % (ver, extra)
filepath = self._version_path(filename)
script.PythonScript.create(filepath, **k)
self.versions[ver] = Version(ver, self.path, [filename])
def create_new_sql_version(self, database, **k):
"""Create SQL files for new version"""
ver = self.latest + 1
self.versions[ver] = Version(ver, self.path, [])
# Create new files.
for op in ('upgrade', 'downgrade'):
filename = '%03d_%s_%s.sql' % (ver, database, op)
filepath = self._version_path(filename)
script.SqlScript.create(filepath, **k)
self.versions[ver].add_script(filepath)
def version(self, vernum=None):
"""Returns latest Version if vernum is not given.
Otherwise, returns wanted version"""
if vernum is None:
vernum = self.latest
return self.versions[VerNum(vernum)]
@classmethod
def clear(cls):
super(Collection, cls).clear()
def _version_path(self, ver):
"""Returns path of file in versions repository"""
return os.path.join(self.path, str(ver))
class Version(object):
"""A single version in a collection
:param vernum: Version Number
:param path: Path to script files
:param filelist: List of scripts
:type vernum: int, VerNum
:type path: string
:type filelist: list
"""
def __init__(self, vernum, path, filelist):
self.version = VerNum(vernum)
# Collect scripts in this folder
self.sql = dict()
self.python = None
for script in filelist:
self.add_script(os.path.join(path, script))
def script(self, database=None, operation=None):
"""Returns SQL or Python Script"""
for db in (database, 'default'):
# Try to return a .sql script first
try:
return self.sql[db][operation]
except KeyError:
continue # No .sql script exists
# TODO: maybe add force Python parameter?
ret = self.python
        assert ret is not None, \
            "There is no script for version %s" % self.version
return ret
def add_script(self, path):
"""Add script to Collection/Version"""
if path.endswith(Extensions.py):
self._add_script_py(path)
elif path.endswith(Extensions.sql):
self._add_script_sql(path)
SQL_FILENAME = re.compile(r'^(\d+)_([^_]+)_([^_]+).sql')
def _add_script_sql(self, path):
basename = os.path.basename(path)
match = self.SQL_FILENAME.match(basename)
if match:
version, dbms, op = match.group(1), match.group(2), match.group(3)
else:
raise exceptions.ScriptError(
"Invalid SQL script name %s " % basename + \
"(needs to be ###_database_operation.sql)")
# File the script into a dictionary
self.sql.setdefault(dbms, {})[op] = script.SqlScript(path)
def _add_script_py(self, path):
if self.python is not None:
raise exceptions.ScriptError('You can only have one Python script '
'per version, but you have: %s and %s' % (self.python, path))
self.python = script.PythonScript(path)
class Extensions:
"""A namespace for file extensions"""
py = 'py'
sql = 'sql'
def str_to_filename(s):
    """Replace spaces, single/double quotes and dots with underscores,
    then collapse any run of underscores into a single one.
    """
s = s.replace(' ', '_').replace('"', '_').replace("'", '_').replace(".", "_")
while '__' in s:
s = s.replace('__', '_')
return s
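if __name__ == "__main__":
    # Quick illustrative check, not part of the upstream module: VerNum behaves
    # like both an int and a string, and str_to_filename collapses awkward
    # characters into single underscores.
    print(VerNum(7) + 1)                        # prints "8"
    print(int(VerNum("003")))                   # prints 3
    print(str_to_filename('add "user" table'))  # prints "add_user_table"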
|
|
#!/usr/bin/env python
#
# Licensed under the BSD license. See full license in LICENSE file.
# http://www.lightshowpi.com/
#
# Author: Todd Giles ([email protected])
#
# TODO(todd): Refactor the configuration manager into a configuration manager
# class (to remove the extensive use of globals currently used).
# TODO(todd): Add a main and allow running configuration manager alone to view
# the current configuration, and potentially edit it.
"""Configuration management for the lightshow.
Configuration files are all located in the <homedir>/config directory. This
file contains tools to manage these configuration files.
"""
import ConfigParser
import ast
import datetime
import fcntl
import logging
import os
import os.path
import sys
import warnings
import json
# The home directory and configuration directory for the application.
HOME_DIR = os.getenv("SYNCHRONIZED_LIGHTS_HOME")
if not HOME_DIR:
print("Need to setup SYNCHRONIZED_LIGHTS_HOME environment variable, "
"see readme")
sys.exit()
CONFIG_DIR = HOME_DIR + '/config'
LOG_DIR = HOME_DIR + '/logs'
# Load configuration file, loads defaults from config directory, and then
# overrides from the same directory cfg file, then from /home/pi/.lights.cfg
# and then from ~/.lights.cfg (which will be the root's home).
CONFIG = ConfigParser.RawConfigParser(allow_no_value=True)
CONFIG.readfp(open(CONFIG_DIR + '/defaults.cfg'))
CONFIG.read([CONFIG_DIR + '/overrides.cfg', '/home/pi/.lights.cfg',
os.path.expanduser('~/.lights.cfg')])
def _as_list(list_str, delimiter=','):
"""Return a list of items from a delimited string (after stripping
whitespace).
:param list_str: string to turn into a list
:type list_str: str
:param delimiter: split the string on this
:type delimiter: str
:return: string converted to a list
:rtype: list
"""
return [str.strip(item) for item in list_str.split(delimiter)]
# Retrieve hardware configuration
_HARDWARE_CONFIG = dict()
def hardware():
"""Retrieves the hardware configuration
loading and parsing it from a file if necessary.
:return: _HARDWARE_CONFIG
:rtype: dict
"""
global _HARDWARE_CONFIG
if len(_HARDWARE_CONFIG) == 0:
_HARDWARE_CONFIG = dict(CONFIG.items('hardware'))
# Devices
devices = dict()
try:
devices = json.loads(_HARDWARE_CONFIG['devices'])
except Exception as error:
            logging.error("devices not defined or not in JSON format: "
                          + str(error))
_HARDWARE_CONFIG["devices"] = {d.lower(): v
for d,v in devices.iteritems()}
return _HARDWARE_CONFIG
# Retrieve light show configuration
_LIGHTSHOW_CONFIG = dict()
def lightshow():
"""Retrieve the lightshow configuration
loading and parsing it from a file as necessary.
:return: _LIGHTSHOW_CONFIG
:rtype: dict
"""
global _LIGHTSHOW_CONFIG
if len(_LIGHTSHOW_CONFIG) == 0:
_LIGHTSHOW_CONFIG = dict(CONFIG.items('lightshow'))
_LIGHTSHOW_CONFIG['audio_in_channels'] = \
CONFIG.getint('lightshow', 'audio_in_channels')
_LIGHTSHOW_CONFIG['audio_in_sample_rate'] = \
CONFIG.getint('lightshow', 'audio_in_sample_rate')
# setup up preshow
preshow = None
if (CONFIG.get('lightshow',
'preshow_configuration') and
not CONFIG.get('lightshow', 'preshow_script')):
try:
preshow = \
json.loads(_LIGHTSHOW_CONFIG['preshow_configuration'])
except (ValueError, TypeError) as error:
                logging.error("Preshow_configuration not defined or not in "
                              "JSON format: " + str(error))
else:
if os.path.isfile(_LIGHTSHOW_CONFIG['preshow_script']):
preshow = _LIGHTSHOW_CONFIG['preshow_script']
_LIGHTSHOW_CONFIG['preshow'] = preshow
# setup postshow
postshow = None
if (CONFIG.get('lightshow',
'postshow_configuration') and
not CONFIG.get('lightshow', 'postshow_script')):
try:
postshow = \
json.loads(_LIGHTSHOW_CONFIG['postshow_configuration'])
except (ValueError, TypeError) as error:
                logging.error("Postshow_configuration not "
                              "defined or not in JSON format: " + str(error))
else:
if os.path.isfile(_LIGHTSHOW_CONFIG['postshow_script']):
postshow = _LIGHTSHOW_CONFIG['postshow_script']
_LIGHTSHOW_CONFIG['postshow'] = postshow
return _LIGHTSHOW_CONFIG
_SMS_CONFIG = dict()
_WHO_CAN = dict()
def sms():
"""Retrieves and validates sms configuration
:return: _SMS_CONFIG
:rtype: dict
"""
global _SMS_CONFIG, _WHO_CAN
if len(_SMS_CONFIG) == 0:
_SMS_CONFIG = dict(CONFIG.items('sms'))
_WHO_CAN = dict()
_WHO_CAN['all'] = set()
# Commands
_SMS_CONFIG['commands'] = _as_list(_SMS_CONFIG['commands'])
for cmd in _SMS_CONFIG['commands']:
try:
_SMS_CONFIG[cmd + '_aliases'] = _as_list(_SMS_CONFIG[cmd + '_aliases'])
except KeyError:
_SMS_CONFIG[cmd + '_aliases'] = []
_WHO_CAN[cmd] = set()
# Groups / Permissions
_SMS_CONFIG['groups'] = _as_list(_SMS_CONFIG['groups'])
_SMS_CONFIG['throttled_groups'] = dict()
for group in _SMS_CONFIG['groups']:
try:
_SMS_CONFIG[group + '_users'] = _as_list(_SMS_CONFIG[group
+ '_users'])
except KeyError:
_SMS_CONFIG[group + '_users'] = []
try:
_SMS_CONFIG[group + '_commands'] = _as_list(_SMS_CONFIG[group
+ '_commands'])
except KeyError:
_SMS_CONFIG[group + '_commands'] = []
for cmd in _SMS_CONFIG[group + '_commands']:
for user in _SMS_CONFIG[group + '_users']:
_WHO_CAN[cmd].add(user)
# Throttle
try:
throttled_group_definitions = _as_list(_SMS_CONFIG[group
+ '_throttle'])
throttled_group = dict()
for definition in throttled_group_definitions:
definition = definition.split(':')
if len(definition) != 2:
warnings.warn(group + "_throttle definitions should "
"be in the form "
+ "[command]:<limit> - "
+ ":".join(definition))
continue
throttle_command = definition[0]
throttle_limit = int(definition[1])
throttled_group[throttle_command] = throttle_limit
_SMS_CONFIG['throttled_groups'][group] = throttled_group
except KeyError:
warnings.warn("Throttle definition either does not exist or "
"is configured incorrectly for group: " + group)
# Blacklist
_SMS_CONFIG['blacklist'] = _as_list(_SMS_CONFIG['blacklist'])
return _SMS_CONFIG
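# Example of the [sms] options the parsing above expects (illustrative values
# only, not taken from a real configuration):
#
#   commands = volume, play, next
#   volume_aliases = vol
#   groups = family, guests
#   family_users = +15551230001, +15551230002
#   family_commands = all
#   guests_users = +15551230003
#   guests_commands = volume, next
#   guests_throttle = volume:5, all:10
#   blacklist =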
_SONG_LIST = []
def songs():
"""Retrieve the song list
:return: a list of songs
:rtype: list
"""
if len(_SONG_LIST) == 0:
pass # TODO(todd): Load playlist if not already loaded, also refactor
# the code that loads the playlist in check_sms and
              # synchronized_lights such that we don't duplicate it
# there.
return _SONG_LIST
def set_songs(song_list):
"""Sets the list of songs
if loaded elsewhere, as is done by check_sms for example
:param song_list: a list of songs
:type song_list: list
"""
global _SONG_LIST
_SONG_LIST = song_list
##############################
# Application State Utilities
##############################
# Load application state configuration file from CONFIG directory.
STATE = ConfigParser.RawConfigParser()
STATE_SECTION = 'do_not_modify'
STATE_FILENAME = CONFIG_DIR + '/state.cfg'
# Ensure state file has been created
if not os.path.isfile(STATE_FILENAME):
open(STATE_FILENAME, 'a').close()
def load_state():
    """Force the state to be reloaded from disk."""
with open(STATE_FILENAME) as state_fp:
fcntl.lockf(state_fp, fcntl.LOCK_SH)
STATE.readfp(state_fp, STATE_FILENAME)
fcntl.lockf(state_fp, fcntl.LOCK_UN)
load_state() # Do an initial load
def get_state(name, default=''):
"""
Get application state
Return the value of a specific application state variable, or the specified
default if not able to load it from the state file
:param name: option to load from state file
:type name: str
:param default: return if not able to load option from state file
:type default: str
:return: the current state
:rtype: str
"""
try:
return STATE.get(STATE_SECTION, name)
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
return default
def update_state(name, value):
"""Update the application state (name / value pair)
:param name: option name to update
:type name: str
:param value: value to update option name to
:type value: str
"""
value = str(value)
logging.info('Updating application state {%s: %s}', name, value)
try:
STATE.add_section(STATE_SECTION)
except ConfigParser.DuplicateSectionError:
pass # Ok, it's already there
STATE.set(STATE_SECTION, name, value)
with open(STATE_FILENAME, 'wb') as state_fp:
fcntl.lockf(state_fp, fcntl.LOCK_EX)
STATE.write(state_fp)
fcntl.lockf(state_fp, fcntl.LOCK_UN)
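# Example of the state helpers above (illustrative only; the option name is a
# placeholder): persist the index of the song to play next and read it back
# later, falling back to '0' when the option has never been written.
#
#   update_state('song_to_play', 3)
#   song_index = int(get_state('song_to_play', '0'))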
def has_permission(user, cmd):
"""Returns True if a user has permission to execute the given command
:param user: the user trying to execute the command
:type user: str
:param cmd: the command at question
:type cmd: str
:return: user has permission
:rtype: bool
"""
blacklisted = user in sms()['blacklist']
return not blacklisted and (user in _WHO_CAN['all']
or 'all' in _WHO_CAN[cmd]
or user in _WHO_CAN[cmd])
def is_throttle_exceeded(cmd, user):
"""Returns True if the throttle has been exceeded and False otherwise
:param cmd: the command at question
:type cmd: str
:param user: the user trying to execute the command
:type user: str
:return: has throttle been exceeded
:rtype: bool
"""
# Load throttle STATE
load_state()
throttle_state = ast.literal_eval(get_state('throttle', '{}'))
process_command_flag = -1
# Analyze throttle timing
current_timestamp = datetime.datetime.now()
throttle_timelimit = _SMS_CONFIG['throttle_time_limit_seconds']
throttle_starttime = datetime.datetime.strptime(
throttle_state['throttle_timestamp_start'], '%Y-%m-%d %H:%M:%S.%f') \
if "throttle_timestamp_start" in throttle_state else current_timestamp
throttle_stoptime = \
throttle_starttime + datetime.timedelta(seconds=int(throttle_timelimit))
# Compare times and see if we need to reset the throttle STATE
if (current_timestamp == throttle_starttime) or \
(throttle_stoptime < current_timestamp):
# There is no time recorded or the time has
        # expired; reset the throttle STATE
throttle_state = dict()
throttle_state['throttle_timestamp_start'] = str(current_timestamp)
update_state('throttle', str(throttle_state))
# ANALYZE THE THROTTLE COMMANDS AND LIMITS
all_throttle_limit = -1
cmd_throttle_limit = -1
    # Check which group the user belongs to, starting with the first group declared
throttled_group = None
for group in _SMS_CONFIG['groups']:
userlist = _SMS_CONFIG[group + "_users"]
if user in userlist:
# The user belongs to this group, check if there
# are any throttle definitions
if group in _SMS_CONFIG['throttled_groups']:
# The group has throttle commands defined,
# now check if the command is defined
throttled_commands = _SMS_CONFIG['throttled_groups'][group]
# Check if all command exists
if "all" in throttled_commands:
all_throttle_limit = int(throttled_commands['all'])
# Check if the command passed is defined
if cmd in throttled_commands:
cmd_throttle_limit = int(throttled_commands[cmd])
# A throttle definition was found,
# we no longer need to check anymore groups
if all_throttle_limit != -1 or cmd_throttle_limit != -1:
throttled_group = group
break
# Process the throttle settings that were found for the throttled group
if not throttled_group:
# No throttle limits were found for any group
return False
else:
# Throttle limits were found, check them against throttle STATE limits
if throttled_group in throttle_state:
group_throttle_state = throttle_state[throttled_group]
else:
group_throttle_state = dict()
if cmd in group_throttle_state:
group_throttle_cmd_limit = int(group_throttle_state[cmd])
else:
group_throttle_cmd_limit = 0
# Check to see if we need to apply "all"
if all_throttle_limit != -1:
groupthrottlealllimit = \
int(group_throttle_state['all']) if 'all' in group_throttle_state else 0
# Check if "all" throttle limit has been reached
if groupthrottlealllimit < all_throttle_limit:
# Not Reached, bump throttle and record
groupthrottlealllimit += 1
group_throttle_state['all'] = groupthrottlealllimit
throttle_state[throttled_group] = group_throttle_state
process_command_flag = False
else:
                # "all" throttle has been reached; we
                # don't want to process anything else
return True
# Check to see if we need to apply "cmd"
if cmd_throttle_limit != -1:
if group_throttle_cmd_limit < cmd_throttle_limit:
# Not reached, bump throttle
group_throttle_cmd_limit += 1
group_throttle_state[cmd] = group_throttle_cmd_limit
throttle_state[throttled_group] = group_throttle_state
process_command_flag = False
        # Record the updated throttle STATE and return
update_state('throttle', throttle_state)
return process_command_flag
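if __name__ == "__main__":
    # Minimal sketch of the "main" the TODO at the top of this file asks for
    # (illustrative only): dump a few of the parsed configuration values.
    # Assumes a valid config tree under $SYNCHRONIZED_LIGHTS_HOME/config.
    print("hardware devices: %s" % hardware().get("devices"))
    print("preshow: %s" % lightshow().get("preshow"))
    print("state file: %s" % STATE_FILENAME)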
|
|
# -*- coding: utf-8 -*-
# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# For doxygen class doc generation:
r"""
\mainpage Class Documentation for pyrax
This module provides the Python Language Bindings for creating applications
built on the Rackspace / OpenStack Cloud.<br />
The source code for <b>pyrax</b> can be found at:
http://github.com/pycontribs/pyrax
"""
from __future__ import absolute_import, unicode_literals
from functools import wraps
import inspect
import logging
import os
import re
import six.moves.configparser as ConfigParser
import warnings
# keyring is an optional import
try:
import keyring
except ImportError:
keyring = None
# The following try block is only needed when first installing pyrax,
# since importing the version info in setup.py tries to import this
# entire module.
try:
from .identity import *
from . import exceptions as exc
    from . import http
    from . import utils
from . import version
__version__ = version.version
from novaclient import exceptions as _cs_exceptions
from novaclient import auth_plugin as _cs_auth_plugin
from novaclient import client as nc
from novaclient import client as _cs_client
from novaclient import API_MAX_VERSION as _cs_max_version
from novaclient.v2.servers import Server as CloudServer
from .autoscale import AutoScaleClient
from .cloudcdn import CloudCDNClient
from .clouddatabases import CloudDatabaseClient
from .cloudloadbalancers import CloudLoadBalancerClient
from .cloudblockstorage import CloudBlockStorageClient
from .clouddns import CloudDNSClient
from .cloudnetworks import CloudNetworkClient
from .cloudmonitoring import CloudMonitorClient
from .image import ImageClient
from .object_storage import StorageClient
from .queueing import QueueClient
except ImportError:
# See if this is the result of the importing of version.py in setup.py
callstack = inspect.stack()
in_setup = False
for stack in callstack:
if stack[1].endswith("/setup.py"):
in_setup = True
if not in_setup:
# This isn't a normal import problem during setup; re-raise
raise
# Initiate the services to None until we are authenticated.
cloudservers = None
cloudfiles = None
cloud_cdn = None
cloud_loadbalancers = None
cloud_databases = None
cloud_blockstorage = None
cloud_dns = None
cloud_networks = None
cloud_monitoring = None
autoscale = None
images = None
queues = None
# Default region for all services. Can be individually overridden if needed
default_region = None
# Encoding to use when working with non-ASCII names
default_encoding = "utf-8"
# Config settings
settings = {}
_environment = "default"
identity = None
# Value to plug into the user-agent headers
USER_AGENT = "pyrax/%s" % version.version
# Do we output HTTP traffic for debugging?
_http_debug = False
# Regions and services available from the service catalog
regions = tuple()
services = tuple()
_client_classes = {
"compute": _cs_client.get_client_class(_cs_max_version),
"cdn": CloudCDNClient,
"object_store": StorageClient,
"database": CloudDatabaseClient,
"load_balancer": CloudLoadBalancerClient,
"volume": CloudBlockStorageClient,
"dns": CloudDNSClient,
"compute:network": CloudNetworkClient,
"monitor": CloudMonitorClient,
"autoscale": AutoScaleClient,
"image": ImageClient,
"queues": QueueClient,
}
def _id_type(ityp):
"""Allow for shorthand names for the most common types."""
if ityp.lower() == "rackspace":
ityp = "rax_identity.RaxIdentity"
elif ityp.lower() == "keystone":
ityp = "keystone_identity.KeystoneIdentity"
return ityp
def _import_identity(import_str):
try:
import_str = _id_type(import_str)
full_str = "pyrax.identity.%s" % import_str
return utils.import_class(full_str)
except ImportError:
pass
return utils.import_class(import_str)
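# Shorthand resolution example (illustrative only): both calls below resolve to
# the same identity class, assuming the bundled rax_identity module is
# importable.
#
#   _import_identity("rackspace")
#   _import_identity("rax_identity.RaxIdentity")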
class Settings(object):
"""
Holds and manages the settings for pyrax.
"""
_environment = None
env_dct = {
"identity_type": "CLOUD_ID_TYPE",
"auth_endpoint": "CLOUD_AUTH_ENDPOINT",
"keyring_username": "CLOUD_KEYRING_USER",
"region": "CLOUD_REGION",
"tenant_id": "CLOUD_TENANT_ID",
"tenant_name": "CLOUD_TENANT_NAME",
"encoding": "CLOUD_ENCODING",
"custom_user_agent": "CLOUD_USER_AGENT",
"debug": "CLOUD_DEBUG",
"verify_ssl": "CLOUD_VERIFY_SSL",
"use_servicenet": "USE_SERVICENET",
}
_settings = {"default": dict.fromkeys(list(env_dct.keys()))}
_default_set = False
def __init__(self, *args, **kwargs):
# Default verify_ssl to True
if self._settings["default"].get("verify_ssl") is None:
self._settings["default"]["verify_ssl"] = True
super(Settings, self).__init__(*args, **kwargs)
def get(self, key, env=None):
"""
Returns the config setting for the specified environment. If no
environment is specified, the value for the current environment is
returned. If an unknown key or environment is passed, None is returned.
"""
if env is None:
env = self.environment
try:
ret = self._settings[env][key]
except KeyError:
ret = None
if ret is None:
# See if it's set in the environment
if key == "identity_class":
# This is defined via the identity_type
env_var = self.env_dct.get("identity_type")
ityp = os.environ.get(env_var)
if ityp:
return _import_identity(ityp)
else:
env_var = self.env_dct.get(key)
if env_var is not None:
ret = os.environ.get(env_var)
return ret
def set(self, key, val, env=None):
"""
Changes the value for the setting specified by 'key' to the new value.
By default this will change the current environment, but you can change
values in other environments by passing the name of that environment as
the 'env' parameter.
"""
if env is None:
env = self.environment
else:
if env not in self._settings:
raise exc.EnvironmentNotFound("There is no environment named "
"'%s'." % env)
dct = self._settings[env]
if key not in dct:
raise exc.InvalidSetting("The setting '%s' is not defined." % key)
dct[key] = val
if key == "identity_type":
# If setting the identity_type, also change the identity_class.
dct["identity_class"] = _import_identity(val)
elif key == "region":
if not identity:
return
current = identity.region
if current == val:
return
if "LON" in (current, val):
# This is an outlier, as it has a separate auth
identity.region = val
elif key == "verify_ssl":
if not identity:
return
identity.verify_ssl = val
def _getEnvironment(self):
return self._environment or "default"
def _setEnvironment(self, val):
if val not in self._settings:
raise exc.EnvironmentNotFound("The environment '%s' has not been "
"defined." % val)
if val != self.environment:
self._environment = val
clear_credentials()
_create_identity()
environment = property(_getEnvironment, _setEnvironment, None,
"""Users can define several environments for use with pyrax. This
holds the name of the current environment they are working in.
Changing this value will discard any existing authentication
credentials, and will set all the individual clients for cloud
services, such as `pyrax.cloudservers`, to None. You must
authenticate against the new environment with the credentials
appropriate for that cloud provider.""")
@property
def environments(self):
return list(self._settings.keys())
def read_config(self, config_file):
"""
Parses the specified configuration file and stores the values. Raises
an InvalidConfigurationFile exception if the file is not well-formed.
"""
cfg = ConfigParser.SafeConfigParser()
try:
cfg.read(config_file)
except ConfigParser.MissingSectionHeaderError as e:
# The file exists, but doesn't have the correct format.
raise exc.InvalidConfigurationFile(e)
def safe_get(section, option, default=None):
try:
return cfg.get(section, option)
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
return default
# A common mistake is including credentials in the config file. If any
# values are found, issue a warning so that the developer can correct
# this problem.
creds_found = False
for section in cfg.sections():
if section == "settings":
section_name = "default"
self._default_set = True
else:
section_name = section
# Check for included credentials
for key in ("username", "password", "api_key"):
if creds_found:
break
if safe_get(section, key):
creds_found = True
dct = self._settings[section_name] = {}
dct["region"] = safe_get(section, "region", default_region)
ityp = safe_get(section, "identity_type")
if ityp:
dct["identity_type"] = _id_type(ityp)
dct["identity_class"] = _import_identity(ityp)
# Handle both the old and new names for this setting.
debug = safe_get(section, "debug")
if debug is None:
debug = safe_get(section, "http_debug", "False")
dct["http_debug"] = debug == "True"
verify_ssl = safe_get(section, "verify_ssl", "True")
dct["verify_ssl"] = verify_ssl == "True"
dct["keyring_username"] = safe_get(section, "keyring_username")
dct["encoding"] = safe_get(section, "encoding", default_encoding)
dct["auth_endpoint"] = safe_get(section, "auth_endpoint")
dct["tenant_name"] = safe_get(section, "tenant_name")
dct["tenant_id"] = safe_get(section, "tenant_id")
use_servicenet = safe_get(section, "use_servicenet", "False")
dct["use_servicenet"] = use_servicenet == "True"
app_agent = safe_get(section, "custom_user_agent")
if app_agent:
# Customize the user-agent string with the app name.
dct["user_agent"] = "%s %s" % (app_agent, USER_AGENT)
else:
dct["user_agent"] = USER_AGENT
# If this is the first section, make it the default
if not self._default_set:
self._settings["default"] = self._settings[section]
self._default_set = True
if creds_found:
warnings.warn("Login credentials were detected in your .pyrax.cfg "
"file. These have been ignored, but you should remove "
"them and either place them in a credential file, or "
"consider using another means of authentication. More "
"information on the use of credential files can be found "
"in the 'docs/getting_started.md' document.")
def get_environment():
"""
Returns the name of the current environment.
"""
return settings.environment
def set_environment(env):
"""
Change your configuration environment. An EnvironmentNotFound exception
is raised if you pass in an undefined environment name.
"""
settings.environment = env
def list_environments():
"""
Returns a list of all defined environments.
"""
return settings.environments
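# Example of the file layout Settings.read_config() expects (illustrative
# values only):
#
#   [settings]                  ; parsed into the "default" environment
#   identity_type = rackspace
#   region = DFW
#   custom_user_agent = MyApp/1.0
#
#   [public_cloud_uk]           ; any other section defines a named environment
#   identity_type = rackspace
#   region = LON
#
# Credentials (username, password, api_key) should not live in this file; as
# read_config() warns, they are ignored if present.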
def get_setting(key, env=None):
"""
Returns the config setting for the specified key. If no environment is
specified, returns the setting for the current environment.
"""
return settings.get(key, env=env)
def set_setting(key, val, env=None):
"""
Changes the value of the specified key in the current environment, or in
another environment if specified.
"""
return settings.set(key, val, env=env)
def set_default_region(region):
"""Changes the default_region setting."""
global default_region
default_region = region
def create_context(id_type=None, env=None, username=None, password=None,
tenant_id=None, tenant_name=None, api_key=None, verify_ssl=None):
"""
Returns an instance of the specified identity class, or if none is
specified, an instance of the current setting for 'identity_class'.
You may optionally set the environment by passing the name of that
environment in the 'env' parameter.
"""
if env:
set_environment(env)
return _create_identity(id_type=id_type, username=username,
password=password, tenant_id=tenant_id, tenant_name=tenant_name,
api_key=api_key, verify_ssl=verify_ssl, return_context=True)
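# Illustrative sketch (hypothetical helper, not part of the original module):
# obtaining an independent context object without touching the module-level
# 'identity'. Credential values are placeholders, and the authenticate() call
# assumes the identity class exposes that method, as the module-level
# authenticate() defined below suggests.
def _example_create_context():
    ctx = create_context(username="myuser", password="s3cret",
            tenant_name="mytenant")
    ctx.authenticate()
    return ctx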
def _create_identity(id_type=None, username=None, password=None, tenant_id=None,
tenant_name=None, api_key=None, verify_ssl=None,
return_context=False):
"""
Creates an instance of the current identity_class and assigns it to the
module-level name 'identity' by default. If 'return_context' is True, the
module-level 'identity' is untouched, and instead the instance is returned.
"""
if id_type:
cls = _import_identity(id_type)
else:
cls = settings.get("identity_class")
if not cls:
raise exc.IdentityClassNotDefined("No identity class has "
"been defined for the current environment.")
if verify_ssl is None:
verify_ssl = get_setting("verify_ssl")
context = cls(username=username, password=password, tenant_id=tenant_id,
tenant_name=tenant_name, api_key=api_key, verify_ssl=verify_ssl)
if return_context:
return context
else:
global identity
identity = context
def _assure_identity(fnc):
"""Ensures that the 'identity' attribute is not None."""
def _wrapped(*args, **kwargs):
if identity is None:
_create_identity()
return fnc(*args, **kwargs)
return _wrapped
def _require_auth(fnc):
"""Authentication decorator."""
@wraps(fnc)
@_assure_identity
def _wrapped(*args, **kwargs):
if not identity.authenticated:
msg = "Authentication required before calling '%s'." % fnc.__name__
raise exc.NotAuthenticated(msg)
return fnc(*args, **kwargs)
return _wrapped
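# Illustrative sketch (hypothetical helper, not part of the original module):
# how the two decorators above compose. '_assure_identity' lazily creates the
# identity, and '_require_auth' then rejects the call unless it has
# authenticated. The function below is defined only for illustration and is
# never called.
@_require_auth
def _example_protected_call():
    """Hypothetical function used only to illustrate '_require_auth'."""
    return list(identity.services)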
def _safe_region(region=None, context=None):
"""Value to use when no region is specified."""
ret = region or settings.get("region")
context = context or identity
if not ret:
# Nothing specified; get the default from the identity object.
if not context:
_create_identity()
context = identity
ret = context.get_default_region()
if not ret:
# Use the first available region
try:
ret = regions[0]
except IndexError:
ret = ""
return ret
@_assure_identity
def auth_with_token(token, tenant_id=None, tenant_name=None, region=None):
"""
If you already have a valid token and either a tenant ID or name, you can
call this to configure the identity and available services.
"""
global regions, services
identity.auth_with_token(token, tenant_id=tenant_id,
tenant_name=tenant_name)
regions = tuple(identity.regions)
services = tuple(identity.services.keys())
connect_to_services(region=region)
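# Illustrative sketch (hypothetical helper, not part of the original module):
# token-based authentication. The token, tenant ID, and region are placeholder
# values; a real token normally comes from an earlier authentication.
def _example_auth_with_token():
    auth_with_token("0123456789abcdef", tenant_id="123456", region="DFW")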
@_assure_identity
def set_credentials(username, api_key=None, password=None, region=None,
tenant_id=None, authenticate=True):
"""
Set the credentials directly, and then try to authenticate.
If the region is passed, it will authenticate against the proper endpoint
for that region, and set the default region for connections.
"""
global regions, services
pw_key = password or api_key
region = _safe_region(region)
tenant_id = tenant_id or settings.get("tenant_id")
identity.set_credentials(username=username, password=pw_key,
tenant_id=tenant_id, region=region, authenticate=authenticate)
regions = tuple(identity.regions)
services = tuple(identity.services.keys())
connect_to_services(region=region)
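# Illustrative sketch (hypothetical helper, not part of the original module):
# authenticating with an API key. The username and key are placeholders; note
# that 'regions' and 'services' are (re)populated as a side effect.
def _example_set_credentials():
    set_credentials("myuser", api_key="0123456789abcdef", region="DFW")
    return regions, services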
@_assure_identity
def set_credential_file(cred_file, region=None, authenticate=True):
"""
Read in the credentials from the supplied file path, and then try to
authenticate. The file should be a standard config file in one of the
following formats:
For Keystone authentication:
[keystone]
username = myusername
password = 1234567890abcdef
tenant_id = abcdef1234567890
For Rackspace authentication:
[rackspace_cloud]
username = myusername
api_key = 1234567890abcdef
If the region is passed, it will authenticate against the proper endpoint
for that region, and set the default region for connections.
"""
global regions, services
region = _safe_region(region)
identity.set_credential_file(cred_file, region=region,
authenticate=authenticate)
regions = tuple(identity.regions)
services = tuple(identity.services.keys())
connect_to_services(region=region)
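# Illustrative sketch (hypothetical helper, not part of the original module):
# a minimal Rackspace-style credential file in the format described above, and
# the call that consumes it. The path and values are placeholders.
#
#     [rackspace_cloud]
#     username = myusername
#     api_key = 1234567890abcdef
def _example_set_credential_file():
    set_credential_file(os.path.expanduser("~/.rax_creds"), region="DFW")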
def keyring_auth(username=None, region=None, authenticate=True):
"""
Use the password stored within the keyring to authenticate. If a username
is supplied, that name is used; otherwise, the keyring_username value
from the config file is used.
    If no username is defined, if the keyring module is not installed, or if
    there is no password set for the given username, the appropriate error
    will be raised.
If the region is passed, it will authenticate against the proper endpoint
for that region, and set the default region for connections.
"""
if not keyring:
# Module not installed
raise exc.KeyringModuleNotInstalled("The 'keyring' Python module is "
"not installed on this system.")
if username is None:
username = settings.get("keyring_username")
if not username:
raise exc.KeyringUsernameMissing("No username specified for keyring "
"authentication.")
password = keyring.get_password("pyrax", username)
if password is None:
raise exc.KeyringPasswordNotFound("No password was found for the "
"username '%s'." % username)
set_credentials(username, password, region=region,
authenticate=authenticate)
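# Illustrative sketch (hypothetical helper, not part of the original module):
# seeding the keyring and then authenticating from it. keyring.set_password()
# is the counterpart of the get_password() call used above; the username and
# password values are placeholders.
def _example_keyring_auth():
    keyring.set_password("pyrax", "myuser", "s3cret")
    keyring_auth("myuser", region="DFW")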
@_assure_identity
def authenticate(connect=True):
"""
Generally you will not need to call this directly; passing in your
credentials via set_credentials() and set_credential_file() will call
authenticate() on the identity object by default. But for situations where
you set your credentials manually or otherwise need finer control over
the authentication sequence, this method will call the identity object's
authenticate() method, and an AuthenticationFailed exception will be raised
if your credentials have not been properly set first.
    Normally after successful authentication, connections to the various
    services will be made. The 'connect' parameter is retained only for
    backwards compatibility; it no longer has any effect.
"""
identity.authenticate()
def clear_credentials():
"""De-authenticate by clearing all the names back to None."""
global identity, regions, services, cloudservers, cloudfiles, cloud_cdn
global cloud_loadbalancers, cloud_databases, cloud_blockstorage, cloud_dns
global cloud_networks, cloud_monitoring, autoscale, images, queues
identity = None
regions = tuple()
services = tuple()
cloudservers = None
cloudfiles = None
cloud_cdn = None
cloud_loadbalancers = None
cloud_databases = None
cloud_blockstorage = None
cloud_dns = None
cloud_networks = None
cloud_monitoring = None
autoscale = None
images = None
queues = None
def _make_agent_name(base):
"""Appends pyrax information to the underlying library's user agent."""
if base:
if "pyrax" in base:
return base
else:
return "%s %s" % (USER_AGENT, base)
else:
return USER_AGENT
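# Illustrative examples (not part of the original module) of the helper above:
#
#     _make_agent_name(None)            # -> USER_AGENT
#     _make_agent_name("novaclient")    # -> "<USER_AGENT> novaclient"
#     _make_agent_name("pyrax/x.y")     # -> unchanged, already tagged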
def connect_to_services(region=None):
"""Establishes authenticated connections to the various cloud APIs."""
global cloudservers, cloudfiles, cloud_loadbalancers, cloud_databases
global cloud_blockstorage, cloud_dns, cloud_networks, cloud_monitoring
global autoscale, images, queues, cloud_cdn
cloudservers = connect_to_cloudservers(region=region)
cloudfiles = connect_to_cloudfiles(region=region)
cloud_cdn = connect_to_cloud_cdn(region=region)
cloud_loadbalancers = connect_to_cloud_loadbalancers(region=region)
cloud_databases = connect_to_cloud_databases(region=region)
cloud_blockstorage = connect_to_cloud_blockstorage(region=region)
cloud_dns = connect_to_cloud_dns(region=region)
cloud_networks = connect_to_cloud_networks(region=region)
cloud_monitoring = connect_to_cloud_monitoring(region=region)
autoscale = connect_to_autoscale(region=region)
images = connect_to_images(region=region)
queues = connect_to_queues(region=region)
def _get_service_endpoint(context, svc, region=None, public=True):
"""
Parses the services dict to get the proper endpoint for the given service.
"""
region = _safe_region(region)
# If a specific context is passed, use that. Otherwise, use the global
# identity reference.
context = context or identity
url_type = {True: "public", False: "private"}[public]
svc_obj = context.services.get(svc)
if not svc_obj:
return None
ep = svc_obj.endpoints.get(region, {}).get(url_type)
if not ep:
# Try the "ALL" region, and substitute the actual region
ep = svc_obj.endpoints.get("ALL", {}).get(url_type)
return ep
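# Illustrative sketch (not part of the original module) of the catalog shape
# the helper above expects, inferred from its lookup logic: each service object
# exposes an 'endpoints' mapping keyed by region, holding "public"/"private"
# URLs, with an optional "ALL" pseudo-region used as a fallback. The URLs
# below are placeholders.
#
#     identity.services["compute"].endpoints == {
#         "DFW": {"public": "https://dfw.example/v2", "private": "..."},
#         "ALL": {"public": "https://all.example/v2"},
#     }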
def connect_to_cloudservers(region=None, context=None, verify_ssl=None, **kwargs):
"""Creates a client for working with cloud servers."""
context = context or identity
_cs_auth_plugin.discover_auth_systems()
id_type = get_setting("identity_type")
if id_type != "keystone":
auth_plugin = _cs_auth_plugin.load_plugin(id_type)
else:
auth_plugin = None
region = _safe_region(region, context=context)
mgt_url = _get_service_endpoint(context, "compute", region)
cloudservers = None
if not mgt_url:
# Service is not available
return
if verify_ssl is None:
insecure = not get_setting("verify_ssl")
else:
insecure = not verify_ssl
try:
extensions = nc.discover_extensions(_cs_max_version)
except AttributeError:
extensions = None
clt_class = _cs_client.get_client_class(_cs_max_version)
cloudservers = clt_class(context.username, context.password,
project_id=context.tenant_id, auth_url=context.auth_endpoint,
auth_system=id_type, region_name=region, service_type="compute",
auth_plugin=auth_plugin, insecure=insecure, extensions=extensions,
http_log_debug=_http_debug, **kwargs)
agt = cloudservers.client.USER_AGENT
cloudservers.client.USER_AGENT = _make_agent_name(agt)
cloudservers.client.management_url = mgt_url
cloudservers.client.auth_token = context.token
cloudservers.exceptions = _cs_exceptions
# Add some convenience methods
cloudservers.list_images = cloudservers.images.list
cloudservers.list_flavors = cloudservers.flavors.list
cloudservers.list = cloudservers.servers.list
def list_base_images():
"""
Returns a list of all base images; excludes any images created
by this account.
"""
return [image for image in cloudservers.images.list()
if not hasattr(image, "server")]
def list_snapshots():
"""
Returns a list of all images created by this account; in other words, it
excludes all the base images.
"""
return [image for image in cloudservers.images.list()
if hasattr(image, "server")]
def find_images_by_name(expr):
"""
Returns a list of images whose name contains the specified expression.
The value passed is treated as a regular expression, allowing for more
specific searches than simple wildcards. The matching is done in a
case-insensitive manner.
"""
return [image for image in cloudservers.images.list()
if re.search(expr, image.name, re.I)]
cloudservers.list_base_images = list_base_images
cloudservers.list_snapshots = list_snapshots
cloudservers.find_images_by_name = find_images_by_name
cloudservers.identity = identity
return cloudservers
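# Illustrative sketch (hypothetical helper, not part of the original module):
# using the convenience methods attached above. The region and search pattern
# are placeholders; results depend entirely on the images in the account.
def _example_cloudservers_images():
    cs = connect_to_cloudservers(region="DFW")
    return cs.find_images_by_name("ubuntu") if cs else []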
def connect_to_cloudfiles(region=None, public=None):
"""Creates a client for working with CloudFiles/Swift."""
if public is None:
is_public = not bool(get_setting("use_servicenet"))
else:
is_public = public
ret = _create_client(ep_name="object_store", region=region,
public=is_public)
if ret:
# Add CDN endpoints, if available
region = _safe_region(region)
ret.cdn_management_url = _get_service_endpoint(None, "object_cdn",
region, public=is_public)
return ret
@_require_auth
def _create_client(ep_name, region, public=True, verify_ssl=None):
region = _safe_region(region)
ep = _get_service_endpoint(None, ep_name.split(":")[0], region,
public=public)
if not ep:
return
if verify_ssl is None:
verify_ssl = get_setting("verify_ssl")
cls = _client_classes[ep_name]
client = cls(identity, region_name=region, management_url=ep,
verify_ssl=verify_ssl, http_log_debug=_http_debug)
client.user_agent = _make_agent_name(client.user_agent)
return client
def connect_to_cloud_databases(region=None):
"""Creates a client for working with cloud databases."""
return _create_client(ep_name="database", region=region)
def connect_to_cloud_cdn(region=None):
"""Creates a client for working with cloud loadbalancers."""
global default_region
# (nicholaskuechler/keekz) 2017-11-30 - Not a very elegant solution...
# Cloud CDN only exists in 2 regions: DFW and LON
# But this isn't playing nicely with the identity service catalog results.
# US auth based regions (DFW, ORD, IAD, SYD, HKG) need to use CDN in DFW
# UK auth based regions (LON) need to use CDN in LON
if region in ['DFW', 'IAD', 'ORD', 'SYD', 'HKG']:
return _create_client(ep_name="cdn", region="DFW")
elif region in ['LON']:
return _create_client(ep_name="cdn", region="LON")
else:
if default_region in ['DFW', 'IAD', 'ORD', 'SYD', 'HKG']:
return _create_client(ep_name="cdn", region="DFW")
elif default_region in ['LON']:
return _create_client(ep_name="cdn", region="LON")
else:
return _create_client(ep_name="cdn", region=region)
def connect_to_cloud_loadbalancers(region=None):
"""Creates a client for working with cloud loadbalancers."""
return _create_client(ep_name="load_balancer", region=region)
def connect_to_cloud_blockstorage(region=None):
"""Creates a client for working with cloud blockstorage."""
return _create_client(ep_name="volume", region=region)
def connect_to_cloud_dns(region=None):
"""Creates a client for working with cloud dns."""
return _create_client(ep_name="dns", region=region)
def connect_to_cloud_networks(region=None):
"""Creates a client for working with cloud networks."""
return _create_client(ep_name="compute:network", region=region)
def connect_to_cloud_monitoring(region=None):
"""Creates a client for working with cloud monitoring."""
return _create_client(ep_name="monitor", region=region)
def connect_to_autoscale(region=None):
"""Creates a client for working with AutoScale."""
return _create_client(ep_name="autoscale", region=region)
def connect_to_images(region=None, public=True):
"""Creates a client for working with Images."""
return _create_client(ep_name="image", region=region, public=public)
def connect_to_queues(region=None, public=True):
"""Creates a client for working with Queues."""
return _create_client(ep_name="queues", region=region, public=public)
def client_class_for_service(service):
"""
Returns the client class registered for the given service, or None if there
is no such service, or if no class has been registered.
"""
return _client_classes.get(service)
def get_http_debug():
return _http_debug
def set_http_debug(val):
global _http_debug
_http_debug = val
# Set debug on the various services
if identity:
identity.http_log_debug = val
for svc in (cloudservers, cloudfiles, cloud_loadbalancers,
cloud_blockstorage, cloud_databases, cloud_dns, cloud_networks,
autoscale, images, queues):
if svc is not None:
svc.http_log_debug = val
def get_encoding():
"""Returns the unicode encoding type."""
return settings.get("encoding") or default_encoding
# Read in the configuration file, if any
settings = Settings()
config_file = os.path.join(os.path.expanduser("~"), ".pyrax.cfg")
if os.path.exists(config_file):
settings.read_config(config_file)
debug = get_setting("http_debug") or False
set_http_debug(debug)
# Set up logging
_logger = logging.getLogger("pyrax")
_logger.setLevel(logging.DEBUG)
_logger.addHandler(logging.StreamHandler())
|
|
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import mock
from oslo_config import cfg
import testtools
from neutron._i18n import _
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import iptables_manager
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.tests import base
from neutron.tests import tools
IPTABLES_ARG = {'bn': iptables_manager.binary_name,
'snat_out_comment': ic.SNAT_OUT,
'filter_rules': '',
'mark': constants.ROUTER_MARK_MASK}
NAT_TEMPLATE = ('# Generated by iptables_manager\n'
'*nat\n'
':OUTPUT - [0:0]\n'
':POSTROUTING - [0:0]\n'
':PREROUTING - [0:0]\n'
':neutron-postrouting-bottom - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-POSTROUTING - [0:0]\n'
':%(bn)s-PREROUTING - [0:0]\n'
':%(bn)s-float-snat - [0:0]\n'
':%(bn)s-snat - [0:0]\n'
'-I OUTPUT 1 -j %(bn)s-OUTPUT\n'
'-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n'
'-I POSTROUTING 2 -j neutron-postrouting-bottom\n'
'-I PREROUTING 1 -j %(bn)s-PREROUTING\n'
'-I neutron-postrouting-bottom 1 -j %(bn)s-snat\n'
'-I %(bn)s-snat 1 -j '
'%(bn)s-float-snat\n'
'COMMIT\n'
'# Completed by iptables_manager\n')
NAT_DUMP = NAT_TEMPLATE % IPTABLES_ARG
FILTER_TEMPLATE = ('# Generated by iptables_manager\n'
'*filter\n'
':FORWARD - [0:0]\n'
':INPUT - [0:0]\n'
':OUTPUT - [0:0]\n'
':neutron-filter-top - [0:0]\n'
':%(bn)s-FORWARD - [0:0]\n'
':%(bn)s-INPUT - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-local - [0:0]\n'
':%(bn)s-scope - [0:0]\n'
'-I FORWARD 1 -j neutron-filter-top\n'
'-I FORWARD 2 -j %(bn)s-FORWARD\n'
'-I INPUT 1 -j %(bn)s-INPUT\n'
'-I OUTPUT 1 -j neutron-filter-top\n'
'-I OUTPUT 2 -j %(bn)s-OUTPUT\n'
'-I neutron-filter-top 1 -j %(bn)s-local\n'
'-I %(bn)s-FORWARD 1 -j %(bn)s-scope\n'
'COMMIT\n'
'# Completed by iptables_manager\n')
FILTER_DUMP = FILTER_TEMPLATE % IPTABLES_ARG
FILTER_WITH_RULES_TEMPLATE = (
'# Generated by iptables_manager\n'
'*filter\n'
':FORWARD - [0:0]\n'
':INPUT - [0:0]\n'
':OUTPUT - [0:0]\n'
':neutron-filter-top - [0:0]\n'
':%(bn)s-FORWARD - [0:0]\n'
':%(bn)s-INPUT - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-filter - [0:0]\n'
':%(bn)s-local - [0:0]\n'
':%(bn)s-scope - [0:0]\n'
'-I FORWARD 1 -j neutron-filter-top\n'
'-I FORWARD 2 -j %(bn)s-FORWARD\n'
'-I INPUT 1 -j %(bn)s-INPUT\n'
'-I OUTPUT 1 -j neutron-filter-top\n'
'-I OUTPUT 2 -j %(bn)s-OUTPUT\n'
'-I neutron-filter-top 1 -j %(bn)s-local\n'
'-I %(bn)s-FORWARD 1 -j %(bn)s-scope\n'
'%(filter_rules)s'
'COMMIT\n'
'# Completed by iptables_manager\n')
COMMENTED_NAT_DUMP = (
'# Generated by iptables_manager\n'
'*nat\n'
':OUTPUT - [0:0]\n'
':POSTROUTING - [0:0]\n'
':PREROUTING - [0:0]\n'
':neutron-postrouting-bottom - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-POSTROUTING - [0:0]\n'
':%(bn)s-PREROUTING - [0:0]\n'
':%(bn)s-float-snat - [0:0]\n'
':%(bn)s-snat - [0:0]\n'
'-I OUTPUT 1 -j %(bn)s-OUTPUT\n'
'-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n'
'-I POSTROUTING 2 -j neutron-postrouting-bottom\n'
'-I PREROUTING 1 -j %(bn)s-PREROUTING\n'
'-I neutron-postrouting-bottom 1 '
'-m comment --comment "%(snat_out_comment)s" -j %(bn)s-snat\n'
'-I %(bn)s-snat 1 -j '
'%(bn)s-float-snat\n'
'COMMIT\n'
'# Completed by iptables_manager\n' % IPTABLES_ARG)
TRAFFIC_COUNTERS_DUMP = (
'Chain OUTPUT (policy ACCEPT 400 packets, 65901 bytes)\n'
' pkts bytes target prot opt in out source'
' destination \n'
' 400 65901 chain1 all -- * * 0.0.0.0/0'
' 0.0.0.0/0 \n'
' 400 65901 chain2 all -- * * 0.0.0.0/0'
' 0.0.0.0/0 \n')
class IptablesTestCase(base.BaseTestCase):
def test_get_binary_name_in_unittest(self):
# Corresponds to sys.argv content when running python -m unittest class
with mock.patch('sys.argv', ['python -m unittest', 'class']):
binary_name = iptables_manager.get_binary_name()
self.assertEqual('python_-m_unitte', binary_name)
class IptablesCommentsTestCase(base.BaseTestCase):
def setUp(self):
super(IptablesCommentsTestCase, self).setUp()
cfg.CONF.set_override('comment_iptables_rules', True, 'AGENT')
self.iptables = iptables_manager.IptablesManager()
self.execute = mock.patch.object(self.iptables, "execute").start()
def test_comments_short_enough(self):
for attr in dir(ic):
if not attr.startswith('__') and len(getattr(ic, attr)) > 255:
self.fail("Iptables comment %s is longer than 255 characters."
% attr)
def test_reordering_of_jump_rule_comments(self):
# jump at the start
self.assertEqual(
'-m comment --comment "aloha" -j sg-chain',
iptables_manager.comment_rule('-j sg-chain', 'aloha'))
# jump in the middle
self.assertEqual(
'-s source -m comment --comment "aloha" -j sg-chain',
iptables_manager.comment_rule('-s source -j sg-chain', 'aloha'))
# no jump rule
self.assertEqual(
'-s source -m comment --comment "aloha"',
iptables_manager.comment_rule('-s source', 'aloha'))
def test_add_filter_rule(self):
iptables_args = {}
iptables_args.update(IPTABLES_ARG)
filter_rules = ('-I %(bn)s-INPUT 1 -s 0/0 -d 192.168.0.2 -j '
'%(bn)s-filter\n-I %(bn)s-filter 1 -j DROP\n'
% iptables_args)
iptables_args['filter_rules'] = filter_rules
filter_dump_mod = FILTER_WITH_RULES_TEMPLATE % iptables_args
raw_dump = _generate_raw_dump(IPTABLES_ARG)
mangle_dump = _generate_mangle_dump(IPTABLES_ARG)
expected_calls_and_values = [
(mock.call(['iptables-save'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-n'],
process_input=(filter_dump_mod + mangle_dump +
COMMENTED_NAT_DUMP + raw_dump),
run_as_root=True),
None),
(mock.call(['iptables-save'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-n'],
process_input=(FILTER_DUMP + mangle_dump +
COMMENTED_NAT_DUMP + raw_dump),
run_as_root=True
),
None),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.iptables.ipv4['filter'].add_chain('filter')
self.iptables.ipv4['filter'].add_rule('filter', '-j DROP')
self.iptables.ipv4['filter'].add_rule('INPUT',
'-s 0/0 -d 192.168.0.2 -j'
' %(bn)s-filter' % IPTABLES_ARG)
self.iptables.apply()
self.iptables.ipv4['filter'].remove_rule('filter', '-j DROP')
self.iptables.ipv4['filter'].remove_rule('INPUT',
'-s 0/0 -d 192.168.0.2 -j'
' %(bn)s-filter'
% IPTABLES_ARG)
self.iptables.ipv4['filter'].remove_chain('filter')
self.iptables.apply()
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _generate_mangle_dump(iptables_args):
return ('# Generated by iptables_manager\n'
'*mangle\n'
':FORWARD - [0:0]\n'
':INPUT - [0:0]\n'
':OUTPUT - [0:0]\n'
':POSTROUTING - [0:0]\n'
':PREROUTING - [0:0]\n'
':%(bn)s-FORWARD - [0:0]\n'
':%(bn)s-INPUT - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-POSTROUTING - [0:0]\n'
':%(bn)s-PREROUTING - [0:0]\n'
':%(bn)s-float-snat - [0:0]\n'
':%(bn)s-floatingip - [0:0]\n'
':%(bn)s-mark - [0:0]\n'
':%(bn)s-scope - [0:0]\n'
'-I FORWARD 1 -j %(bn)s-FORWARD\n'
'-I INPUT 1 -j %(bn)s-INPUT\n'
'-I OUTPUT 1 -j %(bn)s-OUTPUT\n'
'-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n'
'-I PREROUTING 1 -j %(bn)s-PREROUTING\n'
'-I %(bn)s-PREROUTING 1 -j %(bn)s-mark\n'
'-I %(bn)s-PREROUTING 2 -j %(bn)s-scope\n'
'-I %(bn)s-PREROUTING 3 -m connmark ! --mark 0x0/0xffff0000 '
'-j CONNMARK --restore-mark '
'--nfmask 0xffff0000 --ctmask 0xffff0000\n'
'-I %(bn)s-PREROUTING 4 -j %(bn)s-floatingip\n'
'-I %(bn)s-float-snat 1 -m connmark --mark 0x0/0xffff0000 '
'-j CONNMARK --save-mark '
'--nfmask 0xffff0000 --ctmask 0xffff0000\n'
'COMMIT\n'
'# Completed by iptables_manager\n' % iptables_args)
def _generate_mangle_dump_v6(iptables_args):
return ('# Generated by iptables_manager\n'
'*mangle\n'
':FORWARD - [0:0]\n'
':INPUT - [0:0]\n'
':OUTPUT - [0:0]\n'
':POSTROUTING - [0:0]\n'
':PREROUTING - [0:0]\n'
':%(bn)s-FORWARD - [0:0]\n'
':%(bn)s-INPUT - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-POSTROUTING - [0:0]\n'
':%(bn)s-PREROUTING - [0:0]\n'
':%(bn)s-scope - [0:0]\n'
'-I FORWARD 1 -j %(bn)s-FORWARD\n'
'-I INPUT 1 -j %(bn)s-INPUT\n'
'-I OUTPUT 1 -j %(bn)s-OUTPUT\n'
'-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n'
'-I PREROUTING 1 -j %(bn)s-PREROUTING\n'
'-I %(bn)s-PREROUTING 1 -j %(bn)s-scope\n'
'-I %(bn)s-PREROUTING 2 -m connmark ! --mark 0x0/0xffff0000 '
'-j CONNMARK --restore-mark '
'--nfmask 0xffff0000 --ctmask 0xffff0000\n'
'COMMIT\n'
'# Completed by iptables_manager\n' % iptables_args)
def _generate_raw_dump(iptables_args):
return ('# Generated by iptables_manager\n'
'*raw\n'
':OUTPUT - [0:0]\n'
':PREROUTING - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-PREROUTING - [0:0]\n'
'-I OUTPUT 1 -j %(bn)s-OUTPUT\n'
'-I PREROUTING 1 -j %(bn)s-PREROUTING\n'
'COMMIT\n'
'# Completed by iptables_manager\n' % iptables_args)
MANGLE_DUMP = _generate_mangle_dump(IPTABLES_ARG)
MANGLE_DUMP_V6 = _generate_mangle_dump_v6(IPTABLES_ARG)
RAW_DUMP = _generate_raw_dump(IPTABLES_ARG)
class IptablesManagerStateFulTestCase(base.BaseTestCase):
def setUp(self):
super(IptablesManagerStateFulTestCase, self).setUp()
cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT')
self.iptables = iptables_manager.IptablesManager()
self.execute = mock.patch.object(self.iptables, "execute").start()
def test_binary_name(self):
expected = os.path.basename(sys.argv[0])[:16]
self.assertEqual(expected, iptables_manager.binary_name)
def test_get_chain_name(self):
name = '0123456789' * 5
# 28 chars is the maximum length of iptables chain name.
self.assertEqual(iptables_manager.get_chain_name(name, wrap=False),
name[:28])
        # 11 chars is the maximum length of an iptables_manager chain name
        # when binary_name is prepended.
self.assertEqual(iptables_manager.get_chain_name(name, wrap=True),
name[:11])
def test_defer_apply_with_exception(self):
self.iptables._apply = mock.Mock(side_effect=Exception)
with testtools.ExpectedException(n_exc.IpTablesApplyException):
with self.iptables.defer_apply():
pass
def _extend_with_ip6tables_filter(self, expected_calls, filter_dump):
expected_calls.insert(2, (
mock.call(['ip6tables-save'],
run_as_root=True),
''))
expected_calls.insert(3, (
mock.call(['ip6tables-restore', '-n'],
process_input=filter_dump,
run_as_root=True),
None))
expected_calls.extend([
(mock.call(['ip6tables-save'],
run_as_root=True),
''),
(mock.call(['ip6tables-restore', '-n'],
process_input=filter_dump,
run_as_root=True),
None)])
def _test_add_and_remove_chain_custom_binary_name_helper(self, use_ipv6):
bn = ("xbcdef" * 5)
self.iptables = iptables_manager.IptablesManager(
binary_name=bn,
use_ipv6=use_ipv6)
self.execute = mock.patch.object(self.iptables, "execute").start()
iptables_args = {'bn': bn[:16], 'filter_rules': ''}
filter_dump = FILTER_WITH_RULES_TEMPLATE % iptables_args
filter_dump_ipv6 = FILTER_TEMPLATE % iptables_args
filter_dump_mod = filter_dump
nat_dump = NAT_TEMPLATE % iptables_args
raw_dump = _generate_raw_dump(iptables_args)
mangle_dump = _generate_mangle_dump(iptables_args)
expected_calls_and_values = [
(mock.call(['iptables-save'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-n'],
process_input=(filter_dump_mod + mangle_dump +
nat_dump + raw_dump),
run_as_root=True),
None),
(mock.call(['iptables-save'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-n'],
process_input=(filter_dump + mangle_dump +
nat_dump + raw_dump),
run_as_root=True),
None),
]
if use_ipv6:
mangle_dump_v6 = _generate_mangle_dump_v6(iptables_args)
self._extend_with_ip6tables_filter(
expected_calls_and_values,
filter_dump_ipv6 + mangle_dump_v6 + raw_dump)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.iptables.ipv4['filter'].add_chain('filter')
self.iptables.apply()
self.iptables.ipv4['filter'].empty_chain('filter')
self.iptables.apply()
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_and_remove_chain_custom_binary_name(self):
self._test_add_and_remove_chain_custom_binary_name_helper(False)
def test_add_and_remove_chain_custom_binary_name_with_ipv6(self):
self._test_add_and_remove_chain_custom_binary_name_helper(True)
def _test_empty_chain_custom_binary_name_helper(self, use_ipv6):
bn = ("xbcdef" * 5)[:16]
self.iptables = iptables_manager.IptablesManager(
binary_name=bn,
use_ipv6=use_ipv6)
self.execute = mock.patch.object(self.iptables, "execute").start()
iptables_args = {'bn': bn}
filter_dump = FILTER_TEMPLATE % iptables_args
filter_rules = ('-I %(bn)s-filter 1 -s 0/0 -d 192.168.0.2\n'
% iptables_args)
iptables_args['filter_rules'] = filter_rules
filter_dump_mod = FILTER_WITH_RULES_TEMPLATE % iptables_args
nat_dump = NAT_TEMPLATE % iptables_args
raw_dump = _generate_raw_dump(iptables_args)
mangle_dump = _generate_mangle_dump(iptables_args)
expected_calls_and_values = [
(mock.call(['iptables-save'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-n'],
process_input=(filter_dump_mod + mangle_dump +
nat_dump + raw_dump),
run_as_root=True),
None),
(mock.call(['iptables-save'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-n'],
process_input=(filter_dump + mangle_dump +
nat_dump + raw_dump),
run_as_root=True),
None),
]
if use_ipv6:
mangle_dump_v6 = _generate_mangle_dump_v6(iptables_args)
self._extend_with_ip6tables_filter(
expected_calls_and_values,
filter_dump + mangle_dump_v6 + raw_dump)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.iptables.ipv4['filter'].add_chain('filter')
self.iptables.ipv4['filter'].add_rule('filter',
'-s 0/0 -d 192.168.0.2')
self.iptables.apply()
self.iptables.ipv4['filter'].remove_chain('filter')
self.iptables.apply()
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_empty_chain_custom_binary_name(self):
self._test_empty_chain_custom_binary_name_helper(False)
def test_empty_chain_custom_binary_name_with_ipv6(self):
self._test_empty_chain_custom_binary_name_helper(True)
def _test_add_and_remove_chain_helper(self, use_ipv6):
self.iptables = iptables_manager.IptablesManager(
use_ipv6=use_ipv6)
self.execute = mock.patch.object(self.iptables, "execute").start()
filter_dump_mod = FILTER_WITH_RULES_TEMPLATE % IPTABLES_ARG
expected_calls_and_values = [
(mock.call(['iptables-save'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-n'],
process_input=(filter_dump_mod + MANGLE_DUMP +
NAT_DUMP + RAW_DUMP),
run_as_root=True),
None),
(mock.call(['iptables-save'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-n'],
process_input=(FILTER_DUMP + MANGLE_DUMP + NAT_DUMP +
RAW_DUMP),
run_as_root=True),
None),
]
if use_ipv6:
self._extend_with_ip6tables_filter(
expected_calls_and_values,
FILTER_DUMP + MANGLE_DUMP_V6 + RAW_DUMP)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.iptables.ipv4['filter'].add_chain('filter')
self.iptables.apply()
self.iptables.ipv4['filter'].remove_chain('filter')
self.iptables.apply()
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_and_remove_chain(self):
self._test_add_and_remove_chain_helper(False)
def test_add_and_remove_chain_with_ipv6(self):
self._test_add_and_remove_chain_helper(True)
def _test_add_filter_rule_helper(self, use_ipv6):
self.iptables = iptables_manager.IptablesManager(
use_ipv6=use_ipv6)
self.execute = mock.patch.object(self.iptables, "execute").start()
iptables_args = {}
iptables_args.update(IPTABLES_ARG)
filter_rules = ('-I %(bn)s-INPUT 1 -s 0/0 -d 192.168.0.2 -j '
'%(bn)s-filter\n-I %(bn)s-filter 1 -j DROP\n'
% iptables_args)
iptables_args['filter_rules'] = filter_rules
filter_dump_mod = FILTER_WITH_RULES_TEMPLATE % iptables_args
raw_dump = RAW_DUMP % IPTABLES_ARG
expected_calls_and_values = [
(mock.call(['iptables-save'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-n'],
process_input=(filter_dump_mod + MANGLE_DUMP +
NAT_DUMP + RAW_DUMP),
run_as_root=True),
None),
(mock.call(['iptables-save'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-n'],
process_input=(FILTER_DUMP + MANGLE_DUMP + NAT_DUMP +
RAW_DUMP),
run_as_root=True
),
None),
]
if use_ipv6:
self._extend_with_ip6tables_filter(
expected_calls_and_values,
FILTER_DUMP + MANGLE_DUMP_V6 + raw_dump)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.iptables.ipv4['filter'].add_chain('filter')
self.iptables.ipv4['filter'].add_rule('filter', '-j DROP')
self.iptables.ipv4['filter'].add_rule('INPUT',
'-s 0/0 -d 192.168.0.2 -j'
' %(bn)s-filter' % IPTABLES_ARG)
self.iptables.apply()
self.iptables.ipv4['filter'].remove_rule('filter', '-j DROP')
self.iptables.ipv4['filter'].remove_rule('INPUT',
'-s 0/0 -d 192.168.0.2 -j'
' %(bn)s-filter'
% IPTABLES_ARG)
self.iptables.ipv4['filter'].remove_chain('filter')
self.iptables.apply()
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_filter_rule(self):
self._test_add_filter_rule_helper(False)
def test_add_filter_rule_with_ipv6(self):
self._test_add_filter_rule_helper(True)
def _test_rule_with_wrap_target_helper(self, use_ipv6):
self.iptables = iptables_manager.IptablesManager(
use_ipv6=use_ipv6)
self.execute = mock.patch.object(self.iptables, "execute").start()
name = '0123456789' * 5
wrap = "%s-%s" % (iptables_manager.binary_name,
iptables_manager.get_chain_name(name))
iptables_args = {'bn': iptables_manager.binary_name,
'wrap': wrap}
filter_dump_mod = ('# Generated by iptables_manager\n'
'*filter\n'
':FORWARD - [0:0]\n'
':INPUT - [0:0]\n'
':OUTPUT - [0:0]\n'
':neutron-filter-top - [0:0]\n'
':%(wrap)s - [0:0]\n'
':%(bn)s-FORWARD - [0:0]\n'
':%(bn)s-INPUT - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-local - [0:0]\n'
':%(bn)s-scope - [0:0]\n'
'-I FORWARD 1 -j neutron-filter-top\n'
'-I FORWARD 2 -j %(bn)s-FORWARD\n'
'-I INPUT 1 -j %(bn)s-INPUT\n'
'-I OUTPUT 1 -j neutron-filter-top\n'
'-I OUTPUT 2 -j %(bn)s-OUTPUT\n'
'-I neutron-filter-top 1 -j %(bn)s-local\n'
'-I %(bn)s-FORWARD 1 -j %(bn)s-scope\n'
'-I %(bn)s-INPUT 1 -s 0/0 -d 192.168.0.2 -j '
'%(wrap)s\n'
'COMMIT\n'
'# Completed by iptables_manager\n'
% iptables_args)
raw_dump = RAW_DUMP % IPTABLES_ARG
expected_calls_and_values = [
(mock.call(['iptables-save'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-n'],
process_input=(filter_dump_mod + MANGLE_DUMP +
NAT_DUMP + RAW_DUMP),
run_as_root=True),
None),
(mock.call(['iptables-save'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-n'],
process_input=(FILTER_DUMP + MANGLE_DUMP +
NAT_DUMP + RAW_DUMP),
run_as_root=True),
None),
]
if use_ipv6:
self._extend_with_ip6tables_filter(
expected_calls_and_values,
FILTER_DUMP + MANGLE_DUMP_V6 + raw_dump)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.iptables.ipv4['filter'].add_chain(name)
self.iptables.ipv4['filter'].add_rule('INPUT',
'-s 0/0 -d 192.168.0.2 -j'
' $%s' % name)
self.iptables.apply()
self.iptables.ipv4['filter'].remove_rule('INPUT',
'-s 0/0 -d 192.168.0.2 -j'
' $%s' % name)
self.iptables.ipv4['filter'].remove_chain(name)
self.iptables.apply()
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_rule_with_wrap_target(self):
self._test_rule_with_wrap_target_helper(False)
def test_rule_with_wrap_target_with_ipv6(self):
self._test_rule_with_wrap_target_helper(True)
def _test_add_mangle_rule_helper(self, use_ipv6):
self.iptables = iptables_manager.IptablesManager(
use_ipv6=use_ipv6)
self.execute = mock.patch.object(self.iptables, "execute").start()
mangle_dump_mod = (
'# Generated by iptables_manager\n'
'*mangle\n'
':FORWARD - [0:0]\n'
':INPUT - [0:0]\n'
':OUTPUT - [0:0]\n'
':POSTROUTING - [0:0]\n'
':PREROUTING - [0:0]\n'
':%(bn)s-FORWARD - [0:0]\n'
':%(bn)s-INPUT - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-POSTROUTING - [0:0]\n'
':%(bn)s-PREROUTING - [0:0]\n'
':%(bn)s-float-snat - [0:0]\n'
':%(bn)s-floatingip - [0:0]\n'
':%(bn)s-mangle - [0:0]\n'
':%(bn)s-mark - [0:0]\n'
':%(bn)s-scope - [0:0]\n'
'-I FORWARD 1 -j %(bn)s-FORWARD\n'
'-I INPUT 1 -j %(bn)s-INPUT\n'
'-I OUTPUT 1 -j %(bn)s-OUTPUT\n'
'-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n'
'-I PREROUTING 1 -j %(bn)s-PREROUTING\n'
'-I %(bn)s-PREROUTING 1 -j %(bn)s-mark\n'
'-I %(bn)s-PREROUTING 2 -j %(bn)s-scope\n'
'-I %(bn)s-PREROUTING 3 -m connmark ! --mark 0x0/0xffff0000 '
'-j CONNMARK --restore-mark '
'--nfmask 0xffff0000 --ctmask 0xffff0000\n'
'-I %(bn)s-PREROUTING 4 -j %(bn)s-floatingip\n'
'-I %(bn)s-PREROUTING 5 -j MARK --set-xmark 0x1/%(mark)s\n'
'-I %(bn)s-float-snat 1 -m connmark --mark 0x0/0xffff0000 '
'-j CONNMARK --save-mark '
'--nfmask 0xffff0000 --ctmask 0xffff0000\n'
'COMMIT\n'
'# Completed by iptables_manager\n' % IPTABLES_ARG)
expected_calls_and_values = [
(mock.call(['iptables-save'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-n'],
process_input=(FILTER_DUMP + mangle_dump_mod +
NAT_DUMP + RAW_DUMP),
run_as_root=True),
None),
(mock.call(['iptables-save'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-n'],
process_input=(FILTER_DUMP + MANGLE_DUMP +
NAT_DUMP + RAW_DUMP),
run_as_root=True),
None),
]
if use_ipv6:
self._extend_with_ip6tables_filter(
expected_calls_and_values,
FILTER_DUMP + MANGLE_DUMP_V6 + RAW_DUMP)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.iptables.ipv4['mangle'].add_chain('mangle')
self.iptables.ipv4['mangle'].add_rule(
'PREROUTING',
'-j MARK --set-xmark 0x1/%s' % constants.ROUTER_MARK_MASK)
self.iptables.apply()
self.iptables.ipv4['mangle'].remove_rule(
'PREROUTING',
'-j MARK --set-xmark 0x1/%s' % constants.ROUTER_MARK_MASK)
self.iptables.ipv4['mangle'].remove_chain('mangle')
self.iptables.apply()
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_mangle_rule(self):
self._test_add_mangle_rule_helper(False)
def test_add_mangle_rule_with_ipv6(self):
self._test_add_mangle_rule_helper(True)
def _test_add_nat_rule_helper(self, use_ipv6):
self.iptables = iptables_manager.IptablesManager(
use_ipv6=use_ipv6)
self.execute = mock.patch.object(self.iptables, "execute").start()
nat_dump = NAT_TEMPLATE % IPTABLES_ARG
nat_dump_mod = ('# Generated by iptables_manager\n'
'*nat\n'
':OUTPUT - [0:0]\n'
':POSTROUTING - [0:0]\n'
':PREROUTING - [0:0]\n'
':neutron-postrouting-bottom - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-POSTROUTING - [0:0]\n'
':%(bn)s-PREROUTING - [0:0]\n'
':%(bn)s-float-snat - [0:0]\n'
':%(bn)s-nat - [0:0]\n'
':%(bn)s-snat - [0:0]\n'
'-I OUTPUT 1 -j %(bn)s-OUTPUT\n'
'-I POSTROUTING 1 -j %(bn)s-POSTROUTING\n'
'-I POSTROUTING 2 -j neutron-postrouting-bottom\n'
'-I PREROUTING 1 -j %(bn)s-PREROUTING\n'
'-I neutron-postrouting-bottom 1 -j %(bn)s-snat\n'
'-I %(bn)s-PREROUTING 1 -d 192.168.0.3 -j '
'%(bn)s-nat\n'
'-I %(bn)s-nat 1 -p tcp --dport 8080 -j '
'REDIRECT --to-port 80\n'
'-I %(bn)s-snat 1 -j %(bn)s-float-snat\n'
'COMMIT\n'
'# Completed by iptables_manager\n' % IPTABLES_ARG)
raw_dump = RAW_DUMP % IPTABLES_ARG
expected_calls_and_values = [
(mock.call(['iptables-save'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-n'],
process_input=(FILTER_DUMP + MANGLE_DUMP +
nat_dump_mod + RAW_DUMP),
run_as_root=True),
None),
(mock.call(['iptables-save'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-n'],
process_input=(FILTER_DUMP + MANGLE_DUMP + nat_dump +
RAW_DUMP),
run_as_root=True),
None),
]
if use_ipv6:
self._extend_with_ip6tables_filter(
expected_calls_and_values,
FILTER_DUMP + MANGLE_DUMP_V6 + raw_dump)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.iptables.ipv4['nat'].add_chain('nat')
self.iptables.ipv4['nat'].add_rule('PREROUTING',
'-d 192.168.0.3 -j '
'%(bn)s-nat' % IPTABLES_ARG)
self.iptables.ipv4['nat'].add_rule('nat',
'-p tcp --dport 8080' +
' -j REDIRECT --to-port 80')
self.iptables.apply()
self.iptables.ipv4['nat'].remove_rule('nat',
'-p tcp --dport 8080 -j'
' REDIRECT --to-port 80')
self.iptables.ipv4['nat'].remove_rule('PREROUTING',
'-d 192.168.0.3 -j '
'%(bn)s-nat' % IPTABLES_ARG)
self.iptables.ipv4['nat'].remove_chain('nat')
self.iptables.apply()
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_nat_rule(self):
self._test_add_nat_rule_helper(False)
def test_add_nat_rule_with_ipv6(self):
self._test_add_nat_rule_helper(True)
def _test_add_raw_rule_helper(self, use_ipv6):
self.iptables = iptables_manager.IptablesManager(
use_ipv6=use_ipv6)
self.execute = mock.patch.object(self.iptables, "execute").start()
raw_dump_mod = ('# Generated by iptables_manager\n'
'*raw\n'
':OUTPUT - [0:0]\n'
':PREROUTING - [0:0]\n'
':%(bn)s-OUTPUT - [0:0]\n'
':%(bn)s-PREROUTING - [0:0]\n'
':%(bn)s-raw - [0:0]\n'
'-I OUTPUT 1 -j %(bn)s-OUTPUT\n'
'-I PREROUTING 1 -j %(bn)s-PREROUTING\n'
'-I %(bn)s-PREROUTING 1 -j CT --notrack\n'
'COMMIT\n'
'# Completed by iptables_manager\n'
% IPTABLES_ARG)
expected_calls_and_values = [
(mock.call(['iptables-save'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-n'],
process_input=(FILTER_DUMP + MANGLE_DUMP + NAT_DUMP +
raw_dump_mod),
run_as_root=True),
None),
(mock.call(['iptables-save'],
run_as_root=True),
''),
(mock.call(['iptables-restore', '-n'],
process_input=(FILTER_DUMP + MANGLE_DUMP + NAT_DUMP +
RAW_DUMP),
run_as_root=True),
None),
]
if use_ipv6:
self._extend_with_ip6tables_filter(
expected_calls_and_values,
FILTER_DUMP + MANGLE_DUMP_V6 + RAW_DUMP)
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.iptables.ipv4['raw'].add_chain('raw')
self.iptables.ipv4['raw'].add_rule('PREROUTING',
'-j CT --notrack')
self.iptables.apply()
self.iptables.ipv4['raw'].remove_rule('PREROUTING',
'-j CT --notrack')
self.iptables.ipv4['raw'].remove_chain('raw')
self.iptables.apply()
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_raw_rule(self):
self._test_add_raw_rule_helper(False)
def test_add_raw_rule_with_ipv6(self):
self._test_add_raw_rule_helper(True)
def test_add_rule_to_a_nonexistent_chain(self):
self.assertRaises(LookupError, self.iptables.ipv4['filter'].add_rule,
'nonexistent', '-j DROP')
def test_remove_nonexistent_chain(self):
with mock.patch.object(iptables_manager, "LOG") as log:
self.iptables.ipv4['filter'].remove_chain('nonexistent')
log.debug.assert_called_once_with(
'Attempted to remove chain %s which does not exist',
'nonexistent')
def test_remove_nonexistent_rule(self):
with mock.patch.object(iptables_manager, "LOG") as log:
self.iptables.ipv4['filter'].remove_rule('nonexistent', '-j DROP')
log.warn.assert_called_once_with(
'Tried to remove rule that was not there: '
'%(chain)r %(rule)r %(wrap)r %(top)r',
{'wrap': True, 'top': False, 'rule': '-j DROP',
'chain': 'nonexistent'})
def test_iptables_failure_with_no_failing_line_number(self):
with mock.patch.object(iptables_manager, "LOG") as log:
# generate Runtime errors on iptables-restore calls
def iptables_restore_failer(*args, **kwargs):
if 'iptables-restore' in args[0]:
self.input_lines = kwargs['process_input'].split('\n')
# don't provide a specific failure message so all lines
# are logged
raise RuntimeError()
return FILTER_DUMP
self.execute.side_effect = iptables_restore_failer
# _apply_synchronized calls iptables-restore so it should raise
# a RuntimeError
self.assertRaises(RuntimeError,
self.iptables._apply_synchronized)
# The RuntimeError should have triggered a log of the input to the
# process that it failed to execute. Verify by comparing the log
# call to the 'process_input' arg given to the failed iptables-restore
# call.
# Failure without a specific line number in the error should cause
# all lines to be logged with numbers.
logged = ['%7d. %s' % (n, l)
for n, l in enumerate(self.input_lines, 1)]
log.error.assert_called_once_with(_(
'IPTablesManager.apply failed to apply the '
'following set of iptables rules:\n%s'),
'\n'.join(logged)
)
def test_iptables_failure_on_specific_line(self):
with mock.patch.object(iptables_manager, "LOG") as log:
# generate Runtime errors on iptables-restore calls
def iptables_restore_failer(*args, **kwargs):
if 'iptables-restore' in args[0]:
self.input_lines = kwargs['process_input'].split('\n')
# pretend line 11 failed
msg = ("Exit code: 1\nStdout: ''\n"
"Stderr: 'iptables-restore: line 11 failed\n'")
raise RuntimeError(msg)
return FILTER_DUMP
self.execute.side_effect = iptables_restore_failer
# _apply_synchronized calls iptables-restore so it should raise
# a RuntimeError
self.assertRaises(RuntimeError,
self.iptables._apply_synchronized)
# The RuntimeError should have triggered a log of the input to the
# process that it failed to execute. Verify by comparing the log
# call to the 'process_input' arg given to the failed iptables-restore
# call.
# Line 11 of the input was marked as failing so lines (11 - context)
# to (11 + context) should be logged
ctx = iptables_manager.IPTABLES_ERROR_LINES_OF_CONTEXT
log_start = max(0, 11 - ctx)
log_end = 11 + ctx
logged = ['%7d. %s' % (n, l)
for n, l in enumerate(self.input_lines[log_start:log_end],
log_start + 1)]
log.error.assert_called_once_with(_(
'IPTablesManager.apply failed to apply the '
'following set of iptables rules:\n%s'),
'\n'.join(logged)
)
def test_get_traffic_counters_chain_notexists(self):
with mock.patch.object(iptables_manager, "LOG") as log:
acc = self.iptables.get_traffic_counters('chain1')
self.assertIsNone(acc)
self.assertEqual(0, self.execute.call_count)
log.warn.assert_called_once_with(
'Attempted to get traffic counters of chain %s which '
'does not exist', 'chain1')
def _test_get_traffic_counters_helper(self, use_ipv6):
self.iptables = iptables_manager.IptablesManager(
use_ipv6=use_ipv6)
self.execute = mock.patch.object(self.iptables, "execute").start()
exp_packets = 800
exp_bytes = 131802
expected_calls_and_values = [
(mock.call(['iptables', '-t', 'filter', '-L', 'OUTPUT',
'-n', '-v', '-x'],
run_as_root=True),
TRAFFIC_COUNTERS_DUMP),
(mock.call(['iptables', '-t', 'raw', '-L', 'OUTPUT', '-n',
'-v', '-x'],
run_as_root=True),
''),
(mock.call(['iptables', '-t', 'mangle', '-L', 'OUTPUT', '-n',
'-v', '-x'],
run_as_root=True),
''),
(mock.call(['iptables', '-t', 'nat', '-L', 'OUTPUT', '-n',
'-v', '-x'],
run_as_root=True),
''),
]
if use_ipv6:
expected_calls_and_values.append(
(mock.call(['ip6tables', '-t', 'raw', '-L', 'OUTPUT',
'-n', '-v', '-x'], run_as_root=True),
''))
expected_calls_and_values.append(
(mock.call(['ip6tables', '-t', 'filter', '-L', 'OUTPUT',
'-n', '-v', '-x'],
run_as_root=True),
TRAFFIC_COUNTERS_DUMP))
expected_calls_and_values.append(
(mock.call(['ip6tables', '-t', 'mangle', '-L', 'OUTPUT',
'-n', '-v', '-x'], run_as_root=True),
''))
exp_packets *= 2
exp_bytes *= 2
tools.setup_mock_calls(self.execute, expected_calls_and_values)
acc = self.iptables.get_traffic_counters('OUTPUT')
self.assertEqual(acc['pkts'], exp_packets)
self.assertEqual(acc['bytes'], exp_bytes)
tools.verify_mock_calls(self.execute, expected_calls_and_values,
any_order=True)
def test_get_traffic_counters(self):
self._test_get_traffic_counters_helper(False)
def test_get_traffic_counters_with_ipv6(self):
self._test_get_traffic_counters_helper(True)
def _test_get_traffic_counters_with_zero_helper(self, use_ipv6):
self.iptables = iptables_manager.IptablesManager(
use_ipv6=use_ipv6)
self.execute = mock.patch.object(self.iptables, "execute").start()
exp_packets = 800
exp_bytes = 131802
expected_calls_and_values = [
(mock.call(['iptables', '-t', 'filter', '-L', 'OUTPUT',
'-n', '-v', '-x', '-Z'],
run_as_root=True),
TRAFFIC_COUNTERS_DUMP),
(mock.call(['iptables', '-t', 'raw', '-L', 'OUTPUT', '-n',
'-v', '-x', '-Z'],
run_as_root=True),
''),
(mock.call(['iptables', '-t', 'mangle', '-L', 'OUTPUT', '-n',
'-v', '-x', '-Z'],
run_as_root=True),
''),
(mock.call(['iptables', '-t', 'nat', '-L', 'OUTPUT', '-n',
'-v', '-x', '-Z'],
run_as_root=True),
'')
]
if use_ipv6:
expected_calls_and_values.append(
(mock.call(['ip6tables', '-t', 'raw', '-L', 'OUTPUT',
'-n', '-v', '-x', '-Z'], run_as_root=True),
''))
expected_calls_and_values.append(
(mock.call(['ip6tables', '-t', 'filter', '-L', 'OUTPUT',
'-n', '-v', '-x', '-Z'],
run_as_root=True),
TRAFFIC_COUNTERS_DUMP))
expected_calls_and_values.append(
(mock.call(['ip6tables', '-t', 'mangle', '-L', 'OUTPUT',
'-n', '-v', '-x', '-Z'], run_as_root=True),
''))
exp_packets *= 2
exp_bytes *= 2
tools.setup_mock_calls(self.execute, expected_calls_and_values)
acc = self.iptables.get_traffic_counters('OUTPUT', zero=True)
self.assertEqual(acc['pkts'], exp_packets)
self.assertEqual(acc['bytes'], exp_bytes)
tools.verify_mock_calls(self.execute, expected_calls_and_values,
any_order=True)
def test_get_traffic_counters_with_zero(self):
self._test_get_traffic_counters_with_zero_helper(False)
def test_get_traffic_counters_with_zero_with_ipv6(self):
self._test_get_traffic_counters_with_zero_helper(True)
class IptablesManagerStateLessTestCase(base.BaseTestCase):
def setUp(self):
super(IptablesManagerStateLessTestCase, self).setUp()
cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT')
self.iptables = (iptables_manager.IptablesManager(state_less=True))
def test_nat_not_found(self):
self.assertNotIn('nat', self.iptables.ipv4)
def test_mangle_not_found(self):
self.assertNotIn('mangle', self.iptables.ipv4)
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import six
import testtools
from tempest.api.compute import base
from tempest.common import image as common_image
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions
CONF = config.CONF
class ListImageFiltersTestJSON(base.BaseV2ComputeTest):
@classmethod
def skip_checks(cls):
super(ListImageFiltersTestJSON, cls).skip_checks()
if not CONF.service_available.glance:
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@classmethod
def setup_clients(cls):
super(ListImageFiltersTestJSON, cls).setup_clients()
cls.client = cls.compute_images_client
# Check if glance v1 is available to determine which client to use. We
# prefer glance v1 for the compute API tests since the compute image
# API proxy was written for glance v1.
if CONF.image_feature_enabled.api_v1:
cls.glance_client = cls.os_primary.image_client
elif CONF.image_feature_enabled.api_v2:
cls.glance_client = cls.os_primary.image_client_v2
else:
raise exceptions.InvalidConfiguration(
'Either api_v1 or api_v2 must be True in '
'[image-feature-enabled].')
@classmethod
def resource_setup(cls):
super(ListImageFiltersTestJSON, cls).resource_setup()
def _create_image():
params = {
'name': data_utils.rand_name(cls.__name__ + '-image'),
'container_format': 'bare',
'disk_format': 'raw'
}
if CONF.image_feature_enabled.api_v1:
params.update({'is_public': False})
params = {'headers':
common_image.image_meta_to_headers(**params)}
else:
params.update({'visibility': 'private'})
body = cls.glance_client.create_image(**params)
body = body['image'] if 'image' in body else body
image_id = body['id']
cls.images.append(image_id)
# Wait 1 second between creation and upload to ensure a delta
# between created_at and updated_at.
time.sleep(1)
image_file = six.BytesIO((b'*' * 1024))
if CONF.image_feature_enabled.api_v1:
cls.glance_client.update_image(image_id, data=image_file)
else:
cls.glance_client.store_image_file(image_id, data=image_file)
waiters.wait_for_image_status(cls.client, image_id, 'ACTIVE')
body = cls.client.show_image(image_id)['image']
return body
# Create non-snapshot images via glance
cls.image1 = _create_image()
cls.image1_id = cls.image1['id']
cls.image2 = _create_image()
cls.image2_id = cls.image2['id']
cls.image3 = _create_image()
cls.image3_id = cls.image3['id']
if not CONF.compute_feature_enabled.snapshot:
return
# Create instances and snapshots via nova
cls.server1 = cls.create_test_server()
cls.server2 = cls.create_test_server(wait_until='ACTIVE')
        # NOTE(sdague) this is faster than doing the sync wait_until on both
waiters.wait_for_server_status(cls.servers_client,
cls.server1['id'], 'ACTIVE')
# Create images to be used in the filter tests
cls.snapshot1 = cls.create_image_from_server(
cls.server1['id'], wait_until='ACTIVE')
cls.snapshot1_id = cls.snapshot1['id']
        # Servers have a hidden property marking when they are being imaged.
        # Performing back-to-back create-image calls on a single server will
        # sometimes cause failures.
cls.snapshot3 = cls.create_image_from_server(
cls.server2['id'], wait_until='ACTIVE')
cls.snapshot3_id = cls.snapshot3['id']
# Wait for the server to be active after the image upload
cls.snapshot2 = cls.create_image_from_server(
cls.server1['id'], wait_until='ACTIVE')
cls.snapshot2_id = cls.snapshot2['id']
@decorators.idempotent_id('a3f5b513-aeb3-42a9-b18e-f091ef73254d')
def test_list_images_filter_by_status(self):
# The list of images should contain only images with the
# provided status
params = {'status': 'ACTIVE'}
images = self.client.list_images(**params)['images']
self.assertNotEmpty([i for i in images if i['id'] == self.image1_id])
self.assertNotEmpty([i for i in images if i['id'] == self.image2_id])
self.assertNotEmpty([i for i in images if i['id'] == self.image3_id])
@decorators.idempotent_id('33163b73-79f5-4d07-a7ea-9213bcc468ff')
def test_list_images_filter_by_name(self):
# List of all images should contain the expected images filtered
# by name
params = {'name': self.image1['name']}
images = self.client.list_images(**params)['images']
self.assertNotEmpty([i for i in images if i['id'] == self.image1_id])
self.assertEmpty([i for i in images if i['id'] == self.image2_id])
self.assertEmpty([i for i in images if i['id'] == self.image3_id])
@decorators.idempotent_id('9f238683-c763-45aa-b848-232ec3ce3105')
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting is not available.')
def test_list_images_filter_by_server_id(self):
        # The returned images should be filtered by server id
params = {'server': self.server1['id']}
images = self.client.list_images(**params)['images']
self.assertNotEmpty([i for i in images
if i['id'] == self.snapshot1_id],
"Failed to find image %s in images. "
"Got images %s" % (self.image1_id, images))
self.assertNotEmpty([i for i in images
if i['id'] == self.snapshot2_id])
self.assertEmpty([i for i in images if i['id'] == self.snapshot3_id])
@decorators.idempotent_id('05a377b8-28cf-4734-a1e6-2ab5c38bf606')
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting is not available.')
def test_list_images_filter_by_server_ref(self):
        # The list of images should be filtered by server ref
server_links = self.server2['links']
# Try all server link types
for link in server_links:
params = {'server': link['href']}
images = self.client.list_images(**params)['images']
self.assertEmpty([i for i in images
if i['id'] == self.snapshot1_id])
self.assertEmpty([i for i in images
if i['id'] == self.snapshot2_id])
self.assertNotEmpty([i for i in images
if i['id'] == self.snapshot3_id])
@decorators.idempotent_id('e3356918-4d3e-4756-81d5-abc4524ba29f')
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting is not available.')
def test_list_images_filter_by_type(self):
        # The list of images should be filtered by image type
params = {'type': 'snapshot'}
images = self.client.list_images(**params)['images']
self.assertNotEmpty([i for i in images
if i['id'] == self.snapshot1_id])
self.assertNotEmpty([i for i in images
if i['id'] == self.snapshot2_id])
self.assertNotEmpty([i for i in images
if i['id'] == self.snapshot3_id])
self.assertEmpty([i for i in images if i['id'] == self.image_ref])
@decorators.idempotent_id('3a484ca9-67ba-451e-b494-7fcf28d32d62')
def test_list_images_limit_results(self):
# Verify only the expected number of results are returned
params = {'limit': '1'}
images = self.client.list_images(**params)['images']
self.assertEqual(1, len([x for x in images if 'id' in x]))
@decorators.idempotent_id('18bac3ae-da27-436c-92a9-b22474d13aab')
def test_list_images_filter_by_changes_since(self):
# Verify only updated images are returned in the detailed list
# Becoming ACTIVE will modify the updated time
# Filter by the image's created time
params = {'changes-since': self.image3['created']}
images = self.client.list_images(**params)['images']
found = [i for i in images if i['id'] == self.image3_id]
self.assertNotEmpty(found)
@decorators.idempotent_id('9b0ea018-6185-4f71-948a-a123a107988e')
def test_list_images_with_detail_filter_by_status(self):
# Detailed list of all images should only contain images
# with the provided status
params = {'status': 'ACTIVE'}
images = self.client.list_images(detail=True, **params)['images']
self.assertNotEmpty([i for i in images if i['id'] == self.image1_id])
self.assertNotEmpty([i for i in images if i['id'] == self.image2_id])
self.assertNotEmpty([i for i in images if i['id'] == self.image3_id])
@decorators.idempotent_id('644ea267-9bd9-4f3b-af9f-dffa02396a17')
def test_list_images_with_detail_filter_by_name(self):
# Detailed list of all images should contain the expected
# images filtered by name
params = {'name': self.image1['name']}
images = self.client.list_images(detail=True, **params)['images']
self.assertNotEmpty([i for i in images if i['id'] == self.image1_id])
self.assertEmpty([i for i in images if i['id'] == self.image2_id])
self.assertEmpty([i for i in images if i['id'] == self.image3_id])
@decorators.idempotent_id('ba2fa9a9-b672-47cc-b354-3b4c0600e2cb')
def test_list_images_with_detail_limit_results(self):
# Verify only the expected number of results (with full details)
# are returned
params = {'limit': '1'}
images = self.client.list_images(detail=True, **params)['images']
self.assertEqual(1, len(images))
@decorators.idempotent_id('8c78f822-203b-4bf6-8bba-56ebd551cf84')
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting is not available.')
def test_list_images_with_detail_filter_by_server_ref(self):
        # Detailed list of images should be filtered by server ref
server_links = self.server2['links']
# Try all server link types
for link in server_links:
params = {'server': link['href']}
images = self.client.list_images(detail=True, **params)['images']
self.assertEmpty([i for i in images
if i['id'] == self.snapshot1_id])
self.assertEmpty([i for i in images
if i['id'] == self.snapshot2_id])
self.assertNotEmpty([i for i in images
if i['id'] == self.snapshot3_id])
@decorators.idempotent_id('888c0cc0-7223-43c5-9db0-b125fd0a393b')
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting is not available.')
def test_list_images_with_detail_filter_by_type(self):
        # The detailed list of images should be filtered by image type
params = {'type': 'snapshot'}
images = self.client.list_images(detail=True, **params)['images']
self.client.show_image(self.image_ref)
self.assertNotEmpty([i for i in images
if i['id'] == self.snapshot1_id])
self.assertNotEmpty([i for i in images
if i['id'] == self.snapshot2_id])
self.assertNotEmpty([i for i in images
if i['id'] == self.snapshot3_id])
self.assertEmpty([i for i in images if i['id'] == self.image_ref])
@decorators.idempotent_id('7d439e18-ac2e-4827-b049-7e18004712c4')
def test_list_images_with_detail_filter_by_changes_since(self):
        # Verify an updated image is returned
# Becoming ACTIVE will modify the updated time
# Filter by the image's created time
params = {'changes-since': self.image1['created']}
images = self.client.list_images(detail=True, **params)['images']
self.assertNotEmpty([i for i in images if i['id'] == self.image1_id])
|
|
import unittest, doctest, operator
import inspect
from test import test_support
from collections import namedtuple, Counter, OrderedDict
from test import mapping_tests
import pickle, cPickle, copy
from random import randrange, shuffle
import keyword
import re
import sys
from collections import Hashable, Iterable, Iterator
from collections import Sized, Container, Callable
from collections import Set, MutableSet
from collections import Mapping, MutableMapping
from collections import Sequence, MutableSequence
# Silence deprecation warning
sets = test_support.import_module('sets', deprecated=True)
TestNT = namedtuple('TestNT', 'x y z') # type used for pickle tests
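# Protocol-0 pickle of TestNT(x=10, y=20, z=30), apparently produced by
# Python 2.7.3 (hence the name); test_pickling_bug_18015 below loads it to
# verify that pickles written by older releases still round-trip.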
py273_named_tuple_pickle = '''\
ccopy_reg
_reconstructor
p0
(ctest.test_collections
TestNT
p1
c__builtin__
tuple
p2
(I10
I20
I30
tp3
tp4
Rp5
ccollections
OrderedDict
p6
((lp7
(lp8
S'x'
p9
aI10
aa(lp10
S'y'
p11
aI20
aa(lp12
S'z'
p13
aI30
aatp14
Rp15
b.
'''
class TestNamedTuple(unittest.TestCase):
def test_factory(self):
Point = namedtuple('Point', 'x y')
self.assertEqual(Point.__name__, 'Point')
self.assertEqual(Point.__slots__, ())
self.assertEqual(Point.__module__, __name__)
self.assertEqual(Point.__getitem__, tuple.__getitem__)
self.assertEqual(Point._fields, ('x', 'y'))
self.assertRaises(ValueError, namedtuple, 'abc%', 'efg ghi') # type has non-alpha char
self.assertRaises(ValueError, namedtuple, 'class', 'efg ghi') # type has keyword
self.assertRaises(ValueError, namedtuple, '9abc', 'efg ghi') # type starts with digit
self.assertRaises(ValueError, namedtuple, 'abc', 'efg g%hi') # field with non-alpha char
self.assertRaises(ValueError, namedtuple, 'abc', 'abc class') # field has keyword
self.assertRaises(ValueError, namedtuple, 'abc', '8efg 9ghi') # field starts with digit
self.assertRaises(ValueError, namedtuple, 'abc', '_efg ghi') # field with leading underscore
self.assertRaises(ValueError, namedtuple, 'abc', 'efg efg ghi') # duplicate field
namedtuple('Point0', 'x1 y2') # Verify that numbers are allowed in names
namedtuple('_', 'a b c') # Test leading underscores in a typename
nt = namedtuple('nt', u'the quick brown fox') # check unicode input
self.assertNotIn("u'", repr(nt._fields))
nt = namedtuple('nt', (u'the', u'quick')) # check unicode input
self.assertNotIn("u'", repr(nt._fields))
self.assertRaises(TypeError, Point._make, [11]) # catch too few args
self.assertRaises(TypeError, Point._make, [11, 22, 33]) # catch too many args
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_factory_doc_attr(self):
Point = namedtuple('Point', 'x y')
self.assertEqual(Point.__doc__, 'Point(x, y)')
def test_name_fixer(self):
for spec, renamed in [
[('efg', 'g%hi'), ('efg', '_1')], # field with non-alpha char
[('abc', 'class'), ('abc', '_1')], # field has keyword
[('8efg', '9ghi'), ('_0', '_1')], # field starts with digit
[('abc', '_efg'), ('abc', '_1')], # field with leading underscore
[('abc', 'efg', 'efg', 'ghi'), ('abc', 'efg', '_2', 'ghi')], # duplicate field
[('abc', '', 'x'), ('abc', '_1', 'x')], # fieldname is a space
]:
self.assertEqual(namedtuple('NT', spec, rename=True)._fields, renamed)
def test_instance(self):
Point = namedtuple('Point', 'x y')
p = Point(11, 22)
self.assertEqual(p, Point(x=11, y=22))
self.assertEqual(p, Point(11, y=22))
self.assertEqual(p, Point(y=22, x=11))
self.assertEqual(p, Point(*(11, 22)))
self.assertEqual(p, Point(**dict(x=11, y=22)))
self.assertRaises(TypeError, Point, 1) # too few args
self.assertRaises(TypeError, Point, 1, 2, 3) # too many args
self.assertRaises(TypeError, eval, 'Point(XXX=1, y=2)', locals()) # wrong keyword argument
self.assertRaises(TypeError, eval, 'Point(x=1)', locals()) # missing keyword argument
self.assertEqual(repr(p), 'Point(x=11, y=22)')
self.assertNotIn('__weakref__', dir(p))
self.assertEqual(p, Point._make([11, 22])) # test _make classmethod
self.assertEqual(p._fields, ('x', 'y')) # test _fields attribute
self.assertEqual(p._replace(x=1), (1, 22)) # test _replace method
self.assertEqual(p._asdict(), dict(x=11, y=22)) # test _asdict method
self.assertEqual(vars(p), p._asdict()) # verify that vars() works
try:
p._replace(x=1, error=2)
except ValueError:
pass
else:
            self.fail('Did not detect an incorrect fieldname')
# verify that field string can have commas
Point = namedtuple('Point', 'x, y')
p = Point(x=11, y=22)
self.assertEqual(repr(p), 'Point(x=11, y=22)')
# verify that fieldspec can be a non-string sequence
Point = namedtuple('Point', ('x', 'y'))
p = Point(x=11, y=22)
self.assertEqual(repr(p), 'Point(x=11, y=22)')
def test_tupleness(self):
Point = namedtuple('Point', 'x y')
p = Point(11, 22)
self.assertIsInstance(p, tuple)
self.assertEqual(p, (11, 22)) # matches a real tuple
self.assertEqual(tuple(p), (11, 22)) # coercable to a real tuple
self.assertEqual(list(p), [11, 22]) # coercable to a list
self.assertEqual(max(p), 22) # iterable
self.assertEqual(max(*p), 22) # star-able
x, y = p
self.assertEqual(p, (x, y)) # unpacks like a tuple
self.assertEqual((p[0], p[1]), (11, 22)) # indexable like a tuple
self.assertRaises(IndexError, p.__getitem__, 3)
self.assertEqual(p.x, x)
self.assertEqual(p.y, y)
self.assertRaises(AttributeError, eval, 'p.z', locals())
def test_odd_sizes(self):
Zero = namedtuple('Zero', '')
self.assertEqual(Zero(), ())
self.assertEqual(Zero._make([]), ())
self.assertEqual(repr(Zero()), 'Zero()')
self.assertEqual(Zero()._asdict(), {})
self.assertEqual(Zero()._fields, ())
Dot = namedtuple('Dot', 'd')
self.assertEqual(Dot(1), (1,))
self.assertEqual(Dot._make([1]), (1,))
self.assertEqual(Dot(1).d, 1)
self.assertEqual(repr(Dot(1)), 'Dot(d=1)')
self.assertEqual(Dot(1)._asdict(), {'d':1})
self.assertEqual(Dot(1)._replace(d=999), (999,))
self.assertEqual(Dot(1)._fields, ('d',))
n = 5000
import string, random
names = list(set(''.join([random.choice(string.ascii_letters)
for j in range(10)]) for i in range(n)))
n = len(names)
Big = namedtuple('Big', names)
b = Big(*range(n))
self.assertEqual(b, tuple(range(n)))
self.assertEqual(Big._make(range(n)), tuple(range(n)))
for pos, name in enumerate(names):
self.assertEqual(getattr(b, name), pos)
repr(b) # make sure repr() doesn't blow-up
d = b._asdict()
d_expected = dict(zip(names, range(n)))
self.assertEqual(d, d_expected)
b2 = b._replace(**dict([(names[1], 999),(names[-5], 42)]))
b2_expected = range(n)
b2_expected[1] = 999
b2_expected[-5] = 42
self.assertEqual(b2, tuple(b2_expected))
self.assertEqual(b._fields, tuple(names))
def test_pickle(self):
p = TestNT(x=10, y=20, z=30)
for module in pickle, cPickle:
loads = getattr(module, 'loads')
dumps = getattr(module, 'dumps')
for protocol in -1, 0, 1, 2:
q = loads(dumps(p, protocol))
self.assertEqual(p, q)
self.assertEqual(p._fields, q._fields)
def test_copy(self):
p = TestNT(x=10, y=20, z=30)
for copier in copy.copy, copy.deepcopy:
q = copier(p)
self.assertEqual(p, q)
self.assertEqual(p._fields, q._fields)
def test_name_conflicts(self):
# Some names like "self", "cls", "tuple", "itemgetter", and "property"
# failed when used as field names. Test to make sure these now work.
T = namedtuple('T', 'itemgetter property self cls tuple')
t = T(1, 2, 3, 4, 5)
self.assertEqual(t, (1,2,3,4,5))
newt = t._replace(itemgetter=10, property=20, self=30, cls=40, tuple=50)
self.assertEqual(newt, (10,20,30,40,50))
# Broader test of all interesting names in a template
with test_support.captured_stdout() as template:
T = namedtuple('T', 'x', verbose=True)
words = set(re.findall('[A-Za-z]+', template.getvalue()))
words -= set(keyword.kwlist)
T = namedtuple('T', words)
# test __new__
values = tuple(range(len(words)))
t = T(*values)
self.assertEqual(t, values)
t = T(**dict(zip(T._fields, values)))
self.assertEqual(t, values)
# test _make
t = T._make(values)
self.assertEqual(t, values)
# exercise __repr__
repr(t)
# test _asdict
self.assertEqual(t._asdict(), dict(zip(T._fields, values)))
# test _replace
t = T._make(values)
newvalues = tuple(v*10 for v in values)
newt = t._replace(**dict(zip(T._fields, newvalues)))
self.assertEqual(newt, newvalues)
# test _fields
self.assertEqual(T._fields, tuple(words))
# test __getnewargs__
self.assertEqual(t.__getnewargs__(), values)
def test_pickling_bug_18015(self):
# http://bugs.python.org/issue18015
pt = pickle.loads(py273_named_tuple_pickle)
self.assertEqual(pt.x, 10)
class ABCTestCase(unittest.TestCase):
def validate_abstract_methods(self, abc, *names):
methodstubs = dict.fromkeys(names, lambda s, *args: 0)
        # everything should work when all required methods are present
C = type('C', (abc,), methodstubs)
C()
# instantiation should fail if a required method is missing
for name in names:
stubs = methodstubs.copy()
del stubs[name]
C = type('C', (abc,), stubs)
self.assertRaises(TypeError, C, name)
def validate_isinstance(self, abc, name):
stub = lambda s, *args: 0
# new-style class
C = type('C', (object,), {name: stub})
self.assertIsInstance(C(), abc)
self.assertTrue(issubclass(C, abc))
# old-style class
class C: pass
setattr(C, name, stub)
self.assertIsInstance(C(), abc)
self.assertTrue(issubclass(C, abc))
# new-style class
C = type('C', (object,), {'__hash__': None})
self.assertNotIsInstance(C(), abc)
self.assertFalse(issubclass(C, abc))
# old-style class
class C: pass
self.assertNotIsInstance(C(), abc)
self.assertFalse(issubclass(C, abc))
def validate_comparison(self, instance):
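        # Check that, for every binary/comparison operator the instance supports,
        # evaluating it against an object of another class falls back to that
        # object's reflected method (__ror__, __rand__, __rsub__, etc.).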
ops = ['lt', 'gt', 'le', 'ge', 'ne', 'or', 'and', 'xor', 'sub']
operators = {}
for op in ops:
name = '__' + op + '__'
operators[name] = getattr(operator, name)
class Other:
def __init__(self):
self.right_side = False
def __eq__(self, other):
self.right_side = True
return True
__lt__ = __eq__
__gt__ = __eq__
__le__ = __eq__
__ge__ = __eq__
__ne__ = __eq__
__ror__ = __eq__
__rand__ = __eq__
__rxor__ = __eq__
__rsub__ = __eq__
for name, op in operators.items():
if not hasattr(instance, name):
continue
other = Other()
op(instance, other)
            self.assertTrue(other.right_side, 'Right side not called for %s.%s'
% (type(instance), name))
class TestOneTrickPonyABCs(ABCTestCase):
def test_Hashable(self):
# Check some non-hashables
non_samples = [list(), set(), dict()]
for x in non_samples:
self.assertNotIsInstance(x, Hashable)
self.assertFalse(issubclass(type(x), Hashable), repr(type(x)))
# Check some hashables
samples = [None,
int(), float(), complex(),
str(),
tuple(), frozenset(),
int, list, object, type,
]
for x in samples:
self.assertIsInstance(x, Hashable)
self.assertTrue(issubclass(type(x), Hashable), repr(type(x)))
self.assertRaises(TypeError, Hashable)
# Check direct subclassing
class H(Hashable):
def __hash__(self):
return super(H, self).__hash__()
__eq__ = Hashable.__eq__ # Silence Py3k warning
self.assertEqual(hash(H()), 0)
self.assertFalse(issubclass(int, H))
self.validate_abstract_methods(Hashable, '__hash__')
self.validate_isinstance(Hashable, '__hash__')
def test_Iterable(self):
# Check some non-iterables
non_samples = [None, 42, 3.14, 1j]
for x in non_samples:
self.assertNotIsInstance(x, Iterable)
self.assertFalse(issubclass(type(x), Iterable), repr(type(x)))
# Check some iterables
samples = [str(),
tuple(), list(), set(), frozenset(), dict(),
dict().keys(), dict().items(), dict().values(),
(lambda: (yield))(),
(x for x in []),
]
for x in samples:
self.assertIsInstance(x, Iterable)
self.assertTrue(issubclass(type(x), Iterable), repr(type(x)))
# Check direct subclassing
class I(Iterable):
def __iter__(self):
return super(I, self).__iter__()
self.assertEqual(list(I()), [])
self.assertFalse(issubclass(str, I))
self.validate_abstract_methods(Iterable, '__iter__')
self.validate_isinstance(Iterable, '__iter__')
def test_Iterator(self):
non_samples = [None, 42, 3.14, 1j, "".encode('ascii'), "", (), [],
{}, set()]
for x in non_samples:
self.assertNotIsInstance(x, Iterator)
self.assertFalse(issubclass(type(x), Iterator), repr(type(x)))
samples = [iter(str()),
iter(tuple()), iter(list()), iter(dict()),
iter(set()), iter(frozenset()),
iter(dict().keys()), iter(dict().items()),
iter(dict().values()),
(lambda: (yield))(),
(x for x in []),
]
for x in samples:
self.assertIsInstance(x, Iterator)
self.assertTrue(issubclass(type(x), Iterator), repr(type(x)))
self.validate_abstract_methods(Iterator, 'next', '__iter__')
# Issue 10565
class NextOnly:
def __next__(self):
yield 1
raise StopIteration
self.assertNotIsInstance(NextOnly(), Iterator)
class NextOnlyNew(object):
def __next__(self):
yield 1
raise StopIteration
self.assertNotIsInstance(NextOnlyNew(), Iterator)
def test_Sized(self):
non_samples = [None, 42, 3.14, 1j,
(lambda: (yield))(),
(x for x in []),
]
for x in non_samples:
self.assertNotIsInstance(x, Sized)
self.assertFalse(issubclass(type(x), Sized), repr(type(x)))
samples = [str(),
tuple(), list(), set(), frozenset(), dict(),
dict().keys(), dict().items(), dict().values(),
]
for x in samples:
self.assertIsInstance(x, Sized)
self.assertTrue(issubclass(type(x), Sized), repr(type(x)))
self.validate_abstract_methods(Sized, '__len__')
self.validate_isinstance(Sized, '__len__')
def test_Container(self):
non_samples = [None, 42, 3.14, 1j,
(lambda: (yield))(),
(x for x in []),
]
for x in non_samples:
self.assertNotIsInstance(x, Container)
self.assertFalse(issubclass(type(x), Container), repr(type(x)))
samples = [str(),
tuple(), list(), set(), frozenset(), dict(),
dict().keys(), dict().items(),
]
for x in samples:
self.assertIsInstance(x, Container)
self.assertTrue(issubclass(type(x), Container), repr(type(x)))
self.validate_abstract_methods(Container, '__contains__')
self.validate_isinstance(Container, '__contains__')
def test_Callable(self):
non_samples = [None, 42, 3.14, 1j,
"", "".encode('ascii'), (), [], {}, set(),
(lambda: (yield))(),
(x for x in []),
]
for x in non_samples:
self.assertNotIsInstance(x, Callable)
self.assertFalse(issubclass(type(x), Callable), repr(type(x)))
samples = [lambda: None,
type, int, object,
len,
list.append, [].append,
]
for x in samples:
self.assertIsInstance(x, Callable)
self.assertTrue(issubclass(type(x), Callable), repr(type(x)))
self.validate_abstract_methods(Callable, '__call__')
self.validate_isinstance(Callable, '__call__')
def test_direct_subclassing(self):
for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
class C(B):
pass
self.assertTrue(issubclass(C, B))
self.assertFalse(issubclass(int, C))
def test_registration(self):
for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
class C:
__metaclass__ = type
__hash__ = None # Make sure it isn't hashable by default
self.assertFalse(issubclass(C, B), B.__name__)
B.register(C)
self.assertTrue(issubclass(C, B))
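# Minimal concrete MutableSet used by the regression tests below: it stores its
# elements in a plain set and implements only the required abstract methods.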
class WithSet(MutableSet):
def __init__(self, it=()):
self.data = set(it)
def __len__(self):
return len(self.data)
def __iter__(self):
return iter(self.data)
def __contains__(self, item):
return item in self.data
def add(self, item):
self.data.add(item)
def discard(self, item):
self.data.discard(item)
class TestCollectionABCs(ABCTestCase):
# XXX For now, we only test some virtual inheritance properties.
# We should also test the proper behavior of the collection ABCs
# as real base classes or mix-in classes.
def test_Set(self):
for sample in [set, frozenset]:
self.assertIsInstance(sample(), Set)
self.assertTrue(issubclass(sample, Set))
self.validate_abstract_methods(Set, '__contains__', '__iter__', '__len__')
class MySet(Set):
def __contains__(self, x):
return False
def __len__(self):
return 0
def __iter__(self):
return iter([])
self.validate_comparison(MySet())
def test_hash_Set(self):
class OneTwoThreeSet(Set):
def __init__(self):
self.contents = [1, 2, 3]
def __contains__(self, x):
return x in self.contents
def __len__(self):
return len(self.contents)
def __iter__(self):
return iter(self.contents)
def __hash__(self):
return self._hash()
a, b = OneTwoThreeSet(), OneTwoThreeSet()
self.assertTrue(hash(a) == hash(b))
def test_MutableSet(self):
self.assertIsInstance(set(), MutableSet)
self.assertTrue(issubclass(set, MutableSet))
self.assertNotIsInstance(frozenset(), MutableSet)
self.assertFalse(issubclass(frozenset, MutableSet))
self.validate_abstract_methods(MutableSet, '__contains__', '__iter__', '__len__',
'add', 'discard')
def test_issue_5647(self):
# MutableSet.__iand__ mutated the set during iteration
s = WithSet('abcd')
s &= WithSet('cdef') # This used to fail
self.assertEqual(set(s), set('cd'))
def test_issue_4920(self):
# MutableSet.pop() method did not work
class MySet(collections.MutableSet):
__slots__=['__s']
def __init__(self,items=None):
if items is None:
items=[]
self.__s=set(items)
def __contains__(self,v):
return v in self.__s
def __iter__(self):
return iter(self.__s)
def __len__(self):
return len(self.__s)
def add(self,v):
result=v not in self.__s
self.__s.add(v)
return result
def discard(self,v):
result=v in self.__s
self.__s.discard(v)
return result
def __repr__(self):
return "MySet(%s)" % repr(list(self))
s = MySet([5,43,2,1])
self.assertEqual(s.pop(), 1)
def test_issue8750(self):
empty = WithSet()
full = WithSet(range(10))
s = WithSet(full)
s -= s
self.assertEqual(s, empty)
s = WithSet(full)
s ^= s
self.assertEqual(s, empty)
s = WithSet(full)
s &= s
self.assertEqual(s, full)
s |= s
self.assertEqual(s, full)
def test_issue16373(self):
# Recursion error comparing comparable and noncomparable
# Set instances
class MyComparableSet(Set):
def __contains__(self, x):
return False
def __len__(self):
return 0
def __iter__(self):
return iter([])
class MyNonComparableSet(Set):
def __contains__(self, x):
return False
def __len__(self):
return 0
def __iter__(self):
return iter([])
def __le__(self, x):
return NotImplemented
def __lt__(self, x):
return NotImplemented
cs = MyComparableSet()
ncs = MyNonComparableSet()
# Run all the variants to make sure they don't mutually recurse
ncs < cs
ncs <= cs
ncs > cs
ncs >= cs
cs < ncs
cs <= ncs
cs > ncs
cs >= ncs
def assertSameSet(self, s1, s2):
# coerce both to a real set then check equality
self.assertEqual(set(s1), set(s2))
def test_Set_interoperability_with_real_sets(self):
# Issue: 8743
class ListSet(Set):
def __init__(self, elements=()):
self.data = []
for elem in elements:
if elem not in self.data:
self.data.append(elem)
def __contains__(self, elem):
return elem in self.data
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
def __repr__(self):
return 'Set({!r})'.format(self.data)
r1 = set('abc')
r2 = set('bcd')
r3 = set('abcde')
f1 = ListSet('abc')
f2 = ListSet('bcd')
f3 = ListSet('abcde')
l1 = list('abccba')
l2 = list('bcddcb')
l3 = list('abcdeedcba')
p1 = sets.Set('abc')
p2 = sets.Set('bcd')
p3 = sets.Set('abcde')
target = r1 & r2
self.assertSameSet(f1 & f2, target)
self.assertSameSet(f1 & r2, target)
self.assertSameSet(r2 & f1, target)
self.assertSameSet(f1 & p2, target)
self.assertSameSet(p2 & f1, target)
self.assertSameSet(f1 & l2, target)
target = r1 | r2
self.assertSameSet(f1 | f2, target)
self.assertSameSet(f1 | r2, target)
self.assertSameSet(r2 | f1, target)
self.assertSameSet(f1 | p2, target)
self.assertSameSet(p2 | f1, target)
self.assertSameSet(f1 | l2, target)
fwd_target = r1 - r2
rev_target = r2 - r1
self.assertSameSet(f1 - f2, fwd_target)
self.assertSameSet(f2 - f1, rev_target)
self.assertSameSet(f1 - r2, fwd_target)
self.assertSameSet(f2 - r1, rev_target)
self.assertSameSet(r1 - f2, fwd_target)
self.assertSameSet(r2 - f1, rev_target)
self.assertSameSet(f1 - p2, fwd_target)
self.assertSameSet(f2 - p1, rev_target)
self.assertSameSet(p1 - f2, fwd_target)
self.assertSameSet(p2 - f1, rev_target)
self.assertSameSet(f1 - l2, fwd_target)
self.assertSameSet(f2 - l1, rev_target)
target = r1 ^ r2
self.assertSameSet(f1 ^ f2, target)
self.assertSameSet(f1 ^ r2, target)
self.assertSameSet(r2 ^ f1, target)
self.assertSameSet(f1 ^ p2, target)
self.assertSameSet(p2 ^ f1, target)
self.assertSameSet(f1 ^ l2, target)
# proper subset
self.assertTrue(f1 < f3)
self.assertFalse(f1 < f1)
self.assertFalse(f1 < f2)
self.assertTrue(r1 < f3)
self.assertFalse(r1 < f1)
self.assertFalse(r1 < f2)
self.assertTrue(r1 < r3)
self.assertFalse(r1 < r1)
self.assertFalse(r1 < r2)
with test_support.check_py3k_warnings():
# python 2 only, cross-type compares will succeed
f1 < l3
f1 < l1
f1 < l2
# any subset
self.assertTrue(f1 <= f3)
self.assertTrue(f1 <= f1)
self.assertFalse(f1 <= f2)
self.assertTrue(r1 <= f3)
self.assertTrue(r1 <= f1)
self.assertFalse(r1 <= f2)
self.assertTrue(r1 <= r3)
self.assertTrue(r1 <= r1)
self.assertFalse(r1 <= r2)
with test_support.check_py3k_warnings():
# python 2 only, cross-type compares will succeed
f1 <= l3
f1 <= l1
f1 <= l2
# proper superset
self.assertTrue(f3 > f1)
self.assertFalse(f1 > f1)
self.assertFalse(f2 > f1)
self.assertTrue(r3 > r1)
self.assertFalse(f1 > r1)
self.assertFalse(f2 > r1)
self.assertTrue(r3 > r1)
self.assertFalse(r1 > r1)
self.assertFalse(r2 > r1)
with test_support.check_py3k_warnings():
# python 2 only, cross-type compares will succeed
f1 > l3
f1 > l1
f1 > l2
# any superset
self.assertTrue(f3 >= f1)
self.assertTrue(f1 >= f1)
self.assertFalse(f2 >= f1)
self.assertTrue(r3 >= r1)
self.assertTrue(f1 >= r1)
self.assertFalse(f2 >= r1)
self.assertTrue(r3 >= r1)
self.assertTrue(r1 >= r1)
self.assertFalse(r2 >= r1)
with test_support.check_py3k_warnings():
# python 2 only, cross-type compares will succeed
f1 >= l3
            f1 >= l1
f1 >= l2
# equality
self.assertTrue(f1 == f1)
self.assertTrue(r1 == f1)
self.assertTrue(f1 == r1)
self.assertFalse(f1 == f3)
self.assertFalse(r1 == f3)
self.assertFalse(f1 == r3)
# python 2 only, cross-type compares will succeed
f1 == l3
f1 == l1
f1 == l2
# inequality
self.assertFalse(f1 != f1)
self.assertFalse(r1 != f1)
self.assertFalse(f1 != r1)
self.assertTrue(f1 != f3)
self.assertTrue(r1 != f3)
self.assertTrue(f1 != r3)
# python 2 only, cross-type compares will succeed
f1 != l3
f1 != l1
f1 != l2
def test_Mapping(self):
for sample in [dict]:
self.assertIsInstance(sample(), Mapping)
self.assertTrue(issubclass(sample, Mapping))
self.validate_abstract_methods(Mapping, '__contains__', '__iter__', '__len__',
'__getitem__')
class MyMapping(collections.Mapping):
def __len__(self):
return 0
def __getitem__(self, i):
raise IndexError
def __iter__(self):
return iter(())
self.validate_comparison(MyMapping())
def test_MutableMapping(self):
for sample in [dict]:
self.assertIsInstance(sample(), MutableMapping)
self.assertTrue(issubclass(sample, MutableMapping))
self.validate_abstract_methods(MutableMapping, '__contains__', '__iter__', '__len__',
'__getitem__', '__setitem__', '__delitem__')
def test_Sequence(self):
for sample in [tuple, list, str]:
self.assertIsInstance(sample(), Sequence)
self.assertTrue(issubclass(sample, Sequence))
self.assertTrue(issubclass(basestring, Sequence))
self.assertIsInstance(range(10), Sequence)
self.assertTrue(issubclass(xrange, Sequence))
self.assertTrue(issubclass(str, Sequence))
self.validate_abstract_methods(Sequence, '__contains__', '__iter__', '__len__',
'__getitem__')
def test_MutableSequence(self):
for sample in [tuple, str]:
self.assertNotIsInstance(sample(), MutableSequence)
self.assertFalse(issubclass(sample, MutableSequence))
for sample in [list]:
self.assertIsInstance(sample(), MutableSequence)
self.assertTrue(issubclass(sample, MutableSequence))
self.assertFalse(issubclass(basestring, MutableSequence))
self.validate_abstract_methods(MutableSequence, '__contains__', '__iter__',
'__len__', '__getitem__', '__setitem__', '__delitem__', 'insert')
class TestCounter(unittest.TestCase):
def test_basics(self):
c = Counter('abcaba')
self.assertEqual(c, Counter({'a':3 , 'b': 2, 'c': 1}))
self.assertEqual(c, Counter(a=3, b=2, c=1))
self.assertIsInstance(c, dict)
self.assertIsInstance(c, Mapping)
self.assertTrue(issubclass(Counter, dict))
self.assertTrue(issubclass(Counter, Mapping))
self.assertEqual(len(c), 3)
self.assertEqual(sum(c.values()), 6)
self.assertEqual(sorted(c.values()), [1, 2, 3])
self.assertEqual(sorted(c.keys()), ['a', 'b', 'c'])
self.assertEqual(sorted(c), ['a', 'b', 'c'])
self.assertEqual(sorted(c.items()),
[('a', 3), ('b', 2), ('c', 1)])
self.assertEqual(c['b'], 2)
self.assertEqual(c['z'], 0)
with test_support.check_py3k_warnings():
self.assertEqual(c.has_key('c'), True)
self.assertEqual(c.has_key('z'), False)
self.assertEqual(c.__contains__('c'), True)
self.assertEqual(c.__contains__('z'), False)
self.assertEqual(c.get('b', 10), 2)
self.assertEqual(c.get('z', 10), 10)
self.assertEqual(c, dict(a=3, b=2, c=1))
self.assertEqual(repr(c), "Counter({'a': 3, 'b': 2, 'c': 1})")
self.assertEqual(c.most_common(), [('a', 3), ('b', 2), ('c', 1)])
for i in range(5):
self.assertEqual(c.most_common(i),
[('a', 3), ('b', 2), ('c', 1)][:i])
self.assertEqual(''.join(sorted(c.elements())), 'aaabbc')
c['a'] += 1 # increment an existing value
c['b'] -= 2 # sub existing value to zero
del c['c'] # remove an entry
del c['c'] # make sure that del doesn't raise KeyError
c['d'] -= 2 # sub from a missing value
c['e'] = -5 # directly assign a missing value
c['f'] += 4 # add to a missing value
self.assertEqual(c, dict(a=4, b=0, d=-2, e=-5, f=4))
self.assertEqual(''.join(sorted(c.elements())), 'aaaaffff')
self.assertEqual(c.pop('f'), 4)
self.assertNotIn('f', c)
for i in range(3):
elem, cnt = c.popitem()
self.assertNotIn(elem, c)
c.clear()
self.assertEqual(c, {})
self.assertEqual(repr(c), 'Counter()')
self.assertRaises(NotImplementedError, Counter.fromkeys, 'abc')
self.assertRaises(TypeError, hash, c)
c.update(dict(a=5, b=3))
c.update(c=1)
c.update(Counter('a' * 50 + 'b' * 30))
c.update() # test case with no args
c.__init__('a' * 500 + 'b' * 300)
c.__init__('cdc')
c.__init__()
self.assertEqual(c, dict(a=555, b=333, c=3, d=1))
self.assertEqual(c.setdefault('d', 5), 1)
self.assertEqual(c['d'], 1)
self.assertEqual(c.setdefault('e', 5), 5)
self.assertEqual(c['e'], 5)
def test_init(self):
self.assertEqual(list(Counter(self=42).items()), [('self', 42)])
self.assertEqual(list(Counter(iterable=42).items()), [('iterable', 42)])
self.assertEqual(list(Counter(iterable=None).items()), [('iterable', None)])
self.assertRaises(TypeError, Counter, 42)
self.assertRaises(TypeError, Counter, (), ())
self.assertRaises(TypeError, Counter.__init__)
def test_update(self):
c = Counter()
c.update(self=42)
self.assertEqual(list(c.items()), [('self', 42)])
c = Counter()
c.update(iterable=42)
self.assertEqual(list(c.items()), [('iterable', 42)])
c = Counter()
c.update(iterable=None)
self.assertEqual(list(c.items()), [('iterable', None)])
self.assertRaises(TypeError, Counter().update, 42)
self.assertRaises(TypeError, Counter().update, {}, {})
self.assertRaises(TypeError, Counter.update)
def test_copying(self):
# Check that counters are copyable, deepcopyable, picklable, and
        # have a repr/eval round-trip
words = Counter('which witch had which witches wrist watch'.split())
update_test = Counter()
update_test.update(words)
for i, dup in enumerate([
words.copy(),
copy.copy(words),
copy.deepcopy(words),
pickle.loads(pickle.dumps(words, 0)),
pickle.loads(pickle.dumps(words, 1)),
pickle.loads(pickle.dumps(words, 2)),
pickle.loads(pickle.dumps(words, -1)),
cPickle.loads(cPickle.dumps(words, 0)),
cPickle.loads(cPickle.dumps(words, 1)),
cPickle.loads(cPickle.dumps(words, 2)),
cPickle.loads(cPickle.dumps(words, -1)),
eval(repr(words)),
update_test,
Counter(words),
]):
msg = (i, dup, words)
self.assertTrue(dup is not words)
self.assertEqual(dup, words)
self.assertEqual(len(dup), len(words))
self.assertEqual(type(dup), type(words))
def test_copy_subclass(self):
class MyCounter(Counter):
pass
c = MyCounter('slartibartfast')
d = c.copy()
self.assertEqual(d, c)
self.assertEqual(len(d), len(c))
self.assertEqual(type(d), type(c))
def test_conversions(self):
# Convert to: set, list, dict
s = 'she sells sea shells by the sea shore'
self.assertEqual(sorted(Counter(s).elements()), sorted(s))
self.assertEqual(sorted(Counter(s)), sorted(set(s)))
self.assertEqual(dict(Counter(s)), dict(Counter(s).items()))
self.assertEqual(set(Counter(s)), set(s))
def test_invariant_for_the_in_operator(self):
c = Counter(a=10, b=-2, c=0)
for elem in c:
self.assertTrue(elem in c)
self.assertIn(elem, c)
def test_multiset_operations(self):
# Verify that adding a zero counter will strip zeros and negatives
c = Counter(a=10, b=-2, c=0) + Counter()
self.assertEqual(dict(c), dict(a=10))
elements = 'abcd'
for i in range(1000):
# test random pairs of multisets
p = Counter(dict((elem, randrange(-2,4)) for elem in elements))
p.update(e=1, f=-1, g=0)
q = Counter(dict((elem, randrange(-2,4)) for elem in elements))
q.update(h=1, i=-1, j=0)
for counterop, numberop in [
(Counter.__add__, lambda x, y: max(0, x+y)),
(Counter.__sub__, lambda x, y: max(0, x-y)),
(Counter.__or__, lambda x, y: max(0,x,y)),
(Counter.__and__, lambda x, y: max(0, min(x,y))),
]:
result = counterop(p, q)
for x in elements:
self.assertEqual(numberop(p[x], q[x]), result[x],
(counterop, x, p, q))
# verify that results exclude non-positive counts
                self.assertTrue(all(x > 0 for x in result.values()))
elements = 'abcdef'
for i in range(100):
# verify that random multisets with no repeats are exactly like sets
p = Counter(dict((elem, randrange(0, 2)) for elem in elements))
q = Counter(dict((elem, randrange(0, 2)) for elem in elements))
for counterop, setop in [
(Counter.__sub__, set.__sub__),
(Counter.__or__, set.__or__),
(Counter.__and__, set.__and__),
]:
counter_result = counterop(p, q)
set_result = setop(set(p.elements()), set(q.elements()))
self.assertEqual(counter_result, dict.fromkeys(set_result, 1))
def test_subtract(self):
c = Counter(a=-5, b=0, c=5, d=10, e=15,g=40)
c.subtract(a=1, b=2, c=-3, d=10, e=20, f=30, h=-50)
self.assertEqual(c, Counter(a=-6, b=-2, c=8, d=0, e=-5, f=-30, g=40, h=50))
c = Counter(a=-5, b=0, c=5, d=10, e=15,g=40)
c.subtract(Counter(a=1, b=2, c=-3, d=10, e=20, f=30, h=-50))
self.assertEqual(c, Counter(a=-6, b=-2, c=8, d=0, e=-5, f=-30, g=40, h=50))
c = Counter('aaabbcd')
c.subtract('aaaabbcce')
self.assertEqual(c, Counter(a=-1, b=0, c=-1, d=1, e=-1))
c = Counter()
c.subtract(self=42)
self.assertEqual(list(c.items()), [('self', -42)])
c = Counter()
c.subtract(iterable=42)
self.assertEqual(list(c.items()), [('iterable', -42)])
self.assertRaises(TypeError, Counter().subtract, 42)
self.assertRaises(TypeError, Counter().subtract, {}, {})
self.assertRaises(TypeError, Counter.subtract)
class TestOrderedDict(unittest.TestCase):
def test_init(self):
with self.assertRaises(TypeError):
OrderedDict([('a', 1), ('b', 2)], None) # too many args
pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
self.assertEqual(sorted(OrderedDict(dict(pairs)).items()), pairs) # dict input
self.assertEqual(sorted(OrderedDict(**dict(pairs)).items()), pairs) # kwds input
self.assertEqual(list(OrderedDict(pairs).items()), pairs) # pairs input
self.assertEqual(list(OrderedDict([('a', 1), ('b', 2), ('c', 9), ('d', 4)],
c=3, e=5).items()), pairs) # mixed input
# make sure no positional args conflict with possible kwdargs
self.assertEqual(list(OrderedDict(self=42).items()), [('self', 42)])
self.assertEqual(list(OrderedDict(other=42).items()), [('other', 42)])
self.assertRaises(TypeError, OrderedDict, 42)
self.assertRaises(TypeError, OrderedDict, (), ())
self.assertRaises(TypeError, OrderedDict.__init__)
# Make sure that direct calls to __init__ do not clear previous contents
d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
d.__init__([('e', 5), ('f', 6)], g=7, d=4)
self.assertEqual(list(d.items()),
[('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
def test_update(self):
with self.assertRaises(TypeError):
OrderedDict().update([('a', 1), ('b', 2)], None) # too many args
pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
od = OrderedDict()
od.update(dict(pairs))
self.assertEqual(sorted(od.items()), pairs) # dict input
od = OrderedDict()
od.update(**dict(pairs))
self.assertEqual(sorted(od.items()), pairs) # kwds input
od = OrderedDict()
od.update(pairs)
self.assertEqual(list(od.items()), pairs) # pairs input
od = OrderedDict()
od.update([('a', 1), ('b', 2), ('c', 9), ('d', 4)], c=3, e=5)
self.assertEqual(list(od.items()), pairs) # mixed input
# Issue 9137: Named argument called 'other' or 'self'
# shouldn't be treated specially.
od = OrderedDict()
od.update(self=23)
self.assertEqual(list(od.items()), [('self', 23)])
od = OrderedDict()
od.update(other={})
self.assertEqual(list(od.items()), [('other', {})])
od = OrderedDict()
od.update(red=5, blue=6, other=7, self=8)
self.assertEqual(sorted(list(od.items())),
[('blue', 6), ('other', 7), ('red', 5), ('self', 8)])
# Make sure that direct calls to update do not clear previous contents
        # and that updated items are not moved to the end
d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
d.update([('e', 5), ('f', 6)], g=7, d=4)
self.assertEqual(list(d.items()),
[('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
self.assertRaises(TypeError, OrderedDict().update, 42)
self.assertRaises(TypeError, OrderedDict().update, (), ())
self.assertRaises(TypeError, OrderedDict.update)
def test_abc(self):
self.assertIsInstance(OrderedDict(), MutableMapping)
self.assertTrue(issubclass(OrderedDict, MutableMapping))
def test_clear(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
self.assertEqual(len(od), len(pairs))
od.clear()
self.assertEqual(len(od), 0)
def test_delitem(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
od = OrderedDict(pairs)
del od['a']
self.assertNotIn('a', od)
with self.assertRaises(KeyError):
del od['a']
self.assertEqual(list(od.items()), pairs[:2] + pairs[3:])
def test_setitem(self):
od = OrderedDict([('d', 1), ('b', 2), ('c', 3), ('a', 4), ('e', 5)])
od['c'] = 10 # existing element
od['f'] = 20 # new element
self.assertEqual(list(od.items()),
[('d', 1), ('b', 2), ('c', 10), ('a', 4), ('e', 5), ('f', 20)])
def test_iterators(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
self.assertEqual(list(od), [t[0] for t in pairs])
self.assertEqual(od.keys()[:], [t[0] for t in pairs])
self.assertEqual(od.values()[:], [t[1] for t in pairs])
self.assertEqual(od.items()[:], pairs)
self.assertEqual(list(od.iterkeys()), [t[0] for t in pairs])
self.assertEqual(list(od.itervalues()), [t[1] for t in pairs])
self.assertEqual(list(od.iteritems()), pairs)
self.assertEqual(list(reversed(od)),
[t[0] for t in reversed(pairs)])
def test_popitem(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
while pairs:
self.assertEqual(od.popitem(), pairs.pop())
with self.assertRaises(KeyError):
od.popitem()
self.assertEqual(len(od), 0)
def test_pop(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
shuffle(pairs)
while pairs:
k, v = pairs.pop()
self.assertEqual(od.pop(k), v)
with self.assertRaises(KeyError):
od.pop('xyz')
self.assertEqual(len(od), 0)
self.assertEqual(od.pop(k, 12345), 12345)
# make sure pop still works when __missing__ is defined
class Missing(OrderedDict):
def __missing__(self, key):
return 0
m = Missing(a=1)
self.assertEqual(m.pop('b', 5), 5)
self.assertEqual(m.pop('a', 6), 1)
self.assertEqual(m.pop('a', 6), 6)
with self.assertRaises(KeyError):
m.pop('a')
def test_equality(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od1 = OrderedDict(pairs)
od2 = OrderedDict(pairs)
self.assertEqual(od1, od2) # same order implies equality
pairs = pairs[2:] + pairs[:2]
od2 = OrderedDict(pairs)
self.assertNotEqual(od1, od2) # different order implies inequality
# comparison to regular dict is not order sensitive
self.assertEqual(od1, dict(od2))
self.assertEqual(dict(od2), od1)
        # different length implies inequality
self.assertNotEqual(od1, OrderedDict(pairs[:-1]))
def test_copying(self):
# Check that ordered dicts are copyable, deepcopyable, picklable,
# and have a repr/eval round-trip
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
od = OrderedDict(pairs)
update_test = OrderedDict()
update_test.update(od)
for i, dup in enumerate([
od.copy(),
copy.copy(od),
copy.deepcopy(od),
pickle.loads(pickle.dumps(od, 0)),
pickle.loads(pickle.dumps(od, 1)),
pickle.loads(pickle.dumps(od, 2)),
pickle.loads(pickle.dumps(od, -1)),
eval(repr(od)),
update_test,
OrderedDict(od),
]):
self.assertTrue(dup is not od)
self.assertEqual(dup, od)
self.assertEqual(list(dup.items()), list(od.items()))
self.assertEqual(len(dup), len(od))
self.assertEqual(type(dup), type(od))
def test_yaml_linkage(self):
        # Verify that __reduce__ is set up in a way that supports PyYAML's dump() feature.
# In yaml, lists are native but tuples are not.
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
od = OrderedDict(pairs)
# yaml.dump(od) -->
# '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n - [b, 2]\n'
self.assertTrue(all(type(pair)==list for pair in od.__reduce__()[1]))
def test_reduce_not_too_fat(self):
# do not save instance dictionary if not needed
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
od = OrderedDict(pairs)
self.assertEqual(len(od.__reduce__()), 2)
od.x = 10
self.assertEqual(len(od.__reduce__()), 3)
def test_repr(self):
od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])
self.assertEqual(repr(od),
"OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])")
self.assertEqual(eval(repr(od)), od)
self.assertEqual(repr(OrderedDict()), "OrderedDict()")
def test_repr_recursive(self):
# See issue #9826
od = OrderedDict.fromkeys('abc')
od['x'] = od
self.assertEqual(repr(od),
"OrderedDict([('a', None), ('b', None), ('c', None), ('x', ...)])")
def test_setdefault(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
od = OrderedDict(pairs)
pair_order = list(od.items())
self.assertEqual(od.setdefault('a', 10), 3)
# make sure order didn't change
self.assertEqual(list(od.items()), pair_order)
self.assertEqual(od.setdefault('x', 10), 10)
# make sure 'x' is added to the end
self.assertEqual(list(od.items())[-1], ('x', 10))
# make sure setdefault still works when __missing__ is defined
class Missing(OrderedDict):
def __missing__(self, key):
return 0
self.assertEqual(Missing().setdefault(5, 9), 9)
def test_reinsert(self):
# Given insert a, insert b, delete a, re-insert a,
# verify that a is now later than b.
od = OrderedDict()
od['a'] = 1
od['b'] = 2
del od['a']
od['a'] = 1
self.assertEqual(list(od.items()), [('b', 2), ('a', 1)])
def test_views(self):
s = 'the quick brown fox jumped over a lazy dog yesterday before dawn'.split()
od = OrderedDict.fromkeys(s)
self.assertEqual(list(od.viewkeys()), s)
self.assertEqual(list(od.viewvalues()), [None for k in s])
self.assertEqual(list(od.viewitems()), [(k, None) for k in s])
def test_override_update(self):
# Verify that subclasses can override update() without breaking __init__()
class MyOD(OrderedDict):
def update(self, *args, **kwds):
raise Exception()
items = [('a', 1), ('c', 3), ('b', 2)]
self.assertEqual(list(MyOD(items).items()), items)
class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
type2test = OrderedDict
def test_popitem(self):
d = self._empty_mapping()
self.assertRaises(KeyError, d.popitem)
class MyOrderedDict(OrderedDict):
pass
class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
type2test = MyOrderedDict
def test_popitem(self):
d = self._empty_mapping()
self.assertRaises(KeyError, d.popitem)
import collections
def test_main(verbose=None):
NamedTupleDocs = doctest.DocTestSuite(module=collections)
test_classes = [TestNamedTuple, NamedTupleDocs, TestOneTrickPonyABCs,
TestCollectionABCs, TestCounter,
TestOrderedDict, GeneralMappingTests, SubclassMappingTests]
test_support.run_unittest(*test_classes)
test_support.run_doctest(collections, verbose)
if __name__ == "__main__":
test_main(verbose=True)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from ..describe import Description, autoDescribeRoute
from ..rest import Resource, filtermodel, setResponseHeader
from girder.api import access
from girder.constants import AccessType, TokenScope
from girder.models.model_base import AccessException
from girder.utility import ziputil
from girder.utility.progress import ProgressContext
class Collection(Resource):
"""API Endpoint for collections."""
def __init__(self):
super(Collection, self).__init__()
self.resourceName = 'collection'
self.route('DELETE', (':id',), self.deleteCollection)
self.route('GET', (), self.find)
self.route('GET', (':id',), self.getCollection)
self.route('GET', (':id', 'details'), self.getCollectionDetails)
self.route('GET', (':id', 'download'), self.downloadCollection)
self.route('GET', (':id', 'access'), self.getCollectionAccess)
self.route('POST', (), self.createCollection)
self.route('PUT', (':id',), self.updateCollection)
self.route('PUT', (':id', 'access'), self.updateCollectionAccess)
@access.public(scope=TokenScope.DATA_READ)
@filtermodel(model='collection')
@autoDescribeRoute(
Description('List or search for collections.')
.responseClass('Collection', array=True)
.param('text', 'Pass this to perform a text search for collections.', required=False)
.pagingParams(defaultSort='name')
)
def find(self, text, limit, offset, sort, params):
user = self.getCurrentUser()
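        # A text query triggers full-text search; otherwise fall back to a plain
        # paginated listing of the collections visible to the current user.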
if text is not None:
return list(self.model('collection').textSearch(
text, user=user, limit=limit, offset=offset))
return list(self.model('collection').list(
user=user, offset=offset, limit=limit, sort=sort))
@access.user(scope=TokenScope.DATA_WRITE)
@filtermodel(model='collection')
@autoDescribeRoute(
Description('Create a new collection.')
.responseClass('Collection')
.param('name', 'Name for the collection. Must be unique.')
.param('description', 'Collection description.', required=False)
.param('public', 'Whether the collection should be publicly visible.',
required=False, dataType='boolean', default=False)
.errorResponse()
.errorResponse('You are not authorized to create collections.', 403)
)
def createCollection(self, name, description, public, params):
user = self.getCurrentUser()
if not self.model('collection').hasCreatePrivilege(user):
raise AccessException('You are not authorized to create collections.')
return self.model('collection').createCollection(
name=name, description=description, public=public, creator=user)
@access.public(scope=TokenScope.DATA_READ)
@filtermodel(model='collection')
@autoDescribeRoute(
Description('Get a collection by ID.')
.responseClass('Collection')
.modelParam('id', model='collection', level=AccessType.READ)
.errorResponse('ID was invalid.')
.errorResponse('Read permission denied on the collection.', 403)
)
def getCollection(self, collection, params):
return collection
@access.public(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Get detailed information about a collection.')
.modelParam('id', model='collection', level=AccessType.READ)
.errorResponse()
.errorResponse('Read access was denied on the collection.', 403)
)
def getCollectionDetails(self, collection, params):
return {
'nFolders': self.model('collection').countFolders(
collection, user=self.getCurrentUser(), level=AccessType.READ)
}
@access.cookie
@access.public(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Download an entire collection as a zip archive.')
.modelParam('id', model='collection', level=AccessType.READ)
.jsonParam('mimeFilter', 'JSON list of MIME types to include.', requireArray=True,
required=False)
.errorResponse('ID was invalid.')
.errorResponse('Read access was denied for the collection.', 403)
)
def downloadCollection(self, collection, mimeFilter, params):
setResponseHeader('Content-Type', 'application/zip')
setResponseHeader(
'Content-Disposition', 'attachment; filename="%s%s"' % (collection['name'], '.zip'))
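        # Build the archive lazily: each file under the collection is added to a
        # ZipGenerator and streamed back chunk by chunk, followed by the footer.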
def stream():
zip = ziputil.ZipGenerator(collection['name'])
for (path, file) in self.model('collection').fileList(
collection, user=self.getCurrentUser(), subpath=False, mimeFilter=mimeFilter):
for data in zip.addFile(file, path):
yield data
yield zip.footer()
return stream
@access.user(scope=TokenScope.DATA_OWN)
@autoDescribeRoute(
Description('Get the access control list for a collection.')
.modelParam('id', model='collection', level=AccessType.ADMIN)
.errorResponse('ID was invalid.')
.errorResponse('Admin permission denied on the collection.', 403)
)
def getCollectionAccess(self, collection, params):
return self.model('collection').getFullAccessList(collection)
@access.user(scope=TokenScope.DATA_OWN)
@filtermodel(model='collection', addFields={'access'})
@autoDescribeRoute(
Description('Set the access control list for a collection.')
.modelParam('id', model='collection', level=AccessType.ADMIN)
.jsonParam('access', 'The access control list as JSON.', requireObject=True)
.jsonParam('publicFlags', 'List of public access flags to set on the collection.',
required=False, requireArray=True)
.param('public', 'Whether the collection should be publicly visible.',
dataType='boolean', required=False)
.param('recurse', 'Whether the policies should be applied to all '
'folders under this collection as well.', dataType='boolean',
default=False, required=False)
.param('progress', 'If recurse is set to True, this controls whether '
'progress notifications will be sent.', dataType='boolean',
default=False, required=False)
.errorResponse('ID was invalid.')
.errorResponse('Admin permission denied on the collection.', 403)
)
def updateCollectionAccess(
self, collection, access, public, recurse, progress, publicFlags, params):
user = self.getCurrentUser()
progress = progress and recurse
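        # Progress only makes sense when recursing; the total reported to the
        # progress context is the size of the collection's folder subtree.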
with ProgressContext(progress, user=user, title='Updating permissions',
message='Calculating progress...') as ctx:
if progress:
ctx.update(total=self.model('collection').subtreeCount(
collection, includeItems=False, user=user,
level=AccessType.ADMIN))
return self.model('collection').setAccessList(
collection, access, save=True, user=user, recurse=recurse,
progress=ctx, setPublic=public, publicFlags=publicFlags)
@access.user(scope=TokenScope.DATA_READ)
@filtermodel(model='collection')
@autoDescribeRoute(
Description('Edit a collection by ID.')
.responseClass('Collection')
.modelParam('id', model='collection', level=AccessType.WRITE)
.param('name', 'Unique name for the collection.', required=False, strip=True)
.param('description', 'Collection description.', required=False, strip=True)
.errorResponse('ID was invalid.')
.errorResponse('Write permission denied on the collection.', 403)
)
def updateCollection(self, collection, name, description, params):
if name is not None:
collection['name'] = name
if description is not None:
collection['description'] = description
return self.model('collection').updateCollection(collection)
@access.user(scope=TokenScope.DATA_OWN)
@autoDescribeRoute(
Description('Delete a collection by ID.')
.modelParam('id', model='collection', level=AccessType.ADMIN)
.errorResponse('ID was invalid.')
.errorResponse('Admin permission denied on the collection.', 403)
)
def deleteCollection(self, collection, params):
self.model('collection').remove(collection)
return {'message': 'Deleted collection %s.' % collection['name']}
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library to train Inception using multiple GPU's with synchronous updates.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from datetime import datetime
import os.path
import re
import time
import numpy as np
import tensorflow as tf
from inception import image_processing
from inception import inception_model as inception
from inception.slim import slim
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/imagenet_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 10000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_string('subset', 'train',
"""Either 'train' or 'validation'.""")
# Flags governing the hardware employed for running TensorFlow.
tf.app.flags.DEFINE_integer('num_gpus', 1,
"""How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
# Flags governing the type of training.
tf.app.flags.DEFINE_boolean('fine_tune', False,
"""If set, randomly initialize the final layer """
"""of weights in order to train the network on a """
"""new task.""")
tf.app.flags.DEFINE_string('pretrained_model_checkpoint_path', '',
"""If specified, restore this pretrained model """
"""before beginning any training.""")
# **IMPORTANT**
# Please note that this learning rate schedule is heavily dependent on the
# hardware architecture, batch size and any changes to the model architecture
# specification. Selecting a finely tuned learning rate schedule is an
# empirical process that requires some experimentation. Please see README.md
# for more guidance and discussion.
#
# With 8 Tesla K40's and a batch size = 256, the following setup achieves
# precision@1 = 73.5% after 100 hours and 100K steps (20 epochs).
# Learning rate decay factor selected from http://arxiv.org/abs/1404.5997.
tf.app.flags.DEFINE_float('initial_learning_rate', 0.1,
"""Initial learning rate.""")
tf.app.flags.DEFINE_float('num_epochs_per_decay', 30.0,
"""Epochs after which learning rate decays.""")
tf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.16,
"""Learning rate decay factor.""")
# Constants dictating the learning rate schedule.
RMSPROP_DECAY = 0.9 # Decay term for RMSProp.
RMSPROP_MOMENTUM = 0.9 # Momentum in RMSProp.
RMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp.
def _tower_loss(images, labels, num_classes, scope):
"""Calculate the total loss on a single tower running the ImageNet model.
We perform 'batch splitting'. This means that we cut up a batch across
  multiple GPUs. For instance, if the batch size = 32 and num_gpus = 2,
  then each tower will operate on a batch of 16 images.
Args:
images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
FLAGS.image_size, 3].
labels: 1-D integer Tensor of [batch_size].
num_classes: number of classes
scope: unique prefix string identifying the ImageNet tower, e.g.
'tower_0'.
Returns:
Tensor of shape [] containing the total loss for a batch of data
"""
# When fine-tuning a model, we do not restore the logits but instead we
# randomly initialize the logits. The number of classes in the output of the
  # logits is the number of classes in the specified Dataset.
restore_logits = not FLAGS.fine_tune
# Build inference Graph.
logits = inception.inference(images, num_classes, for_training=True,
restore_logits=restore_logits,
scope=scope)
# Build the portion of the Graph calculating the losses. Note that we will
# assemble the total_loss using a custom function below.
split_batch_size = images.get_shape().as_list()[0]
inception.loss(logits, labels, batch_size=split_batch_size)
# Assemble all of the losses for the current tower only.
losses = tf.get_collection(slim.losses.LOSSES_COLLECTION, scope)
# Calculate the total loss for the current tower.
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = tf.add_n(losses + regularization_losses, name='total_loss')
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
loss_averages_op = loss_averages.apply(losses + [total_loss])
  # Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on TensorBoard.
loss_name = re.sub('%s_[0-9]*/' % inception.TOWER_NAME, '', l.op.name)
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.scalar_summary(loss_name +' (raw)', l)
tf.scalar_summary(loss_name, loss_averages.average(l))
with tf.control_dependencies([loss_averages_op]):
total_loss = tf.identity(total_loss)
return total_loss
def _average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(0, grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def train(dataset):
"""Train on dataset for a number of steps."""
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
# Calculate the learning rate schedule.
num_batches_per_epoch = (dataset.num_examples_per_epoch() /
FLAGS.batch_size)
decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,
global_step,
decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True)
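# Hedged note (added for clarity): with staircase=True the schedule above is
# lr = initial_learning_rate * learning_rate_decay_factor ** floor(global_step / decay_steps).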
# Create an optimizer that performs gradient descent.
opt = tf.train.RMSPropOptimizer(lr, RMSPROP_DECAY,
momentum=RMSPROP_MOMENTUM,
epsilon=RMSPROP_EPSILON)
# Get images and labels for ImageNet and split the batch across GPUs.
assert FLAGS.batch_size % FLAGS.num_gpus == 0, (
'Batch size must be divisible by number of GPUs')
split_batch_size = int(FLAGS.batch_size / FLAGS.num_gpus)
# Override the number of preprocessing threads to account for the increased
# number of GPU towers.
num_preprocess_threads = FLAGS.num_preprocess_threads * FLAGS.num_gpus
images, labels = image_processing.distorted_inputs(
dataset,
num_preprocess_threads=num_preprocess_threads)
input_summaries = copy.copy(tf.get_collection(tf.GraphKeys.SUMMARIES))
# Number of classes in the Dataset label set plus 1.
# Label 0 is reserved for an (unused) background class.
num_classes = dataset.num_classes() + 1
# Calculate the gradients for each model tower.
tower_grads = []
for i in xrange(FLAGS.num_gpus):
with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % (inception.TOWER_NAME, i)) as scope:
# Split the batch of images and labels.
batch_start = split_batch_size * i
images_batch = tf.slice(images,
begin=[batch_start, 0, 0, 0],
size=[split_batch_size, -1, -1, -1])
labels_batch = tf.slice(labels,
begin=[batch_start],
size=[split_batch_size])
# Force all Variables to reside on the CPU.
with slim.arg_scope([slim.variables.variable], device='/cpu:0'):
# Calculate the loss for one tower of the ImageNet model. This
# function constructs the entire ImageNet model but shares the
# variables across all towers.
loss = _tower_loss(images_batch, labels_batch, num_classes, scope)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
# Retain the summaries from the final tower.
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
# Retain the Batch Normalization update operations only from the
# final tower. Ideally, we should grab the updates from all towers
# but these stats accumulate extremely fast so we can ignore the
# other stats from the other towers without significant detriment.
batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION,
scope)
# Calculate the gradients for the batch of data on this ImageNet
# tower.
grads = opt.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = _average_gradients(tower_grads)
# Add summaries for the input processing and global_step.
summaries.extend(input_summaries)
# Add a summary to track the learning rate.
summaries.append(tf.scalar_summary('learning_rate', lr))
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
summaries.append(
tf.histogram_summary(var.op.name + '/gradients', grad))
# Apply the gradients to adjust the shared variables.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
summaries.append(tf.histogram_summary(var.op.name, var))
# Track the moving averages of all trainable variables.
# Note that we maintain a "double-average" of the BatchNormalization
# global statistics. This is more complicated than it needs to be but we employ
# this for backward-compatibility with our previous models.
variable_averages = tf.train.ExponentialMovingAverage(
inception.MOVING_AVERAGE_DECAY, global_step)
# Another possibility is to use tf.slim.get_variables().
variables_to_average = (tf.trainable_variables() +
tf.moving_average_variables())
variables_averages_op = variable_averages.apply(variables_to_average)
# Group all updates into a single train op.
batchnorm_updates_op = tf.group(*batchnorm_updates)
train_op = tf.group(apply_gradient_op, variables_averages_op,
batchnorm_updates_op)
# Create a saver.
saver = tf.train.Saver(tf.all_variables())
# Build the summary operation from the last tower summaries.
summary_op = tf.merge_summary(summaries)
# Build an initialization operation to run below.
init = tf.initialize_all_variables()
# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
sess.run(init)
if FLAGS.pretrained_model_checkpoint_path:
assert tf.gfile.Exists(FLAGS.pretrained_model_checkpoint_path)
variables_to_restore = tf.get_collection(
slim.variables.VARIABLES_TO_RESTORE)
restorer = tf.train.Saver(variables_to_restore)
restorer.restore(sess, FLAGS.pretrained_model_checkpoint_path)
print('%s: Pre-trained model restored from %s' %
(datetime.now(), FLAGS.pretrained_model_checkpoint_path))
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.train.SummaryWriter(
FLAGS.train_dir,
graph_def=sess.graph.as_graph_def(add_shapes=True))
for step in xrange(FLAGS.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
examples_per_sec = FLAGS.batch_size / float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print(format_str % (datetime.now(), step, loss_value,
examples_per_sec, duration))
if step % 100 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 5000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteTablesOperations:
"""RouteTablesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_table_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_table_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_table_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.RouteTable":
"""Gets the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteTable, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_02_01.models.RouteTable
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
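# Illustrative usage sketch (not generated code; client and resource names are
# hypothetical). With an authenticated async NetworkManagementClient this operation
# is typically reached as:
#   route_table = await network_client.route_tables.get("my-rg", "my-route-table")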
async def _create_or_update_initial(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.RouteTable",
**kwargs: Any
) -> "_models.RouteTable":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'RouteTable')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.RouteTable",
**kwargs: Any
) -> AsyncLROPoller["_models.RouteTable"]:
"""Create or updates a route table in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to the create or update route table operation.
:type parameters: ~azure.mgmt.network.v2019_02_01.models.RouteTable
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteTable or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_02_01.models.RouteTable]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
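# Illustrative usage sketch (hypothetical names): the long-running create/update is
# driven through the returned poller, e.g.
#   poller = await network_client.route_tables.begin_create_or_update(
#       "my-rg", "my-route-table", {"location": "westus"})
#   route_table = await poller.result()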
async def _update_tags_initial(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.RouteTable":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
route_table_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> AsyncLROPoller["_models.RouteTable"]:
"""Updates a route table tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to update route table tags.
:type parameters: ~azure.mgmt.network.v2019_02_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteTable or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_02_01.models.RouteTable]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTable"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.RouteTableListResult"]:
"""Gets all route tables in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_02_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.RouteTableListResult"]:
"""Gets all route tables in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_02_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'} # type: ignore
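# Illustrative usage sketch (hypothetical client name): list() and list_all() return
# AsyncItemPaged, so callers iterate asynchronously, e.g.
#   async for route_table in network_client.route_tables.list_all():
#       print(route_table.name)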
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations that generate constants.
See the [constants guide](https://tensorflow.org/api_guides/python/constant_op).
"""
# Must be separate from array_ops to avoid a cyclic dependency.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_callbacks
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.profiler import trace
from tensorflow.python.util.tf_export import tf_export
def _eager_reshape(tensor, shape, ctx):
"""Eager-only version of Reshape op; requires tensor is an eager Tensor."""
attr_t = tensor._datatype_enum() # pylint: disable=protected-access
attr_tshape, (shape,) = execute.args_to_matching_eager(
[shape], ctx, [dtypes.int32, dtypes.int64], dtypes.int32)
inputs_flat = [tensor, shape]
attrs = ("T", attr_t, "Tshape", attr_tshape)
result, = execute.execute(
b"Reshape", 1, inputs=inputs_flat, attrs=attrs, ctx=ctx)
return result
def _eager_fill(dims, value, ctx):
"""Eager-only version of Fill op; requires value is an eager Tensor."""
attr_t = value.dtype.as_datatype_enum
dims = convert_to_eager_tensor(dims, ctx, dtypes.int32)
inputs_flat = [dims, value]
attrs = ("T", attr_t, "index_type", types_pb2.DT_INT32)
result, = execute.execute(
b"Fill", 1, inputs=inputs_flat, attrs=attrs, ctx=ctx)
return result
def _eager_identity(tensor, ctx):
"""Eager-only version of Identity op; requires tensor is an eager Tensor."""
attrs = ("T", tensor.dtype.as_datatype_enum)
result, = execute.execute(
b"Identity", 1, inputs=[tensor], attrs=attrs, ctx=ctx)
return result
def convert_to_eager_tensor(value, ctx, dtype=None):
"""Converts the given `value` to an `EagerTensor`.
Note that this function could return cached copies of created constants for
performance reasons.
Args:
value: value to convert to EagerTensor.
ctx: value of context.context().
dtype: optional desired dtype of the converted EagerTensor.
Returns:
EagerTensor created from value.
Raises:
TypeError: if `dtype` is not compatible with the type of t.
"""
if isinstance(value, ops.EagerTensor):
if dtype is not None and value.dtype != dtype:
raise TypeError("Expected tensor with type %r not %r" % (
dtype, value.dtype))
return value
if dtype is not None:
try:
dtype = dtype.as_datatype_enum
except AttributeError:
dtype = dtypes.as_dtype(dtype).as_datatype_enum
ctx.ensure_initialized()
return ops.EagerTensor(value, ctx.device_name, dtype)
@tf_export(v1=["constant"])
def constant_v1(
value, dtype=None, shape=None, name="Const", verify_shape=False):
"""Creates a constant tensor.
The resulting tensor is populated with values of type `dtype`, as
specified by arguments `value` and (optionally) `shape` (see examples
below).
The argument `value` can be a constant value, or a list of values of type
`dtype`. If `value` is a list, then the length of the list must be less
than or equal to the number of elements implied by the `shape` argument (if
specified). In the case where the list length is less than the number of
elements specified by `shape`, the last element in the list will be used
to fill the remaining entries.
The argument `shape` is optional. If present, it specifies the dimensions of
the resulting tensor. If not present, the shape of `value` is used.
If the argument `dtype` is not specified, then the type is inferred from
the type of `value`.
For example:
```python
# Constant 1-D Tensor populated with value list.
tensor = tf.constant([1, 2, 3, 4, 5, 6, 7]) => [1 2 3 4 5 6 7]
# Constant 2-D tensor populated with scalar value -1.
tensor = tf.constant(-1.0, shape=[2, 3]) => [[-1. -1. -1.]
[-1. -1. -1.]]
```
`tf.constant` differs from `tf.fill` in a few ways:
* `tf.constant` supports arbitrary constants, not just uniform scalar
Tensors like `tf.fill`.
* `tf.constant` creates a `Const` node in the computation graph with the
exact value at graph construction time. On the other hand, `tf.fill`
creates an Op in the graph that is expanded at runtime.
* Because `tf.constant` only embeds constant values in the graph, it does
not support dynamic shapes based on other runtime Tensors, whereas
`tf.fill` does.
Args:
value: A constant value (or list) of output type `dtype`.
dtype: The type of the elements of the resulting tensor.
shape: Optional dimensions of resulting tensor.
name: Optional name for the tensor.
verify_shape: Boolean that enables verification of a shape of values.
Returns:
A Constant Tensor.
Raises:
TypeError: if shape is incorrectly specified or unsupported.
"""
return _constant_impl(value, dtype, shape, name, verify_shape=verify_shape,
allow_broadcast=False)
@tf_export("constant", v1=[])
def constant(value, dtype=None, shape=None, name="Const"):
"""Creates a constant tensor from a tensor-like object.
Note: All eager `tf.Tensor` values are immutable (in contrast to
`tf.Variable`). There is nothing especially _constant_ about the value
returned from `tf.constant`. This function is not fundamentally different
from `tf.convert_to_tensor`. The name `tf.constant` comes from the `value`
being embedded in a `Const` node in the `tf.Graph`. `tf.constant` is useful
for asserting that the value can be embedded that way.
If the argument `dtype` is not specified, then the type is inferred from
the type of `value`.
>>> # Constant 1-D Tensor from a python list.
>>> tf.constant([1, 2, 3, 4, 5, 6])
<tf.Tensor: shape=(6,), dtype=int32,
numpy=array([1, 2, 3, 4, 5, 6], dtype=int32)>
>>> # Or a numpy array
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> tf.constant(a)
<tf.Tensor: shape=(2, 3), dtype=int64, numpy=
array([[1, 2, 3],
[4, 5, 6]])>
If `dtype` is specified the resulting tensor values are cast to the requested
`dtype`.
>>> tf.constant([1, 2, 3, 4, 5, 6], dtype=tf.float64)
<tf.Tensor: shape=(6,), dtype=float64,
numpy=array([1., 2., 3., 4., 5., 6.])>
If `shape` is set, the `value` is reshaped to match. Scalars are expanded to
fill the `shape`:
>>> tf.constant(0, shape=(2, 3))
<tf.Tensor: shape=(2, 3), dtype=int32, numpy=
array([[0, 0, 0],
[0, 0, 0]], dtype=int32)>
>>> tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
<tf.Tensor: shape=(2, 3), dtype=int32, numpy=
array([[1, 2, 3],
[4, 5, 6]], dtype=int32)>
`tf.constant` has no effect if an eager Tensor is passed as the `value`; it
even transmits gradients:
>>> v = tf.Variable([0.0])
>>> with tf.GradientTape() as g:
... loss = tf.constant(v + v)
>>> g.gradient(loss, v).numpy()
array([2.], dtype=float32)
But, since `tf.constant` embeds the value in the `tf.Graph` this fails for
symbolic tensors:
>>> with tf.compat.v1.Graph().as_default():
... i = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.float32)
... t = tf.constant(i)
Traceback (most recent call last):
...
TypeError: ...
`tf.constant` will _always_ create CPU (host) tensors. In order to create
tensors on other devices, use `tf.identity`. (If the `value` is an eager
Tensor, however, the tensor will be returned unmodified as mentioned above.)
Related Ops:
* `tf.convert_to_tensor` is similar but:
* It has no `shape` argument.
* Symbolic tensors are allowed to pass through.
>>> with tf.compat.v1.Graph().as_default():
... i = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.float32)
... t = tf.convert_to_tensor(i)
* `tf.fill`: differs in a few ways:
* `tf.constant` supports arbitrary constants, not just uniform scalar
Tensors like `tf.fill`.
* `tf.fill` creates an Op in the graph that is expanded at runtime, so it
can efficiently represent large tensors.
* Since `tf.fill` does not embed the value, it can produce dynamically
sized outputs.
Args:
value: A constant value (or list) of output type `dtype`.
dtype: The type of the elements of the resulting tensor.
shape: Optional dimensions of resulting tensor.
name: Optional name for the tensor.
Returns:
A Constant Tensor.
Raises:
TypeError: if shape is incorrectly specified or unsupported.
ValueError: if called on a symbolic tensor.
"""
return _constant_impl(value, dtype, shape, name, verify_shape=False,
allow_broadcast=True)
def _constant_impl(
value, dtype, shape, name, verify_shape, allow_broadcast):
"""Implementation of constant."""
ctx = context.context()
if ctx.executing_eagerly():
if trace.enabled:
with trace.Trace("tf.constant"):
return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
g = ops.get_default_graph()
tensor_value = attr_value_pb2.AttrValue()
tensor_value.tensor.CopyFrom(
tensor_util.make_tensor_proto(
value, dtype=dtype, shape=shape, verify_shape=verify_shape,
allow_broadcast=allow_broadcast))
dtype_value = attr_value_pb2.AttrValue(type=tensor_value.tensor.dtype)
attrs = {"value": tensor_value, "dtype": dtype_value}
const_tensor = g._create_op_internal( # pylint: disable=protected-access
"Const", [], [dtype_value.type], attrs=attrs, name=name).outputs[0]
if op_callbacks.should_invoke_op_callbacks():
# TODO(b/147670703): Once the special-op creation code paths
# are unified, remove this `if` block.
callback_outputs = op_callbacks.invoke_op_callbacks(
"Const", tuple(), attrs, (const_tensor,), op_name=name, graph=g)
if callback_outputs is not None:
const_tensor, = callback_outputs
return const_tensor
def _constant_eager_impl(ctx, value, dtype, shape, verify_shape):
"""Implementation of eager constant."""
t = convert_to_eager_tensor(value, ctx, dtype)
if shape is None:
return t
shape = tensor_shape.as_shape(shape)
if shape == t.shape:
return t
if verify_shape:
raise TypeError("Expected Tensor's shape: %s, got %s." %
(tuple(shape), tuple(t.shape)))
num_t = t.shape.num_elements()
# TODO(josh11b): Implement shape -> eager tensor conversion.
if num_t == shape.num_elements():
return _eager_reshape(t, shape.as_list(), ctx)
if num_t == 1:
if t.dtype == dtypes.bool:
# We don't have a Fill kernel for bool dtype on GPU. So we first run
# Fill on CPU and then copy to GPU if needed.
with ops.device("/device:CPU:0"):
x = _eager_fill(shape.as_list(), _eager_identity(t, ctx), ctx)
return _eager_identity(x, ctx)
else:
return _eager_fill(shape.as_list(), t, ctx)
raise TypeError("Eager execution of tf.constant with unsupported shape "
"(value has %d elements, shape is %s with %d elements)." %
(num_t, shape, shape.num_elements()))
def is_constant(tensor_or_op):
if isinstance(tensor_or_op, ops.Tensor):
op = tensor_or_op.op
else:
op = tensor_or_op
return op.type == "Const"
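# Illustrative note (added, not in the original module): in graph mode constant()
# emits a "Const" op, so for c = constant([1, 2, 3]) built inside a Graph,
# is_constant(c) is True, while the output of an op such as c + c is not.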
def _constant_tensor_conversion_function(v, dtype=None, name=None,
as_ref=False):
_ = as_ref
return constant(v, dtype=dtype, name=name)
ops.register_tensor_conversion_function(
(list, tuple), _constant_tensor_conversion_function, 100)
ops.register_tensor_conversion_function(
object, _constant_tensor_conversion_function, 200)
def _tensor_shape_tensor_conversion_function(s,
dtype=None,
name=None,
as_ref=False):
"""Function to convert TensorShape to Tensor."""
_ = as_ref
if not s.is_fully_defined():
raise ValueError(
"Cannot convert a partially known TensorShape to a Tensor: %s" % s)
s_list = s.as_list()
int64_value = 0
for dim in s_list:
if dim >= 2**31:
int64_value = dim
break
if dtype is not None:
if dtype not in (dtypes.int32, dtypes.int64):
raise TypeError("Cannot convert a TensorShape to dtype: %s" % dtype)
if dtype == dtypes.int32 and int64_value:
raise ValueError("Cannot convert a TensorShape to dtype int32; "
"a dimension is too large (%s)" % int64_value)
else:
dtype = dtypes.int64 if int64_value else dtypes.int32
if name is None:
name = "shape_as_tensor"
return constant(s_list, dtype=dtype, name=name)
ops.register_tensor_conversion_function(
tensor_shape.TensorShape, _tensor_shape_tensor_conversion_function, 100)
def _dimension_tensor_conversion_function(d,
dtype=None,
name=None,
as_ref=False):
"""Function to convert Dimension to Tensor."""
_ = as_ref
if d.value is None:
raise ValueError("Cannot convert an unknown Dimension to a Tensor: %s" % d)
if dtype is not None:
if dtype not in (dtypes.int32, dtypes.int64):
raise TypeError("Cannot convert a TensorShape to dtype: %s" % dtype)
else:
dtype = dtypes.int32
if name is None:
name = "shape_as_tensor"
return constant(d.value, dtype=dtype, name=name)
ops.register_tensor_conversion_function(
tensor_shape.Dimension, _dimension_tensor_conversion_function, 100)
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import base64
import random
from typing import List
import mock
from django.core.management import call_command
from django.urls import reverse
from rest_framework import status
from account.models import User
from agency.permissions import AgencyPermission
from agency.roles import AgencyRole
from common.consts import FLAG_TYPES, PARTNER_TYPES, SANCTION_LIST_TYPES, INTERNAL_FLAG_CATEGORIES, FLAG_CATEGORIES
from common.tests.base import BaseAPITestCase
from common.factories import (
PartnerSimpleFactory,
PartnerFlagFactory,
PartnerVerificationFactory,
AgencyOfficeFactory,
AgencyMemberFactory,
PartnerMemberFactory,
PartnerFactory,
COUNTRIES,
SanctionedNameMatchFactory,
UserFactory)
from partner.models import Partner
from review.models import PartnerFlag
from sanctionslist.models import SanctionedItem, SanctionedName, SanctionedNameMatch
class TestPartnerFlagAPITestCase(BaseAPITestCase):
user_type = BaseAPITestCase.USER_AGENCY
agency_role = AgencyRole.EDITOR_ADVANCED
def test_create_flag(self):
partner = PartnerSimpleFactory(country_code=self.user.agency_members.first().office.country.code)
url = reverse(
'partner-reviews:flags', kwargs={"partner_id": partner.id}
)
payload = {
"comment": "This is a comment on a flag",
"flag_type": FLAG_TYPES.yellow,
"contact_email": "[email protected]",
"contact_person": "Nancy",
"contact_phone": "Smith"
}
response = self.client.post(url, data=payload, format='json')
self.assertResponseStatusIs(response, status.HTTP_201_CREATED)
self.assertEquals(response.data['submitter']['name'], self.user.get_fullname())
self.assertEquals(response.data['flag_type'], FLAG_TYPES.yellow)
self.assertEquals(response.data['is_valid'], True)
self.assertEquals(response.data['comment'], payload['comment'])
def test_create_observation(self):
partner = PartnerSimpleFactory(country_code=self.user.agency_members.first().office.country.code)
url = reverse(
'partner-reviews:flags', kwargs={"partner_id": partner.id}
)
payload = {
"comment": "This is an observation",
"flag_type": FLAG_TYPES.observation,
"contact_email": "[email protected]",
"contact_person": "Nancy",
"contact_phone": "Smith"
}
response = self.client.post(url, data=payload, format='json')
self.assertResponseStatusIs(response, status.HTTP_201_CREATED)
self.assertEquals(response.data['submitter']['name'], self.user.get_fullname())
self.assertEquals(response.data['flag_type'], FLAG_TYPES.observation)
self.assertEquals(response.data['comment'], payload['comment'])
def test_patch_flag(self):
flag = PartnerFlagFactory(is_valid=True)
# Change valid status
url = reverse('partner-reviews:flag-details', kwargs={"partner_id": flag.partner.id, 'pk': flag.id})
payload = {
'is_valid': False,
'validation_comment': 'comment',
}
response = self.client.patch(url, data=payload, format='json')
self.assertResponseStatusIs(response, status.HTTP_200_OK)
self.assertEquals(response.data['is_valid'], False)
# Attempt to modify data. Should not change comment
flag_comment = flag.comment
url = reverse('partner-reviews:flag-details', kwargs={"partner_id": flag.partner.id, 'pk': flag.id})
payload = {
'comment': "%s - Appended" % flag_comment
}
response = self.client.patch(url, data=payload, format='json')
self.assertResponseStatusIs(response, status.HTTP_200_OK)
self.assertEquals(response.data['comment'], flag_comment)
def test_create_invalid_flag(self):
partner = PartnerSimpleFactory(country_code=self.user.agency_members.first().office.country.code)
url = reverse(
'partner-reviews:flags', kwargs={"partner_id": partner.id}
)
payload = {
"comment": "This is a comment on a flag",
"category": 'INVASDASDAD',
"contact_email": "[email protected]",
"contact_person": "Nancy",
"contact_phone": "Smith"
}
response = self.client.post(url, data=payload, format='json')
self.assertResponseStatusIs(response, status.HTTP_400_BAD_REQUEST)
self.assertIn('category', response.data)
payload['category'] = INTERNAL_FLAG_CATEGORIES.sanctions_match
response = self.client.post(url, data=payload, format='json')
self.assertResponseStatusIs(response, status.HTTP_400_BAD_REQUEST)
self.assertIn('category', response.data)
payload['flag_type'] = FLAG_TYPES.yellow
payload['is_valid'] = None
response = self.client.post(url, data=payload, format='json')
self.assertResponseStatusIs(response, status.HTTP_400_BAD_REQUEST)
self.assertIn('is_valid', response.data)
def test_flag_type_history_is_saved(self):
flag = PartnerFlagFactory(is_valid=True)
original_type = flag.flag_type
url = reverse('partner-reviews:flag-details', kwargs={"partner_id": flag.partner.id, 'pk': flag.id})
payload = {
'flag_type': FLAG_TYPES.escalated
}
response = self.client.patch(url, data=payload, format='json')
self.assertResponseStatusIs(response)
flag.refresh_from_db()
self.assertIn(original_type, flag.type_history)
def test_cant_add_red_flag(self):
flag = PartnerFlagFactory()
url = reverse('partner-reviews:flag-details', kwargs={"partner_id": flag.partner.id, 'pk': flag.id})
payload = {
'flag_type': FLAG_TYPES.red
}
response = self.client.patch(url, data=payload, format='json')
self.assertResponseStatusIs(response, status.HTTP_403_FORBIDDEN)
def test_escalation_flow(self):
payload = {
"comment": "This is a comment on a flag",
"flag_type": FLAG_TYPES.yellow,
"contact_email": "[email protected]",
"contact_person": "Nancy",
"contact_phone": "Smith"
}
hq_editor = AgencyMemberFactory(
office=self.user.agency_members.first().office,
role=AgencyRole.HQ_EDITOR.name
)
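# The loop below exercises both escalation outcomes: only the HQ editor may resolve an
# escalated flag; confirming it as valid turns it red and locks the partner, while
# rejecting it reverts the flag to yellow.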
for is_valid in (True, False):
partner = PartnerSimpleFactory(country_code=self.user.agency_members.first().office.country.code)
create_url = reverse('partner-reviews:flags', kwargs={"partner_id": partner.id})
response = self.client.post(create_url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_201_CREATED)
flag_url = reverse(
'partner-reviews:flag-details', kwargs={"partner_id": partner.id, 'pk': response.data['id']}
)
patch_response = self.client.patch(flag_url, data={
'flag_type': FLAG_TYPES.escalated
})
self.assertResponseStatusIs(patch_response)
self.assertEqual(patch_response.data['flag_type'], FLAG_TYPES.escalated)
self.assertEqual(patch_response.data['is_valid'], None)
patch_response = self.client.patch(flag_url, data={
'is_valid': is_valid,
})
self.assertResponseStatusIs(patch_response, status.HTTP_403_FORBIDDEN)
self.client.logout()
self.client.force_login(hq_editor.user)
patch_response = self.client.patch(flag_url, data={
'is_valid': is_valid,
})
self.assertResponseStatusIs(patch_response, status.HTTP_200_OK)
self.assertEqual(patch_response.data['flag_type'], FLAG_TYPES.red if is_valid else FLAG_TYPES.yellow)
self.assertFalse(patch_response.data['can_be_escalated'])
self.client.logout()
self.client.force_login(self.user)
partner.refresh_from_db()
if is_valid:
self.assertTrue(sum(partner.flagging_status.values()) > 0)
self.assertEqual(partner.is_locked, is_valid)
def test_listing_flags(self):
partner: Partner = PartnerFactory()
# My observations filter
my_flags: List[PartnerFlag] = PartnerFlagFactory.create_batch(3, partner=partner, submitter=self.user)
other_user = PartnerMemberFactory().user
other_ppl_flags: List[PartnerFlag] = PartnerFlagFactory.create_batch(3, partner=partner, submitter=other_user)
all_flags = my_flags + other_ppl_flags
list_url = reverse('partner-reviews:flags', kwargs={"partner_id": partner.pk})
list_response = self.client.get(list_url)
self.assertResponseStatusIs(list_response)
self.assertEqual(list_response.data['count'], len(all_flags))
my_flags_response = self.client.get(list_url + '?only_mine=True')
self.assertResponseStatusIs(my_flags_response)
self.assertEqual(my_flags_response.data['count'], len(my_flags))
partner.flags.all().delete()
# Type filter
observation_flags: List[PartnerFlag] = PartnerFlagFactory.create_batch(
5, partner=partner, flag_type=FLAG_TYPES.observation
)
yellow_flags: List[PartnerFlag] = PartnerFlagFactory.create_batch(
7, partner=partner, flag_type=FLAG_TYPES.yellow
)
observation_flags_response = self.client.get(list_url + f'?flag_type={FLAG_TYPES.observation}')
self.assertResponseStatusIs(observation_flags_response)
self.assertEqual(observation_flags_response.data['count'], len(observation_flags))
yellow_flags_response = self.client.get(list_url + f'?flag_type={FLAG_TYPES.yellow}')
self.assertResponseStatusIs(yellow_flags_response)
self.assertEqual(yellow_flags_response.data['count'], len(yellow_flags))
partner.flags.all().delete()
# Category filter
fraud_and_corruption_flags: List[PartnerFlag] = PartnerFlagFactory.create_batch(
5, partner=partner, category=FLAG_CATEGORIES.C02_financial
)
sex_abuse_flags: List[PartnerFlag] = PartnerFlagFactory.create_batch(
7, partner=partner, category=FLAG_CATEGORIES.C05_sex_abuse
)
fraud_and_corruption_flags_response = self.client.get(
list_url + f'?category={FLAG_CATEGORIES.C02_financial}'
)
self.assertResponseStatusIs(fraud_and_corruption_flags_response)
self.assertEqual(fraud_and_corruption_flags_response.data['count'], len(fraud_and_corruption_flags))
sex_abuse_flags_response = self.client.get(list_url + f'?category={FLAG_CATEGORIES.C05_sex_abuse}')
self.assertResponseStatusIs(sex_abuse_flags_response)
self.assertEqual(sex_abuse_flags_response.data['count'], len(sex_abuse_flags))
class TestPartnerVerificationAPITestCase(BaseAPITestCase):
user_type = BaseAPITestCase.USER_AGENCY
agency_role = AgencyRole.HQ_EDITOR
base_payload = {
"is_mm_consistent": True,
"is_indicate_results": True,
"cert_uploaded_comment": "Comment",
"indicate_results_comment": "Comment",
"yellow_flag_comment": "Comment",
"mm_consistent_comment": "Comment",
"is_cert_uploaded": True,
"rep_risk_comment": "Comment",
"is_yellow_flag": False,
"is_rep_risk": False
}
def test_verification_create(self):
AgencyOfficeFactory.create_batch(self.quantity)
PartnerSimpleFactory.create_batch(self.quantity)
PartnerVerificationFactory.create_batch(self.quantity)
partner = Partner.objects.first()
url = reverse('partner-reviews:verifications', kwargs={"partner_id": partner.id})
payload = self.base_payload.copy()
# Test Verified Status
response = self.client.post(url, data=payload, format='json')
self.assertResponseStatusIs(response, status_code=status.HTTP_400_BAD_REQUEST)
with mock.patch('partner.models.Partner.profile_is_complete', lambda: True):
response = self.client.post(url, data=payload, format='json')
self.assertResponseStatusIs(response, status_code=status.HTTP_201_CREATED)
self.assertEquals(response.data['is_verified'], True)
# Test Unverified status
payload['is_rep_risk'] = True
response = self.client.post(url, data=payload, format='json')
self.assertEquals(response.data['is_verified'], False)
@mock.patch('partner.models.Partner.profile_is_complete', lambda _: True)
def test_ingo_verification_permissions(self):
partner = PartnerFactory(display_type=PARTNER_TYPES.international)
self.assertTrue(partner.is_hq)
url = reverse('partner-reviews:verifications', kwargs={"partner_id": partner.id})
payload = self.base_payload.copy()
roles_allowed, roles_disallowed = self.get_agency_with_and_without_permissions(
AgencyPermission.VERIFY_INGO_HQ
)
for role in roles_allowed:
self.set_current_user_role(role.name)
create_response = self.client.post(url, data=payload)
self.assertResponseStatusIs(create_response, status.HTTP_201_CREATED)
for role in roles_disallowed:
self.set_current_user_role(role.name)
create_response = self.client.post(url, data=payload)
self.assertResponseStatusIs(create_response, status.HTTP_403_FORBIDDEN)
@mock.patch('partner.models.Partner.profile_is_complete', lambda _: True)
def test_other_country_verification_permissions(self):
other_countries = [x for x in COUNTRIES if not x == self.user.agency_members.first().office.country.code]
partner = PartnerFactory(country_code=random.choice(other_countries))
self.assertNotEqual(partner.country_code, self.user.agency_members.first().office.country.code)
url = reverse('partner-reviews:verifications', kwargs={"partner_id": partner.id})
payload = self.base_payload.copy()
roles_allowed, roles_disallowed = self.get_agency_with_and_without_permissions(
AgencyPermission.VERIFY_CSOS_GLOBALLY
)
for role in roles_allowed:
self.set_current_user_role(role.name)
create_response = self.client.post(url, data=payload)
self.assertResponseStatusIs(create_response, status.HTTP_201_CREATED)
for role in roles_disallowed:
self.set_current_user_role(role.name)
create_response = self.client.post(url, data=payload)
self.assertResponseStatusIs(create_response, status.HTTP_403_FORBIDDEN)
@mock.patch('partner.models.Partner.profile_is_complete', lambda _: True)
def test_own_country_verification_permissions(self):
partner = PartnerFactory(country_code=self.user.agency_members.first().office.country.code)
self.assertEqual(partner.country_code, self.user.agency_members.first().office.country.code)
url = reverse('partner-reviews:verifications', kwargs={"partner_id": partner.id})
payload = self.base_payload.copy()
roles_allowed, roles_disallowed = self.get_agency_with_and_without_permissions(
AgencyPermission.VERIFY_CSOS_FOR_OWN_COUNTRY
)
for role in roles_allowed:
self.set_current_user_role(role.name)
create_response = self.client.post(url, data=payload)
self.assertResponseStatusIs(create_response, status.HTTP_201_CREATED)
for role in roles_disallowed:
self.set_current_user_role(role.name)
create_response = self.client.post(url, data=payload)
self.assertResponseStatusIs(create_response, status.HTTP_403_FORBIDDEN)
@mock.patch('partner.models.Partner.profile_is_complete', lambda _: True)
def test_verify_sanctioned_partner(self):
partner = PartnerFactory()
sanction_match: SanctionedNameMatch = SanctionedNameMatchFactory(partner=partner)
url = reverse('partner-reviews:verifications', kwargs={"partner_id": partner.id})
payload = self.base_payload.copy()
create_response = self.client.post(url, data=payload)
self.assertResponseStatusIs(create_response, status.HTTP_400_BAD_REQUEST)
sanction_match.can_ignore = True
sanction_match.save()
create_response = self.client.post(url, data=payload)
self.assertResponseStatusIs(create_response, status.HTTP_201_CREATED)
@mock.patch('partner.models.Partner.profile_is_complete', lambda _: True)
def test_verify_ingo_child_before_hq(self):
hq = PartnerFactory(display_type=PARTNER_TYPES.international)
self.assertTrue(hq.is_hq)
self.assertFalse(hq.is_verified)
partner = PartnerFactory(display_type=PARTNER_TYPES.international, hq=hq)
url = reverse('partner-reviews:verifications', kwargs={"partner_id": partner.id})
payload = self.base_payload.copy()
create_response = self.client.post(url, data=payload)
self.assertResponseStatusIs(create_response, status.HTTP_400_BAD_REQUEST)
PartnerVerificationFactory(partner=hq)
create_response = self.client.post(url, data=payload)
self.assertResponseStatusIs(create_response, status.HTTP_201_CREATED)
@mock.patch('partner.models.Partner.profile_is_complete', lambda _: True)
def test_verify_flagged_partner(self):
partner = PartnerFactory()
flag = PartnerFlagFactory(partner=partner, flag_type=FLAG_TYPES.red)
url = reverse('partner-reviews:verifications', kwargs={"partner_id": partner.id})
payload = self.base_payload.copy()
create_response = self.client.post(url, data=payload)
self.assertResponseStatusIs(create_response, status.HTTP_400_BAD_REQUEST)
flag.is_valid = False
flag.save()
create_response = self.client.post(url, data=payload)
self.assertResponseStatusIs(create_response, status.HTTP_201_CREATED)
class TestRegisterSanctionedPartnerTestCase(BaseAPITestCase):
user_type = BaseAPITestCase.USER_AGENCY
agency_role = AgencyRole.HQ_EDITOR
def setUp(self):
super(TestRegisterSanctionedPartnerTestCase, self).setUp()
self.email = "[email protected]"
self.data = {
"partner": {
"legal_name": "My org legal name",
"country_code": "PL",
"display_type": PARTNER_TYPES.international,
},
"partner_profile": {
"alias_name": "Name Inc.",
"acronym": "N1",
"legal_name_change": True,
"former_legal_name": "Former Legal Name Inc.",
"year_establishment": 1900,
"have_governing_document": True,
"registered_to_operate_in_country": False,
"missing_registration_document_comment": "comment",
},
"partner_head_organization": {
"fullname": "Jack Orzeszek",
"email": "[email protected]",
},
"partner_member": {
"title": "Project Manager",
},
"governing_document": {
'document': {
'content': base64.encodebytes(b'TEST_FILE_CONTENT'),
'filename': 'testfile.doc',
},
},
"declaration": [{
'question': f'question{n}',
'answer': 'Yes',
} for n in range(random.randint(5, 10))]
}
def test_register_sanctioned_partner(self):
item_inst, _ = SanctionedItem.objects.update_or_create(
sanctioned_type=SANCTION_LIST_TYPES.entity,
data_id=123456,
)
SanctionedName.objects.get_or_create(item=item_inst, name=self.data['partner']['legal_name'])
with self.login_as_user(UserFactory()):
url = reverse('accounts:registration')
response = self.client.post(url, data=self.data)
self.assertResponseStatusIs(response, status.HTTP_201_CREATED)
partner = Partner.objects.get(id=response.data['partner']['id'])
self.assertTrue(partner.has_sanction_match)
flag = partner.flags.filter(category=INTERNAL_FLAG_CATEGORIES.sanctions_match).first()
self.assertIsNotNone(flag)
flag_url = reverse('partner-reviews:flag-details', kwargs={"partner_id": flag.partner.id, 'pk': flag.id})
flag_response = self.client.get(flag_url)
self.assertResponseStatusIs(flag_response)
payload = {
'is_valid': False,
'validation_comment': 'comment',
}
response = self.client.patch(flag_url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_200_OK)
self.assertFalse(response.data['is_valid'])
partner.refresh_from_db()
self.assertFalse(partner.is_locked)
self.assertFalse(partner.has_sanction_match)
payload = {
'is_valid': True,
'validation_comment': 'comment'
}
response = self.client.patch(flag_url, data=payload)
self.assertResponseStatusIs(response, status.HTTP_200_OK)
self.assertTrue(response.data['is_valid'])
partner.refresh_from_db()
self.assertTrue(partner.is_locked)
self.assertTrue(partner.has_sanction_match)
self.client.logout()
partner_member = PartnerMemberFactory(partner=partner)
user: User = partner_member.user
password = 'testing1235'
user.set_password(password)
user.save()
login_url = reverse('rest_login')
response = self.client.post(login_url, data={
'email': user.email,
'password': password,
})
self.assertResponseStatusIs(response, status.HTTP_400_BAD_REQUEST)
def test_matches_dont_duplicate(self):
item_inst, _ = SanctionedItem.objects.update_or_create(
sanctioned_type=SANCTION_LIST_TYPES.entity,
data_id=123456,
)
SanctionedName.objects.get_or_create(item=item_inst, name=self.data['partner']['legal_name'])
with self.login_as_user(UserFactory()):
url = reverse('accounts:registration')
response = self.client.post(url, data=self.data, format='json')
self.assertResponseStatusIs(response, status.HTTP_201_CREATED)
partner = Partner.objects.get(id=response.data['partner']['id'])
self.assertTrue(partner.has_sanction_match)
partner_sanction_flags = partner.flags.filter(category=INTERNAL_FLAG_CATEGORIES.sanctions_match)
flag = partner_sanction_flags.first()
self.assertIsNotNone(flag)
flag_count_before = partner_sanction_flags.count()
call_command('sanctions_list_match_scan')
self.assertEqual(partner_sanction_flags.count(), flag_count_before)
|
|
import numpy as np
from scipy.optimize import fsolve
import matplotlib.pyplot as plt
from collections import namedtuple
from utils import riemann_tools
from ipywidgets import interact
from ipywidgets import widgets
conserved_variables = ('Density', 'Momentum', 'Energy')
primitive_variables = ('Density', 'Velocity', 'Pressure')
Primitive_State = namedtuple('State', primitive_variables)
Conserved_State = namedtuple('State', conserved_variables)
def pospart(x):
return np.maximum(1.e-15,x)
def primitive_to_conservative(rho, u, p, gamma=1.4):
mom = rho*u
E = p/(gamma-1.) + 0.5*rho*u**2
return rho, mom, E
def conservative_to_primitive(rho, mom, E, gamma=1.4):
u = mom/pospart(rho)
p = (gamma-1.)*(E - 0.5*rho*u**2)
return rho, u, p
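# Worked example (illustrative, not part of the original module): with
# gamma = 1.4, rho = 1, u = 2, p = 1 the total energy is
#   E = p/(gamma-1) + 0.5*rho*u**2 = 2.5 + 2.0 = 4.5,
# so primitive_to_conservative(1., 2., 1.) returns (1.0, 2.0, 4.5) and
# conservative_to_primitive(1.0, 2.0, 4.5) recovers (1.0, 2.0, 1.0).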
def cons_to_prim(q, gamma=1.4):
    return conservative_to_primitive(*q, gamma=gamma)
def sound_speed(rho, p, gamma=1.4):
return np.sqrt(gamma*p/pospart(rho))
def beta(gamma):
return (gamma+1.)/(gamma-1.)
def lambda1(q, xi, gamma=1.4):
"Characteristic speed for 1-waves."
rho, u, p = conservative_to_primitive(*q, gamma=gamma)
c = sound_speed(rho, p, gamma)
return u - c
def lambda2(q, xi, gamma=1.4):
"Characteristic speed for 2-waves."
rho, u, p = conservative_to_primitive(*q, gamma=gamma)
return u
def lambda3(q, xi, gamma=1.4):
"Characteristic speed for 3-waves."
rho, u, p = conservative_to_primitive(*q, gamma=gamma)
c = sound_speed(rho, p, gamma)
return u + c
def integral_curve_1(p, rhostar, ustar, pstar, gamma=1.4):
"""Velocity as a function of pressure for the 1-integral curve passing
through (rhostar, ustar, pstar)"""
c = sound_speed(rhostar, pstar, gamma)
return ustar + 2*c/(gamma-1.)* (1.-(pospart(p)/pstar)**((gamma-1.)/(2.*gamma)))
def integral_curve_3(p, rhostar, ustar, pstar, gamma=1.4):
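    """Velocity as a function of pressure for the 3-integral curve passing
    through (rhostar, ustar, pstar)."""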
c = sound_speed(rhostar, pstar, gamma)
return ustar - 2*c/(gamma-1.)* (1.-(pospart(p)/pstar)**((gamma-1.)/(2.*gamma)))
def hugoniot_locus_1(p, rhostar, ustar, pstar, gamma=1.4):
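    """Velocity as a function of pressure for the 1-Hugoniot locus passing
    through (rhostar, ustar, pstar)."""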
c = sound_speed(rhostar, pstar, gamma)
return ustar + 2*c/np.sqrt(2*gamma*(gamma-1.)) * ((1-p/pstar)/np.sqrt(1+beta(gamma)*p/pstar))
def hugoniot_locus_3(p, rhostar, ustar, pstar, gamma=1.4):
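    """Velocity as a function of pressure for the 3-Hugoniot locus passing
    through (rhostar, ustar, pstar)."""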
c = sound_speed(rhostar, pstar, gamma)
return ustar - 2*c/np.sqrt(2*gamma*(gamma-1.)) * ((1-p/pstar)/np.sqrt(1+beta(gamma)*p/pstar))
def exact_riemann_solution(q_l, q_r, gamma=1.4, phase_plane_curves=False):
"""Return the exact solution to the Riemann problem with initial states
q_l, q_r. The solution is given in terms of a list of states, a list of
speeds (each of which may be a pair in case of a rarefaction fan), and a
function reval(xi) that gives the solution at a point xi=x/t.
The input and output vectors are the conserved quantities.
If phase_plane_curves==True, then the appropriate Hugoniot Locus and/or
integral curve is returned for the 1- and 3-waves.
"""
rho_l, u_l, p_l = conservative_to_primitive(*q_l)
rho_r, u_r, p_r = conservative_to_primitive(*q_r)
# Compute left and right state sound speeds
c_l = sound_speed(rho_l, p_l, gamma)
c_r = sound_speed(rho_r, p_r, gamma)
ws = np.zeros(5)
wave_types = ['', 'contact', '']
if rho_l == 0:
# 3-rarefaction connecting right state to vacuum
p = 0.
rho_l_star = 0.
rho_r_star = 0.
u_vacuum_r = integral_curve_3(0., rho_r, u_r, p_r, gamma)
u = u_vacuum_r
ws[0] = 0.
ws[1] = 0.
ws[2] = 0.
ws[3] = u_vacuum_r
ws[4] = u_r + c_r
wave_types = ['contact', 'contact', 'raref']
elif rho_r == 0:
# 1-rarefaction connecting left state to vacuum
p = 0
rho_l_star = 0.
rho_r_star = 0.
u_vacuum_l = integral_curve_1(0., rho_l, u_l, p_l, gamma)
u = u_vacuum_l
ws[0] = u_l - c_l
ws[1] = u_vacuum_l
ws[2] = 0.
ws[3] = 0.
ws[4] = 0.
wave_types = ['raref', 'contact', 'contact']
elif u_l - u_r + 2*(c_l+c_r)/(gamma-1.) < 0:
# Middle states are vacuum
p = 0.
rho_l_star = 0.
rho_r_star = 0.
u_vacuum_l = integral_curve_1(0., rho_l, u_l, p_l, gamma)
u_vacuum_r = integral_curve_3(0., rho_r, u_r, p_r, gamma)
u = 0.5*(u_vacuum_l + u_vacuum_r)
ws[0] = u_l - c_l
ws[1] = u_vacuum_l
ws[2] = u
ws[3] = u_vacuum_r
ws[4] = u_r + c_r
wave_types = ['raref', 'contact', 'raref']
else:
# Check whether the 1-wave is a shock or rarefaction
def phi_l(p):
if p >= p_l: return hugoniot_locus_1(p, rho_l, u_l, p_l, gamma)
else: return integral_curve_1(p, rho_l, u_l, p_l, gamma)
        # Check whether the 3-wave is a shock or rarefaction
def phi_r(p):
if p >= p_r: return hugoniot_locus_3(p, rho_r, u_r, p_r, gamma)
else: return integral_curve_3(p, rho_r, u_r, p_r, gamma)
phi = lambda p: phi_l(p)-phi_r(p)
exp = (1.-gamma)/(2.*gamma)
guess = ((c_l + c_r - (gamma-1.)*(u_r-u_l)/2.)/(c_l*p_l**exp+c_r*p_r**exp))**(-1./exp)
# Compute middle state p, u by finding curve intersection
p, info, ier, msg = fsolve(phi, guess, full_output=True, xtol=1.e-14)
# For strong rarefactions, sometimes fsolve needs help
if ier != 1:
p, info, ier, msg = fsolve(phi, guess, full_output=True, factor=0.1, xtol=1.e-10)
# This should not happen:
if ier != 1:
print('Warning: fsolve did not converge.')
print(msg)
u = phi_l(p)
ws[2] = u
# Find shock and rarefaction speeds
if p > p_l:
wave_types[0] = 'shock'
rho_l_star = rho_l*(1+beta(gamma)*p/p_l)/(p/p_l+beta(gamma))
ws[0] = (rho_l*u_l - rho_l_star*u)/(rho_l - rho_l_star)
ws[1] = ws[0]
else:
wave_types[0] = 'raref'
rho_l_star = (p/p_l)**(1./gamma) * rho_l
c_l_star = sound_speed(rho_l_star, p, gamma)
ws[0] = u_l - c_l
ws[1] = u - c_l_star
if p > p_r:
wave_types[2] = 'shock'
rho_r_star = rho_r*(1+beta(gamma)*p/p_r)/(p/p_r+beta(gamma))
ws[4] = (rho_r*u_r - rho_r_star*u)/(rho_r - rho_r_star)
ws[3] = ws[4]
else:
wave_types[2] = 'raref'
rho_r_star = (p/p_r)**(1./gamma) * rho_r
c_r_star = sound_speed(rho_r_star, p, gamma)
ws[3] = u + c_r_star
ws[4] = u_r + c_r
# Find solution inside rarefaction fans (in primitive variables)
def raref1(xi):
u1 = ((gamma-1.)*u_l + 2*(c_l + xi))/(gamma+1.)
rho1 = (rho_l**gamma*(u1-xi)**2/pospart(gamma*p_l))**(1./(gamma-1.))
p1 = p_l*(rho1/pospart(rho_l))**gamma
return rho1, u1, p1
def raref3(xi):
u3 = ((gamma-1.)*u_r - 2*(c_r - xi))/(gamma+1.)
rho3 = (rho_r**gamma*(xi-u3)**2/pospart(gamma*p_r))**(1./(gamma-1.))
p3 = p_r*(rho3/pospart(rho_r))**gamma
return rho3, u3, p3
q_l_star = np.squeeze(np.array(primitive_to_conservative(rho_l_star,u,p)))
q_r_star = np.squeeze(np.array(primitive_to_conservative(rho_r_star,u,p)))
states = np.column_stack([q_l,q_l_star,q_r_star,q_r])
speeds = [[], ws[2], []]
if wave_types[0] in ['shock','contact']:
speeds[0] = ws[0]
else:
speeds[0] = (ws[0],ws[1])
if wave_types[2] in ['shock','contact']:
speeds[2] = ws[3]
else:
speeds[2] = (ws[3],ws[4])
def reval(xi):
r"""Returns the Riemann solution in primitive variables for any
value of xi = x/t.
"""
rar1 = raref1(xi)
rar3 = raref3(xi)
rho_out = (xi<=ws[0] )*rho_l \
+ (xi>ws[0])*(xi<=ws[1])*rar1[0] \
+ (xi>ws[1])*(xi<=ws[2] )*rho_l_star \
+ (xi>ws[2]) *(xi<=ws[3])*rho_r_star \
+ (xi>ws[3])*(xi<=ws[4])*rar3[0] \
+ (xi>ws[4] )*rho_r
u_out = (xi<=ws[0] )*u_l \
+ (xi>ws[0])*(xi<=ws[1])*rar1[1] \
+ (xi>ws[1])*(xi<=ws[2] )*u \
+ (xi>ws[2] )*(xi<=ws[3])*u \
+ (xi>ws[3])*(xi<=ws[4])*rar3[1] \
+ (xi>ws[4] )*u_r
p_out = (xi<=ws[0] )*p_l \
+ (xi>ws[0])*(xi<=ws[1])*rar1[2] \
+ (xi>ws[1])*(xi<=ws[2] )*p \
+ (xi>ws[2] )*(xi<=ws[3])*p \
+ (xi>ws[3])*(xi<=ws[4])*rar3[2] \
+ (xi>ws[4] )*p_r
return primitive_to_conservative(rho_out,u_out,p_out)
if phase_plane_curves:
if wave_types[0] == 'raref':
phi1 = lambda p: integral_curve_1(p, rho_l, u_l, p_l, gamma)
elif wave_types[0] == 'shock':
phi1 = lambda p: hugoniot_locus_1(p, rho_l, u_l, p_l, gamma)
else:
phi1 = lambda p: p
if wave_types[2] == 'raref':
phi3 = lambda p: integral_curve_3(p, rho_r, u_r, p_r, gamma)
elif wave_types[2] == 'shock':
phi3 = lambda p: hugoniot_locus_3(p, rho_r, u_r, p_r, gamma)
else:
phi3 = lambda p: p
return states, speeds, reval, wave_types, (p, phi1, phi3)
else:
return states, speeds, reval, wave_types
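# Illustrative usage sketch (added for clarity, not part of the original
# module): solve the classic Sod shock tube with the exact solver above and
# sample the self-similar solution q(x, t) = q(x/t) at t = 0.2.  Only
# functions defined in this file are used.
def _demo_sod_shock_tube():
    q_l = primitive_to_conservative(1., 0., 1.)       # left:  rho, u, p
    q_r = primitive_to_conservative(0.125, 0., 0.1)   # right: rho, u, p
    states, speeds, reval, wave_types = exact_riemann_solution(q_l, q_r)
    x = np.linspace(-1., 1., 201)
    rho, mom, E = reval(x / 0.2)       # conserved variables at t = 0.2
    print(wave_types)                  # expected: ['raref', 'contact', 'shock']
    return x, rho, mom, E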
def phase_plane_plot(left_state, right_state, gamma=1.4, ax=None, approx_states=None,
cons_inputs=False):
r"""Plot the Hugoniot loci or integral curves in the p-u plane."""
import matplotlib.lines as mlines
if ax is None:
fig, ax = plt.subplots()
if cons_inputs:
q_left = left_state.copy()
q_right = right_state.copy()
left_state = Primitive_State(*conservative_to_primitive(*q_left))
right_state = Primitive_State(*conservative_to_primitive(*q_right))
else:
q_left = primitive_to_conservative(*left_state)
q_right = primitive_to_conservative(*right_state)
# Solve Riemann problem
ex_states, ex_speeds, reval, wave_types, ppc = \
exact_riemann_solution(q_left, q_right, gamma,
phase_plane_curves=True)
pm, w1, w3 = ppc
x = [left_state.Pressure,pm,right_state.Pressure]
y = [left_state.Velocity, w1(pm), right_state.Velocity]
if left_state.Pressure == 0:
c_r = sound_speed(right_state.Density, right_state.Pressure, gamma)
y[1] = right_state.Velocity - 2*c_r/(gamma-1.)
if right_state.Pressure == 0:
c_l = sound_speed(left_state.Density, left_state.Pressure, gamma)
        y[1] = left_state.Velocity + 2*c_l/(gamma-1.)
xmax, xmin = max(x), min(x)
ymax, ymin = max(y), min(y)
dx, dy = xmax - xmin, ymax - ymin
w1v, w3v = (np.vectorize(w1), np.vectorize(w3))
ax.set_xlabel('Pressure (p)')
ax.set_ylabel('Velocity (u)')
ax.set_title('Phase plane')
pa = np.linspace(0.,left_state.Pressure,500)
pb = np.linspace(left_state.Pressure,xmax+0.5*dx)
ua = w1v(pa)
ub = w1v(pb)
if wave_types[0] == 'shock':
style1 = '--r'
style2 = '-r'
elif wave_types[0] == 'raref':
style1 = '-b'
style2 = '--b'
else:
style1 = '-w'
style2 = '-w'
ax.plot(pa,ua,style1)
ax.plot(pb,ub,style2)
pa = np.linspace(0.,right_state.Pressure,500)
pb = np.linspace(right_state.Pressure,xmax+0.5*dx)
ua = w3v(pa)
ub = w3v(pb)
if wave_types[2] == 'shock':
style1 = '--r'
style2 = '-r'
elif wave_types[2] == 'raref':
style1 = '-b'
style2 = '--b'
else:
style1 = '-w'
style2 = '-w'
ax.plot(pa,ua,style1)
ax.plot(pb,ub,style2)
msize = 8
ax.plot(x[0],y[0],'<k',markersize=msize,label='Left')
ax.plot(x[1],y[1],'ok',markersize=msize,label='Middle')
ax.plot(x[2],y[2],'>k',markersize=msize,label='Right')
# add legends only for Left, Middle, Right:
handles = []
handle = mlines.Line2D([], [], color='k', linestyle='', marker='<',
label='Left state')
handles.append(handle)
handle = mlines.Line2D([], [], color='k', linestyle='', marker='o',
label='Middle state')
handles.append(handle)
handle = mlines.Line2D([], [], color='k', linestyle='', marker='>',
label='Right state')
handles.append(handle)
plt.legend(handles=handles, fontsize=8)
if approx_states is not None:
p_approx = []
u_approx = []
for j in range(approx_states.shape[1]):
rho, u, p = cons_to_prim(approx_states[:,j],gamma=gamma)
p_approx.append(p)
u_approx.append(u)
ax.plot(p_approx,u_approx,'-g',zorder=0)
# don't plot the left and right states as dots, only middle states:
ax.plot(p_approx[1:-1],u_approx[1:-1],'og',markersize=8,zorder=0)
xlimits = ax.get_xlim()
if xlimits[0] <= 0.:
# shift xlimits to better show vacuum state:
x0 = min(xlimits[0], -0.05*(xlimits[1] - xlimits[0]))
ax.set_xlim(x0,xlimits[1])
ylimits = ax.get_ylim()
ax.plot([0,0], ylimits, 'k-', linewidth=0.6) # y-axis
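# Illustrative usage sketch (not part of the original module): p-u phase
# plane for the Sod problem, using the Primitive_State namedtuple defined at
# the top of this file.
def _demo_phase_plane_sod():
    left = Primitive_State(Density=1., Velocity=0., Pressure=1.)
    right = Primitive_State(Density=0.125, Velocity=0., Pressure=0.1)
    phase_plane_plot(left, right, gamma=1.4)
    plt.show()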
def plot_integral_curves(plot_1=True,plot_3=False,gamma=1.4,rho_0=1.):
N = 400
p = np.linspace(0.,5,N)
p_0 = 1.
uu = np.linspace(-3,3,15)
c_0 = np.sqrt(gamma*p_0/rho_0)
if plot_1:
for u_0 in uu:
u = u_0 + (2*c_0)/(gamma-1.)* \
(1.-(p/p_0)**((gamma-1)/(2*gamma)))
plt.plot(p,u,color='coral')
if plot_3:
for u_0 in uu:
u = u_0 - (2*c_0)/(gamma-1.)* \
(1.-(p/p_0)**((gamma-1)/(2*gamma)))
plt.plot(p,u,color='maroon')
plt.xlabel('p'); plt.ylabel('u')
plt.title('Integral curves projected to p-u plane')
plt.show()
def plot_hugoniot_loci(plot_1=True,plot_3=False,gamma=1.4,rho_0=1.):
N = 400
p = np.linspace(1.e-3,5,N)
p_0 = 1.
uu = np.linspace(-3,3,15)
c_0 = np.sqrt(gamma*p_0/rho_0)
beta = (gamma+1.)/(gamma-1.)
if plot_1:
for u_0 in uu:
u_1 = u_0 + (2*c_0)/np.sqrt(2*gamma*(gamma-1.))* \
(1.-p/p_0)/(np.sqrt(1+beta*p/p_0))
plt.plot(p,u_1,color='coral')
if plot_3:
for u_0 in uu:
u_1 = u_0 - (2*c_0)/np.sqrt(2*gamma*(gamma-1.))* \
(1.-p/p_0)/(np.sqrt(1+beta*p/p_0))
plt.plot(p,u_1,color='maroon')
plt.xlabel('p'); plt.ylabel('u')
plt.title('Hugoniot Loci projected to p-u plane')
plt.show()
def riemann_solution(left_state, right_state, gamma=1.4):
q_left = primitive_to_conservative(*left_state)
q_right = primitive_to_conservative(*right_state)
ex_states, ex_speeds, reval, wave_types = exact_riemann_solution(q_left ,q_right, gamma)
plot_function = riemann_tools.make_plot_function(ex_states, ex_speeds, reval, wave_types,
layout='vertical',
vertical_spacing=0.15,
variable_names=primitive_variables,
plot_chars=[lambda1,lambda2,lambda3],
derived_variables=cons_to_prim)
interact(plot_function, t=widgets.FloatSlider(value=0.5,min=0,max=.9),
which_char=widgets.Dropdown(options=[None,1,2,3],description='Show characteristics:',
style={'description_width':'initial'}))
def plot_riemann_trajectories(q_l, q_r, gamma=1.4, primitive=False):
if primitive:
q_left = primitive_to_conservative(*q_l)
q_right = primitive_to_conservative(*q_r)
else:
q_left = q_l
q_right = q_r
ex_states, ex_speeds, reval, wave_types = exact_riemann_solution(q_left ,q_right, gamma=gamma)
def reval_rho_u(x):
q = reval(x)
rho = q[0]
u = q[1]/q[0]
rho_u = np.vstack((rho,u))
return rho_u
# Specify density of trajectories to left and right:
rho_l = q_left[0] / 10.
rho_r = q_right[0] / 10.
x_traj, t_traj, xmax = riemann_tools.compute_riemann_trajectories(ex_states,
ex_speeds,
reval_rho_u,
wave_types,
i_vel=1,
rho_left=rho_l,
rho_right=rho_r)
riemann_tools.plot_riemann_trajectories(x_traj, t_traj, ex_speeds, wave_types)
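# Illustrative usage sketch (not part of the original module): particle
# trajectories for the Sod problem, passing primitive (rho, u, p) states
# directly via primitive=True:
#     plot_riemann_trajectories((1., 0., 1.), (0.125, 0., 0.1), primitive=True)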
|
|
# Copyright 2012 OpenStack Foundation.
# Copyright 2013 NTT corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
import webob.exc
from glance.api import policy
from glance.api.v1 import controller
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
from glance.i18n import _
import glance.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('image_member_quota', 'glance.common.config')
class Controller(controller.BaseController):
def __init__(self):
self.policy = policy.Enforcer()
def _check_can_access_image_members(self, context):
if context.owner is None and not context.is_admin:
raise webob.exc.HTTPUnauthorized(_("No authenticated user"))
def _enforce(self, req, action):
"""Authorize an action against our policies"""
try:
self.policy.enforce(req.context, action, {})
except exception.Forbidden:
LOG.debug("User not permitted to perform '%s' action", action)
raise webob.exc.HTTPForbidden()
def _raise_404_if_image_deleted(self, req, image_id):
image = self.get_image_meta_or_404(req, image_id)
if image['status'] == 'deleted':
msg = _("Image with identifier %s has been deleted.") % image_id
raise webob.exc.HTTPNotFound(msg)
def index(self, req, image_id):
"""
Return a list of dictionaries indicating the members of the
image, i.e., those tenants the image is shared with.
:param req: the Request object coming from the wsgi layer
:param image_id: The opaque image identifier
:returns: The response body is a mapping of the following form
.. code-block:: json
{'members': [
{'member_id': <MEMBER>,
'can_share': <SHARE_PERMISSION>, ...}, ...
]}
"""
self._enforce(req, 'get_members')
self._raise_404_if_image_deleted(req, image_id)
try:
members = registry.get_image_members(req.context, image_id)
except exception.NotFound:
msg = _("Image with identifier %s not found") % image_id
            LOG.warning(msg)
raise webob.exc.HTTPNotFound(msg)
except exception.Forbidden:
msg = _("Unauthorized image access")
            LOG.warning(msg)
raise webob.exc.HTTPForbidden(msg)
return dict(members=members)
@utils.mutating
def delete(self, req, image_id, id):
"""
Removes a membership from the image.
"""
self._check_can_access_image_members(req.context)
self._enforce(req, 'delete_member')
self._raise_404_if_image_deleted(req, image_id)
try:
registry.delete_member(req.context, image_id, id)
self._update_store_acls(req, image_id)
except exception.NotFound as e:
LOG.debug(encodeutils.exception_to_unicode(e))
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Forbidden as e:
LOG.debug("User not permitted to remove membership from image "
"'%s'", image_id)
raise webob.exc.HTTPNotFound(explanation=e.msg)
return webob.exc.HTTPNoContent()
def default(self, req, image_id, id, body=None):
"""This will cover the missing 'show' and 'create' actions"""
raise webob.exc.HTTPMethodNotAllowed()
def _enforce_image_member_quota(self, req, attempted):
if CONF.image_member_quota < 0:
# If value is negative, allow unlimited number of members
return
maximum = CONF.image_member_quota
if attempted > maximum:
msg = _("The limit has been exceeded on the number of allowed "
"image members for this image. Attempted: %(attempted)s, "
"Maximum: %(maximum)s") % {'attempted': attempted,
'maximum': maximum}
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
request=req)
@utils.mutating
def update(self, req, image_id, id, body=None):
"""
Adds a membership to the image, or updates an existing one.
If a body is present, it is a dict with the following format
.. code-block:: json
{'member': {
'can_share': [True|False]
}}
If `can_share` is provided, the member's ability to share is
set accordingly. If it is not provided, existing memberships
remain unchanged and new memberships default to False.
"""
self._check_can_access_image_members(req.context)
self._enforce(req, 'modify_member')
self._raise_404_if_image_deleted(req, image_id)
new_number_of_members = len(registry.get_image_members(req.context,
image_id)) + 1
self._enforce_image_member_quota(req, new_number_of_members)
# Figure out can_share
can_share = None
if body and 'member' in body and 'can_share' in body['member']:
can_share = bool(body['member']['can_share'])
try:
registry.add_member(req.context, image_id, id, can_share)
self._update_store_acls(req, image_id)
except exception.Invalid as e:
LOG.debug(encodeutils.exception_to_unicode(e))
raise webob.exc.HTTPBadRequest(explanation=e.msg)
except exception.NotFound as e:
LOG.debug(encodeutils.exception_to_unicode(e))
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Forbidden as e:
LOG.debug(encodeutils.exception_to_unicode(e))
raise webob.exc.HTTPNotFound(explanation=e.msg)
return webob.exc.HTTPNoContent()
@utils.mutating
def update_all(self, req, image_id, body):
"""
Replaces the members of the image with those specified in the
body. The body is a dict with the following format
.. code-block:: json
{'memberships': [
{'member_id': <MEMBER_ID>,
['can_share': [True|False]]}, ...
]}
"""
self._check_can_access_image_members(req.context)
self._enforce(req, 'modify_member')
self._raise_404_if_image_deleted(req, image_id)
memberships = body.get('memberships')
if memberships:
new_number_of_members = len(body['memberships'])
self._enforce_image_member_quota(req, new_number_of_members)
try:
registry.replace_members(req.context, image_id, body)
self._update_store_acls(req, image_id)
except exception.Invalid as e:
LOG.debug(encodeutils.exception_to_unicode(e))
raise webob.exc.HTTPBadRequest(explanation=e.msg)
except exception.NotFound as e:
LOG.debug(encodeutils.exception_to_unicode(e))
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Forbidden as e:
LOG.debug(encodeutils.exception_to_unicode(e))
raise webob.exc.HTTPNotFound(explanation=e.msg)
return webob.exc.HTTPNoContent()
def index_shared_images(self, req, id):
"""
Retrieves list of image memberships for the given member.
:param req: the Request object coming from the wsgi layer
:param id: the opaque member identifier
:returns: The response body is a mapping of the following form
.. code-block:: json
{'shared_images': [
{'image_id': <IMAGE>,
'can_share': <SHARE_PERMISSION>, ...}, ...
]}
"""
try:
members = registry.get_member_images(req.context, id)
except exception.NotFound as e:
LOG.debug(encodeutils.exception_to_unicode(e))
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Forbidden as e:
LOG.debug(encodeutils.exception_to_unicode(e))
raise webob.exc.HTTPForbidden(explanation=e.msg)
return dict(shared_images=members)
def _update_store_acls(self, req, image_id):
image_meta = self.get_image_meta_or_404(req, image_id)
location_uri = image_meta.get('location')
public = image_meta.get('is_public')
self.update_store_acls(req, image_id, location_uri, public)
def create_resource():
"""Image members resource factory method"""
deserializer = wsgi.JSONRequestDeserializer()
serializer = wsgi.JSONResponseSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
|
|
# Copyright 2015 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the collection editor page."""
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import rights_manager
from core.tests import test_utils
import feconf
class BaseCollectionEditorControllerTest(test_utils.GenericTestBase):
def setUp(self):
"""Completes the sign-up process for self.EDITOR_EMAIL."""
super(BaseCollectionEditorControllerTest, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.json_dict = {
            'version': 1,
            'commit_message': 'changed title',
            'change_list': [{
'cmd': 'edit_collection_property',
'property_name': 'title',
'new_value': 'A new title'
}]
}
class CollectionEditorTest(BaseCollectionEditorControllerTest):
COLLECTION_ID = '0'
def setUp(self):
super(CollectionEditorTest, self).setUp()
collection_services.load_demo(self.COLLECTION_ID)
rights_manager.release_ownership_of_collection(
feconf.SYSTEM_COMMITTER_ID, self.COLLECTION_ID)
def test_access_collection_editor_page(self):
"""Test access to editor pages for the sample collection."""
whitelisted_usernames = [self.EDITOR_USERNAME]
self.set_collection_editors(whitelisted_usernames)
        # Check that it is possible to access a page with a specific version
# number.
response = self.testapp.get(
'%s/%s?v=1' % (feconf.COLLECTION_DATA_URL_PREFIX,
self.COLLECTION_ID))
self.assertEqual(response.status_int, 200)
# Check that non-editors cannot access the editor page. This is due
# to them not being whitelisted.
response = self.testapp.get(
'%s/%s' % (feconf.COLLECTION_EDITOR_URL_PREFIX,
self.COLLECTION_ID))
self.assertEqual(response.status_int, 302)
# Check that whitelisted users can access and edit in the editor page.
self.login(self.EDITOR_EMAIL)
response = self.testapp.get(
'%s/%s' % (feconf.COLLECTION_EDITOR_URL_PREFIX,
self.COLLECTION_ID))
self.assertEqual(response.status_int, 200)
json_response = self.get_json(
'%s/%s' % (feconf.COLLECTION_RIGHTS_PREFIX, self.COLLECTION_ID))
self.assertTrue(json_response['can_edit'])
self.logout()
def test_editable_collection_handler_get(self):
whitelisted_usernames = [self.EDITOR_USERNAME]
self.set_collection_editors(whitelisted_usernames)
# Check that non-editors cannot access the editor data handler.
# This is due to them not being whitelisted.
response = self.testapp.get(
'%s/%s' % (feconf.EDITABLE_COLLECTION_DATA_URL_PREFIX,
self.COLLECTION_ID))
self.assertEqual(response.status_int, 302)
# Check that whitelisted users can access the data
# from the editable_collection_data_handler
self.login(self.EDITOR_EMAIL)
json_response = self.get_json(
'%s/%s' % (feconf.EDITABLE_COLLECTION_DATA_URL_PREFIX,
self.COLLECTION_ID))
self.assertEqual(self.COLLECTION_ID, json_response['collection']['id'])
self.logout()
def test_editable_collection_handler_put_cannot_access(self):
"""Check that non-editors cannot access editable put handler"""
whitelisted_usernames = [self.EDITOR_USERNAME, self.VIEWER_USERNAME]
self.set_collection_editors(whitelisted_usernames)
# Assign viewer role to collection.
rights_manager.create_new_collection_rights(
self.COLLECTION_ID, self.owner_id)
rights_manager.assign_role_for_collection(
self.admin_id, self.COLLECTION_ID, self.viewer_id,
rights_manager.ROLE_VIEWER)
rights_manager.publish_collection(self.owner_id, self.COLLECTION_ID)
self.login(self.VIEWER_EMAIL)
# Call get handler to return the csrf token.
response = self.testapp.get(
'%s/%s' % (feconf.COLLECTION_URL_PREFIX,
self.COLLECTION_ID))
csrf_token = self.get_csrf_token_from_response(response)
# Ensure viewers do not have access to the PUT Handler.
json_response = self.put_json(
'%s/%s' % (feconf.EDITABLE_COLLECTION_DATA_URL_PREFIX,
self.COLLECTION_ID),
self.json_dict, expect_errors=True,
csrf_token=csrf_token, expected_status_int=401)
self.assertEqual(json_response['code'], 401)
self.logout()
def test_editable_collection_handler_put_can_access(self):
"""Check that editors can access put handler"""
whitelisted_usernames = [self.EDITOR_USERNAME, self.VIEWER_USERNAME]
self.set_collection_editors(whitelisted_usernames)
rights_manager.create_new_collection_rights(
self.COLLECTION_ID, self.owner_id)
rights_manager.assign_role_for_collection(
self.admin_id, self.COLLECTION_ID, self.editor_id,
rights_manager.ROLE_EDITOR)
rights_manager.publish_collection(self.owner_id, self.COLLECTION_ID)
self.login(self.EDITOR_EMAIL)
# Call get handler to return the csrf token.
response = self.testapp.get(
'%s/%s' % (feconf.COLLECTION_URL_PREFIX,
self.COLLECTION_ID))
csrf_token = self.get_csrf_token_from_response(response)
json_response = self.put_json(
'%s/%s' % (feconf.EDITABLE_COLLECTION_DATA_URL_PREFIX,
self.COLLECTION_ID),
self.json_dict, csrf_token=csrf_token)
self.assertEqual(self.COLLECTION_ID, json_response['collection']['id'])
self.assertEqual(2, json_response['collection']['version'])
self.logout()
def test_collection_rights_handler(self):
collection_id = 'collection_id'
collection = collection_domain.Collection.create_default_collection(
collection_id, 'A title', 'A Category', 'An Objective')
collection_services.save_new_collection(self.owner_id, collection)
# Check that collection is published correctly.
rights_manager.assign_role_for_collection(
self.owner_id, collection_id, self.editor_id,
rights_manager.ROLE_EDITOR)
rights_manager.publish_collection(self.owner_id, collection_id)
# Check that collection cannot be unpublished by non admin.
with self.assertRaisesRegexp(
Exception, 'This collection cannot be unpublished.'):
rights_manager.unpublish_collection(self.owner_id, collection_id)
collection_rights = rights_manager.get_collection_rights(collection_id)
self.assertEqual(collection_rights.status,
rights_manager.ACTIVITY_STATUS_PUBLIC)
# Check that collection can be unpublished by admin.
rights_manager.unpublish_collection(self.admin_id, collection_id)
collection_rights = rights_manager.get_collection_rights(collection_id)
self.assertEqual(collection_rights.status,
rights_manager.ACTIVITY_STATUS_PRIVATE)
def test_get_collection_rights(self):
whitelisted_usernames = [self.OWNER_USERNAME]
self.set_collection_editors(whitelisted_usernames)
self.login(self.OWNER_EMAIL)
collection_id = 'collection_id'
collection = collection_domain.Collection.create_default_collection(
collection_id, 'A title', 'A Category', 'An Objective')
collection_services.save_new_collection(self.owner_id, collection)
# Check that collection is published correctly.
rights_manager.publish_collection(self.owner_id, collection_id)
json_response = self.get_json(
'%s/%s' % (feconf.COLLECTION_RIGHTS_PREFIX, self.COLLECTION_ID))
self.assertTrue(json_response['can_edit'])
self.assertFalse(json_response['can_unpublish'])
self.assertEqual(self.COLLECTION_ID, json_response['collection_id'])
self.assertFalse(json_response['is_private'])
self.logout()
|
|
# -*- coding:utf-8 -*-
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import optparse
import platform
import re
import sys
from event_log import EventLog
from error import NoSuchProjectError
from error import InvalidProjectGroupsError
class Command(object):
"""Base class for any command line action in repo.
"""
common = False
event_log = EventLog()
manifest = None
_optparse = None
def WantPager(self, _opt):
return False
def ReadEnvironmentOptions(self, opts):
""" Set options from environment variables. """
env_options = self._RegisteredEnvironmentOptions()
for env_key, opt_key in env_options.items():
# Get the user-set option value if any
opt_value = getattr(opts, opt_key)
# If the value is set, it means the user has passed it as a command
# line option, and we should use that. Otherwise we can try to set it
# with the value from the corresponding environment variable.
if opt_value is not None:
continue
env_value = os.environ.get(env_key)
if env_value is not None:
setattr(opts, opt_key, env_value)
return opts
@property
def OptionParser(self):
if self._optparse is None:
try:
me = 'repo %s' % self.NAME
usage = self.helpUsage.strip().replace('%prog', me)
except AttributeError:
usage = 'repo %s' % self.NAME
epilog = 'Run `repo help %s` to view the detailed manual.' % self.NAME
self._optparse = optparse.OptionParser(usage=usage, epilog=epilog)
self._Options(self._optparse)
return self._optparse
def _Options(self, p):
"""Initialize the option parser.
"""
def _RegisteredEnvironmentOptions(self):
"""Get options that can be set from environment variables.
Return a dictionary mapping environment variable name
to option key name that it can override.
Example: {'REPO_MY_OPTION': 'my_option'}
Will allow the option with key value 'my_option' to be set
from the value in the environment variable named 'REPO_MY_OPTION'.
Note: This does not work properly for options that are explicitly
set to None by the user, or options that are defined with a
default value other than None.
"""
return {}
def Usage(self):
"""Display usage and terminate.
"""
self.OptionParser.print_usage()
sys.exit(1)
def ValidateOptions(self, opt, args):
"""Validate the user options & arguments before executing.
This is meant to help break the code up into logical steps. Some tips:
* Use self.OptionParser.error to display CLI related errors.
* Adjust opt member defaults as makes sense.
    * Adjust the args list, but do so in place so the caller sees updates.
* Try to avoid updating self state. Leave that to Execute.
"""
def Execute(self, opt, args):
"""Perform the action, after option parsing is complete.
"""
raise NotImplementedError
def _ResetPathToProjectMap(self, projects):
self._by_path = dict((p.worktree, p) for p in projects)
def _UpdatePathToProjectMap(self, project):
self._by_path[project.worktree] = project
def _GetProjectByPath(self, manifest, path):
project = None
if os.path.exists(path):
oldpath = None
while (path and
path != oldpath and
path != manifest.topdir):
try:
project = self._by_path[path]
break
except KeyError:
oldpath = path
path = os.path.dirname(path)
if not project and path == manifest.topdir:
try:
project = self._by_path[path]
except KeyError:
pass
else:
try:
project = self._by_path[path]
except KeyError:
pass
return project
def GetProjects(self, args, manifest=None, groups='', missing_ok=False,
submodules_ok=False):
"""A list of projects that match the arguments.
"""
if not manifest:
manifest = self.manifest
all_projects_list = manifest.projects
result = []
mp = manifest.manifestProject
if not groups:
groups = mp.config.GetString('manifest.groups')
if not groups:
groups = 'default,platform-' + platform.system().lower()
groups = [x for x in re.split(r'[,\s]+', groups) if x]
if not args:
derived_projects = {}
for project in all_projects_list:
if submodules_ok or project.sync_s:
derived_projects.update((p.name, p)
for p in project.GetDerivedSubprojects())
all_projects_list.extend(derived_projects.values())
for project in all_projects_list:
if (missing_ok or project.Exists) and project.MatchesGroups(groups):
result.append(project)
else:
self._ResetPathToProjectMap(all_projects_list)
for arg in args:
# We have to filter by manifest groups in case the requested project is
# checked out multiple times or differently based on them.
projects = [project for project in manifest.GetProjectsWithName(arg)
if project.MatchesGroups(groups)]
if not projects:
path = os.path.abspath(arg).replace('\\', '/')
project = self._GetProjectByPath(manifest, path)
# If it's not a derived project, update path->project mapping and
# search again, as arg might actually point to a derived subproject.
if (project and not project.Derived and (submodules_ok or
project.sync_s)):
search_again = False
for subproject in project.GetDerivedSubprojects():
self._UpdatePathToProjectMap(subproject)
search_again = True
if search_again:
project = self._GetProjectByPath(manifest, path) or project
if project:
projects = [project]
if not projects:
raise NoSuchProjectError(arg)
for project in projects:
if not missing_ok and not project.Exists:
raise NoSuchProjectError('%s (%s)' % (arg, project.relpath))
if not project.MatchesGroups(groups):
raise InvalidProjectGroupsError(arg)
result.extend(projects)
def _getpath(x):
return x.relpath
result.sort(key=_getpath)
return result
def FindProjects(self, args, inverse=False):
result = []
patterns = [re.compile(r'%s' % a, re.IGNORECASE) for a in args]
for project in self.GetProjects(''):
for pattern in patterns:
match = pattern.search(project.name) or pattern.search(project.relpath)
if not inverse and match:
result.append(project)
break
if inverse and match:
break
else:
if inverse:
result.append(project)
result.sort(key=lambda project: project.relpath)
return result
class InteractiveCommand(Command):
"""Command which requires user interaction on the tty and
must not run within a pager, even if the user asks to.
"""
def WantPager(self, _opt):
return False
class PagedCommand(Command):
"""Command which defaults to output in a pager, as its
display tends to be larger than one screen full.
"""
def WantPager(self, _opt):
return True
class MirrorSafeCommand(object):
"""Command permits itself to run within a mirror,
and does not require a working directory.
"""
class GitcAvailableCommand(object):
"""Command that requires GITC to be available, but does
not require the local client to be a GITC client.
"""
class GitcClientCommand(object):
"""Command that requires the local client to be a GITC
client.
"""
|
|
import tempfile
from pyparsing import ParseSyntaxException, ParseException
import pytest
from pyhocon import ConfigFactory, ConfigSubstitutionException
from pyhocon.exceptions import ConfigMissingException, ConfigWrongTypeException
class TestConfigParser(object):
def test_parse_simple_value(self):
config = ConfigFactory.parse_string(
"""t = {
c = 5
"d" = true
e.y = {
f: 7
g: "hey dude!"
h: hey man!
i = \"\"\"
"first line"
"second" line
\"\"\"
}
j = [1, 2, 3]
u = 192.168.1.3/32
}
"""
)
assert config.get_string('t.c') == '5'
assert config.get_int('t.c') == 5
assert config.get('t.e.y.f') == 7
assert config.get('t.e.y.g') == 'hey dude!'
assert config.get('t.e.y.h') == 'hey man!'
assert [l.strip() for l in config.get('t.e.y.i').split('\n')] == ['', '"first line"', '"second" line', '']
assert config.get_bool('t.d') is True
assert config.get_int('t.e.y.f') == 7
assert config.get('t.j') == [1, 2, 3]
assert config.get('t.u') == '192.168.1.3/32'
def test_parse_with_enclosing_brace(self):
config = ConfigFactory.parse_string(
"""
{
a: {
b: 5
}
}
"""
)
assert config.get_string('a.b') == '5'
def test_parse_with_enclosing_square_bracket(self):
config = ConfigFactory.parse_string("[1, 2, 3]")
assert config == [1, 2, 3]
def test_quoted_key_with_dots(self):
config = ConfigFactory.parse_string(
"""
"a.b.c.d": 3
t {
"d": {
"c": 5
}
}
k {
"b.f.d": 7
}
"""
)
assert config['"a.b.c.d"'] == 3
assert config['t.d.c'] == 5
assert config['k."b.f.d"'] == 7
def test_comma_to_separate_expr(self):
config = ConfigFactory.parse_string(
"""
a=1,
b="abc",
c=the man!,
d=woof,
a-b-c-d=test,
a b c d=test2,
"a b c d e"=test3
"""
)
assert config.get('a') == 1
assert config.get('b') == 'abc'
assert config.get('c') == 'the man!'
assert config.get('d') == 'woof'
assert config.get('a-b-c-d') == 'test'
assert config.get('a b c d') == 'test2'
assert config.get('a b c d e') == 'test3'
def test_dict_merge(self):
config = ConfigFactory.parse_string(
"""
a {
d {
g.h.j.u: 5
g {
h.d: 4
}
g.h.k: f d
}
h.i.m = 7
h.i {
d: 5
}
h.i {
e:65
}
}
""")
expected_result = {
"a": {
"d": {
"g": {
"h": {
"j": {
"u": 5
},
"d": 4,
"k": "f d"
}
}
},
"h": {
"i": {
"m": 7,
"d": 5,
"e": 65
}
}
}
}
assert expected_result == config
def test_parse_with_comments(self):
config = ConfigFactory.parse_string(
"""
// comment 1
# comment 2
{
c = test // comment 0
g = 6 test # comment 0
# comment 3
a: { # comment 4
b: test, # comment 5
} # comment 6
t = [1, # comment 7
2, # comment 8
3, # comment 9
]
} # comment 10
// comment 11
// comment 12
"""
)
assert config.get('c') == 'test'
assert config.get('g') == '6 test'
assert config.get('a.b') == 'test'
assert config.get_string('a.b') == 'test'
assert config.get('t') == [1, 2, 3]
def test_missing_config(self):
config = ConfigFactory.parse_string(
"""
a = 5
"""
)
        # b is not set so it should raise an exception
with pytest.raises(ConfigMissingException):
config.get('b')
def test_parse_null(self):
config = ConfigFactory.parse_string(
"""
a = null
"""
)
assert config.get('a') is None
def test_parse_empty(self):
config = ConfigFactory.parse_string(
"""
a =
b = // test
c = # test
d = ,
e = , // test
f = , # test
"""
)
assert config.get('a') == ''
assert config.get('b') == ''
assert config.get('c') == ''
def test_parse_override(self):
config = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
a.b {
c = 7
d = 8
}
}
"""
)
assert config.get('a.b.c') == 7
assert config.get('a.b.d') == 8
def test_concat_dict(self):
config = ConfigFactory.parse_string(
"""
a: {b: 1}
a: {c: 2}
b: {c: 3} {d: 4} {
c: 5
}
"""
)
assert config.get('a.b') == 1
assert config.get('a.c') == 2
assert config.get('b.c') == 5
assert config.get('b.d') == 4
def test_concat_string(self):
config = ConfigFactory.parse_string(
"""
a = a b c
b = 5 b
c = b 7
"""
)
assert config.get('a') == 'a b c'
assert config.get('b') == '5 b'
assert config.get('c') == 'b 7'
def test_concat_list(self):
config = ConfigFactory.parse_string(
"""
a = [1, 2] [3, 4] [
5,
6
]
"""
)
assert config.get('a') == [1, 2, 3, 4, 5, 6]
assert config.get_list('a') == [1, 2, 3, 4, 5, 6]
def test_bad_concat(self):
ConfigFactory.parse_string('a = 45\n')
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string('a = [4] "4"')
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string('a = "4" [5]')
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string('a = {b: 5} "4"')
def test_string_substitutions(self):
config1 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = str
e = "str "
}
}
d = ${a.b.c}
f = ${a.b.e}
}
"""
)
assert config1.get('a.b.c') == 'str'
assert config1.get('d') == 'str'
assert config1.get('f') == 'str '
config2 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = str
e = "str "
}
}
d = test ${a.b.c}
f = test ${a.b.e}
}
"""
)
assert config2.get('a.b.c') == 'str'
assert config2.get('d') == 'test str'
assert config2.get('f') == 'test str '
config3 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = str
e = "str "
}
}
d = test ${a.b.c} me
f = test ${a.b.e} me
}
"""
)
assert config3.get('a.b.c') == 'str'
assert config3.get('d') == 'test str me'
assert config3.get('f') == 'test str me'
def test_int_substitutions(self):
config1 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
d = ${a.b.c}
}
"""
)
assert config1.get('a.b.c') == 5
assert config1.get('d') == 5
config2 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
d = test ${a.b.c}
}
"""
)
assert config2.get('a.b.c') == 5
assert config2.get('d') == 'test 5'
config3 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
d = test ${a.b.c} me
}
"""
)
assert config3.get('a.b.c') == 5
assert config3.get('d') == 'test 5 me'
def test_cascade_string_substitutions(self):
config = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = ${e}
}
}
d = test ${a.b.c} me
e = 7
}
"""
)
assert config.get('a.b.c') == 7
assert config.get('d') == 'test 7 me'
def test_dict_substitutions(self):
config = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = ${data-center-generic} {name = "east"}
"""
)
assert config.get('data-center-east.cluster-size') == 6
assert config.get('data-center-east.name') == 'east'
config2 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic}
"""
)
assert config2.get('data-center-east.cluster-size') == 6
assert config2.get('data-center-east.name') == 'east'
config3 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic} { cluster-size = 9, opts = "-Xmx4g" }
"""
)
assert config3.get('data-center-east.cluster-size') == 9
assert config3.get('data-center-east.name') == 'east'
assert config3.get('data-center-east.opts') == '-Xmx4g'
config4 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic}
data-center-east-prod = ${data-center-east} {tmpDir=/tmp}
"""
)
assert config4.get('data-center-east.cluster-size') == 6
assert config4.get('data-center-east.name') == 'east'
assert config4.get('data-center-east-prod.cluster-size') == 6
assert config4.get('data-center-east-prod.tmpDir') == '/tmp'
def test_list_substitutions(self):
config = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = ${common_modules} [java]
"""
)
assert config.get('host_modules') == ['php', 'python', 'java']
config2 = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = [java] ${common_modules}
"""
)
assert config2.get('host_modules') == ['java', 'php', 'python']
config3 = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = [java] ${common_modules} [perl]
"""
)
assert config3.get('common_modules') == ['php', 'python']
assert config3.get('host_modules') == ['java', 'php', 'python', 'perl']
config4 = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = [java] ${common_modules} [perl]
full_modules = ${host_modules} [c, go]
"""
)
assert config4.get('common_modules') == ['php', 'python']
assert config4.get('host_modules') == ['java', 'php', 'python', 'perl']
assert config4.get('full_modules') == ['java', 'php', 'python', 'perl', 'c', 'go']
def test_non_existent_substitution(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = ${non_existent}
"""
)
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = abc ${non_existent}
"""
)
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = ${non_existent} abc
"""
)
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = abc ${non_existent} def
"""
)
def test_non_compatible_substitution(self):
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = 55 ${common_modules}
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = ${common_modules} 55
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = aa ${common_modules} bb
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = aa ${common_modules}
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = ${common_modules} aa
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = aa ${common_modules} bb
"""
)
def test_concat_multi_line_string(self):
config = ConfigFactory.parse_string(
"""
common_modules = perl \
java \
python
"""
)
assert [x.strip() for x in config['common_modules'].split() if x.strip(' ') != ''] == ['perl', 'java', 'python']
def test_concat_multi_line_list(self):
config = ConfigFactory.parse_string(
"""
common_modules = [perl] \
[java] \
[python]
"""
)
assert config['common_modules'] == ['perl', 'java', 'python']
def test_concat_multi_line_dict(self):
config = ConfigFactory.parse_string(
"""
common_modules = {a:perl} \
{b:java} \
{c:python}
"""
)
assert config['common_modules'] == {'a': 'perl', 'b': 'java', 'c': 'python'}
def test_parse_URL_from_samples(self):
config = ConfigFactory.parse_URL("file:samples/aws.conf")
assert config.get('data-center-generic.cluster-size') == 6
assert config.get('large-jvm-opts') == ['-XX:+UseParNewGC', '-Xm16g']
def test_include_dict_from_samples(self):
config = ConfigFactory.parse_file("samples/animals.conf")
assert config.get('cat.garfield.say') == 'meow'
assert config.get('dog.mutt.hates.garfield.say') == 'meow'
def test_list_of_dicts(self):
config = ConfigFactory.parse_string(
"""
a: [
{a: 1, b: 2},
{a: 3, c: 4},
]
"""
)
assert config['a'] == [
{'a': 1, 'b': 2},
{'a': 3, 'c': 4}
]
def test_list_of_lists(self):
config = ConfigFactory.parse_string(
"""
a: [
[1, 2]
[3, 4]
]
"""
)
assert config['a'] == [
[1, 2],
[3, 4]
]
def test_list_of_dicts_with_merge(self):
config = ConfigFactory.parse_string(
"""
b = {f: 4}
a: [
${b} {a: 1, b: 2},
{a: 3, c: 4} ${b},
{a: 3} ${b} {c: 6},
]
"""
)
assert config['a'] == [
{'a': 1, 'b': 2, 'f': 4},
{'a': 3, 'c': 4, 'f': 4},
{'a': 3, 'c': 6, 'f': 4}
]
def test_list_of_lists_with_merge(self):
config = ConfigFactory.parse_string(
"""
b = [5, 6]
a: [
${b} [1, 2]
[3, 4] ${b}
[1, 2] ${b} [7, 8]
]
"""
)
assert config['a'] == [
[5, 6, 1, 2],
[3, 4, 5, 6],
[1, 2, 5, 6, 7, 8]
]
def test_invalid_assignment(self):
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string('common_modules [perl]')
with pytest.raises(ParseException):
ConfigFactory.parse_string('common_modules {} {perl: 1}')
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string(
"""
a = {f: 5}
common_modules ${a} {perl: 1}
""")
def test_invalid_dict(self):
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string(
"""
a = {
f: 5
g
}
""")
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string('a = {g}')
def test_include_list(self):
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('[1, 2]')
fdin.flush()
config1 = ConfigFactory.parse_string(
"""
a: [
include "{tmp_file}"
3
4
]
""".format(tmp_file=fdin.name)
)
assert config1['a'] == [1, 2, 3, 4]
config2 = ConfigFactory.parse_string(
"""
a: [
3
4
include "{tmp_file}"
]
""".format(tmp_file=fdin.name)
)
assert config2['a'] == [3, 4, 1, 2]
config3 = ConfigFactory.parse_string(
"""
a: [
3
include "{tmp_file}"
4
]
""".format(tmp_file=fdin.name)
)
assert config3['a'] == [3, 1, 2, 4]
def test_include_dict(self):
expected_res = {
'a': 1,
'b': 2,
'c': 3,
'd': 4
}
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('{a: 1, b: 2}')
fdin.flush()
config1 = ConfigFactory.parse_string(
"""
a: {{
include "{tmp_file}"
c: 3
d: 4
}}
""".format(tmp_file=fdin.name)
)
assert config1['a'] == expected_res
config2 = ConfigFactory.parse_string(
"""
a: {{
c: 3
d: 4
include "{tmp_file}"
}}
""".format(tmp_file=fdin.name)
)
assert config2['a'] == expected_res
config3 = ConfigFactory.parse_string(
"""
a: {{
c: 3
include "{tmp_file}"
d: 4
}}
""".format(tmp_file=fdin.name)
)
assert config3['a'] == expected_res
def test_include_substitution(self):
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('y = ${x}')
fdin.flush()
config = ConfigFactory.parse_string(
"""
include "{tmp_file}"
x = 42
""".format(tmp_file=fdin.name)
)
assert config['x'] == 42
assert config['y'] == 42
def test_substitution_override(self):
config = ConfigFactory.parse_string(
"""
database {
host = localhost
port = 5432
user = people
name = peopledb
pass = peoplepass
}
user=test_user
pass=test_pass
database {
user = ${user}
pass = ${pass}
}
""")
assert config['database.user'] == 'test_user'
assert config['database.pass'] == 'test_pass'
def test_substitution_flat_override(self):
config = ConfigFactory.parse_string(
"""
database {
name = peopledb
pass = peoplepass
name = ${?NOT_EXISTS}
pass = ${?NOT_EXISTS}
}
""")
assert config['database.name'] == 'peopledb'
assert config['database.pass'] == 'peoplepass'
def test_substitution_nested_override(self):
config = ConfigFactory.parse_string(
"""
database {
name = peopledb
pass = peoplepass
}
database {
name = ${?user}
pass = ${?pass}
}
""")
assert config['database.name'] == 'peopledb'
assert config['database.pass'] == 'peoplepass'
def test_optional_substitution(self):
config = ConfigFactory.parse_string(
"""
a = 45
b = ${?c}
d = ${?c} 4
e = ${?a}
g = ${?c1} ${?c2}
h = ${?c1} ${?c2} 1
""")
assert 'b' not in config
assert config['d'] == 4
assert config['e'] == 45
assert 'g' not in config
assert config['h'] == 1
def test_substitution_cycle(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
a = ${b}
b = ${c}
c = ${a}
""")
def test_assign_number_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
4
b = # test
# test2
5
c =
6
"""
)
assert config['a'] == 4
assert config['b'] == 5
assert config['c'] == 6
def test_assign_strings_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
"a"
b = # test
# test2
"b"
c =
"c"
"""
)
assert config['a'] == 'a'
assert config['b'] == 'b'
assert config['c'] == 'c'
def test_assign_list_numbers_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
[
1,
2,
]
b = # test
# test2
[
3,
4,]
c =
[
5,
6
]
"""
)
assert config['a'] == [1, 2]
assert config['b'] == [3, 4]
assert config['c'] == [5, 6]
def test_assign_list_strings_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
[
"a",
"b",
]
b = # test
# test2
[
"c",
"d",]
c =
[
"e",
"f"
]
"""
)
assert config['a'] == ['a', 'b']
assert config['b'] == ['c', 'd']
assert config['c'] == ['e', 'f']
def test_assign_dict_strings_with_equal_sign_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
{
a: 1,
b: 2,
}
b = # test
# test2
{
c: 3,
d: 4,}
c =
{
e: 5,
f: 6
}
"""
)
assert config['a'] == {'a': 1, 'b': 2}
assert config['b'] == {'c': 3, 'd': 4}
assert config['c'] == {'e': 5, 'f': 6}
def test_assign_dict_strings_no_equal_sign_with_eol(self):
config = ConfigFactory.parse_string(
"""
a
{
a: 1,
b: 2,
}
b # test
# test2
{
c: 3,
d: 4,}
c
{
e: 5,
f: 6
}
"""
)
assert config['a'] == {'a': 1, 'b': 2}
assert config['b'] == {'c': 3, 'd': 4}
assert config['c'] == {'e': 5, 'f': 6}
def test_substitutions_overwrite(self):
config1 = ConfigFactory.parse_string(
"""
a = 123
a = ${?test}
a = 5
"""
)
assert config1['a'] == 5
config2 = ConfigFactory.parse_string(
"""
{
database {
host = "localhost"
port = 8000
url = ${database.host}":"${database.port}
}
database {
host = ${?DB_HOST}
}
database {
host = "other.host.net"
port = 433
}
}
"""
)
assert config2['database']['host'] == 'other.host.net'
assert config2['database']['port'] == 433
assert config2['database']['url'] == 'other.host.net:433'
|
|
import re
from collections import namedtuple
from django.utils.translation import ugettext_lazy as _
# Add-on and File statuses.
STATUS_NULL = 0 # No review type chosen yet, add-on is incomplete.
STATUS_AWAITING_REVIEW = 1 # File waiting for review.
STATUS_PENDING = 2 # Personas (lightweight themes) waiting for review.
STATUS_NOMINATED = 3 # Waiting for review.
STATUS_APPROVED = 4 # Approved.
STATUS_DISABLED = 5 # Rejected (single files) or disabled by Mozilla (addons).
_STATUS_LISTED = 6 # Deprecated. See bug 616242
_STATUS_BETA = 7 # Deprecated, see addons-server/issues/7163
_STATUS_LITE = 8 # Deprecated, preliminary reviewed.
_STATUS_LITE_AND_NOMINATED = 9 # Deprecated, prelim & waiting for full review.
STATUS_DELETED = 11 # Add-on has been deleted.
STATUS_REJECTED = 12 # This applies only to rejected personas.
STATUS_REVIEW_PENDING = 14 # personas, needing further action.
STATUS_CHOICES_ADDON = {
STATUS_NULL: _(u'Incomplete'),
STATUS_NOMINATED: _(u'Awaiting Review'),
STATUS_APPROVED: _(u'Approved'),
STATUS_DISABLED: _(u'Disabled by Mozilla'),
STATUS_DELETED: _(u'Deleted'),
}
STATUS_CHOICES_PERSONA = {
STATUS_NULL: STATUS_CHOICES_ADDON[STATUS_NULL],
STATUS_PENDING: _(u'Pending approval'),
STATUS_APPROVED: STATUS_CHOICES_ADDON[STATUS_APPROVED],
STATUS_DISABLED: STATUS_CHOICES_ADDON[STATUS_DISABLED],
STATUS_DELETED: STATUS_CHOICES_ADDON[STATUS_DELETED],
STATUS_REJECTED: _(u'Rejected'),
    # Approved, but the developer wants to make it public at a time of their choosing.
STATUS_REVIEW_PENDING: _(u'Flagged for further review'),
}
STATUS_CHOICES_FILE = {
STATUS_AWAITING_REVIEW: _(u'Awaiting Review'),
STATUS_APPROVED: _(u'Approved'),
STATUS_DISABLED: _(u'Disabled by Mozilla'),
}
# We need to expose nice values that aren't localisable.
STATUS_CHOICES_API = {
STATUS_NULL: 'incomplete',
STATUS_AWAITING_REVIEW: 'unreviewed',
STATUS_PENDING: 'pending',
STATUS_NOMINATED: 'nominated',
STATUS_APPROVED: 'public',
STATUS_DISABLED: 'disabled',
STATUS_DELETED: 'deleted',
STATUS_REJECTED: 'rejected',
STATUS_REVIEW_PENDING: 'review-pending',
}
STATUS_CHOICES_API_LOOKUP = {
'incomplete': STATUS_NULL,
'unreviewed': STATUS_AWAITING_REVIEW,
'pending': STATUS_PENDING,
'nominated': STATUS_NOMINATED,
'public': STATUS_APPROVED,
'disabled': STATUS_DISABLED,
'deleted': STATUS_DELETED,
'rejected': STATUS_REJECTED,
'review-pending': STATUS_REVIEW_PENDING,
}
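# Illustrative note (not part of the original constants): the two mappings
# above are inverses of each other, e.g. STATUS_CHOICES_API[STATUS_APPROVED]
# is 'public' and STATUS_CHOICES_API_LOOKUP['public'] is STATUS_APPROVED, so
# API slugs can be translated to statuses and back without localisation.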
REVIEWED_STATUSES = (STATUS_APPROVED,)
UNREVIEWED_ADDON_STATUSES = (STATUS_NOMINATED,)
UNREVIEWED_FILE_STATUSES = (STATUS_AWAITING_REVIEW, STATUS_PENDING)
VALID_ADDON_STATUSES = (STATUS_NOMINATED, STATUS_APPROVED)
VALID_FILE_STATUSES = (STATUS_AWAITING_REVIEW, STATUS_APPROVED)
# Version channels
RELEASE_CHANNEL_UNLISTED = 1
RELEASE_CHANNEL_LISTED = 2
RELEASE_CHANNEL_CHOICES = (
(RELEASE_CHANNEL_UNLISTED, _(u'Unlisted')),
(RELEASE_CHANNEL_LISTED, _(u'Listed')),
)
CHANNEL_CHOICES_API = {
RELEASE_CHANNEL_UNLISTED: 'unlisted',
RELEASE_CHANNEL_LISTED: 'listed',
}
CHANNEL_CHOICES_LOOKUP = {
'unlisted': RELEASE_CHANNEL_UNLISTED,
'listed': RELEASE_CHANNEL_LISTED,
}
# Add-on author roles.
AUTHOR_ROLE_DEV = 4
AUTHOR_ROLE_OWNER = 5
AUTHOR_CHOICES = (
(AUTHOR_ROLE_OWNER, _(u'Owner')),
(AUTHOR_ROLE_DEV, _(u'Developer')),
)
# Addon types
ADDON_ANY = 0
ADDON_EXTENSION = 1
ADDON_THEME = 2
ADDON_DICT = 3
ADDON_SEARCH = 4
ADDON_LPAPP = 5
ADDON_LPADDON = 6
ADDON_PLUGIN = 7
ADDON_API = 8 # not actually a type but used to identify extensions + themes
ADDON_PERSONA = 9
ADDON_STATICTHEME = 10
# Addon type groupings.
GROUP_TYPE_ADDON = [ADDON_EXTENSION, ADDON_DICT, ADDON_SEARCH, ADDON_LPAPP,
ADDON_LPADDON, ADDON_PLUGIN, ADDON_API]
GROUP_TYPE_THEME = [ADDON_THEME, ADDON_PERSONA, ADDON_STATICTHEME]
# Singular
ADDON_TYPE = {
ADDON_EXTENSION: _(u'Extension'),
ADDON_THEME: _(u'Complete Theme'),
ADDON_DICT: _(u'Dictionary'),
ADDON_SEARCH: _(u'Search Engine'),
ADDON_LPAPP: _(u'Language Pack (Application)'),
ADDON_LPADDON: _(u'Language Pack (Add-on)'),
ADDON_PLUGIN: _(u'Plugin'),
ADDON_PERSONA: _(u'Theme'),
ADDON_STATICTHEME: _(u'Theme (Static)'),
}
# Plural
ADDON_TYPES = {
ADDON_EXTENSION: _(u'Extensions'),
ADDON_THEME: _(u'Complete Themes'),
ADDON_DICT: _(u'Dictionaries'),
ADDON_SEARCH: _(u'Search Tools'),
ADDON_LPAPP: _(u'Language Packs (Application)'),
ADDON_LPADDON: _(u'Language Packs (Add-on)'),
ADDON_PLUGIN: _(u'Plugins'),
ADDON_PERSONA: _(u'Themes'),
ADDON_STATICTHEME: _(u'Themes (Static)'),
}
# Searchable Add-on Types
ADDON_SEARCH_TYPES = [
ADDON_ANY,
ADDON_EXTENSION,
ADDON_THEME,
ADDON_DICT,
ADDON_SEARCH,
ADDON_LPAPP,
ADDON_PERSONA,
ADDON_STATICTHEME,
]
# Icons
ADDON_ICONS = {
ADDON_ANY: 'default-addon.png',
ADDON_THEME: 'default-theme.png',
ADDON_STATICTHEME: 'default-theme.png',
}
# We use these slugs in browse page urls.
ADDON_SLUGS = {
ADDON_EXTENSION: 'extensions',
ADDON_DICT: 'language-tools',
ADDON_LPAPP: 'language-tools',
ADDON_SEARCH: 'search-tools',
ADDON_STATICTHEME: 'themes',
}
# These are used in the update API.
ADDON_SLUGS_UPDATE = {
ADDON_EXTENSION: 'extension',
ADDON_THEME: 'theme',
ADDON_DICT: 'extension',
ADDON_SEARCH: 'search',
ADDON_LPAPP: 'item',
ADDON_LPADDON: 'extension',
ADDON_PERSONA: 'background-theme',
ADDON_PLUGIN: 'plugin',
ADDON_STATICTHEME: 'static-theme',
}
# A slug to ID map for the search API. Included are all ADDON_TYPES that are
# found in ADDON_SEARCH_TYPES.
ADDON_SEARCH_SLUGS = {
'any': ADDON_ANY,
'extension': ADDON_EXTENSION,
'theme': ADDON_THEME,
'dictionary': ADDON_DICT,
'search': ADDON_SEARCH,
'language': ADDON_LPAPP,
'persona': ADDON_PERSONA,
'statictheme': ADDON_STATICTHEME,
}
ADDON_TYPE_CHOICES_API = {
ADDON_EXTENSION: 'extension',
ADDON_THEME: 'theme',
ADDON_DICT: 'dictionary',
ADDON_SEARCH: 'search',
ADDON_LPAPP: 'language',
ADDON_PERSONA: 'persona',
ADDON_STATICTHEME: 'statictheme',
}
# Edit addon information
MAX_TAGS = 20
MIN_TAG_LENGTH = 2
MAX_CATEGORIES = 2
VALID_CONTRIBUTION_DOMAINS = (
'donate.mozilla.org',
'liberapay.com',
'micropayment.de',
'opencollective.com',
'patreon.com',
'paypal.com',
'paypal.me'
)
# Icon upload sizes
ADDON_ICON_SIZES = [32, 64, 128]
_size_tuple = namedtuple('SizeTuple', 'width height')
# Preview upload sizes - see mozilla/addons-server#9487 for background.
ADDON_PREVIEW_SIZES = {
'thumb': _size_tuple(640, 480),
'min': _size_tuple(1000, 750),
'full': _size_tuple(2400, 1800)
}
# Static theme preview sizes
THEME_PREVIEW_SIZES = {
'header': {
'thumbnail': _size_tuple(473, 64),
'full': _size_tuple(680, 92),
'position': 0},
'list': {
'thumbnail': _size_tuple(529, 64),
'full': _size_tuple(760, 92),
'position': 1},
# single is planned to be the new default size in 2019 Q1.
'single': {
'thumbnail': _size_tuple(501, 64),
'full': _size_tuple(720, 92),
'position': 2},
}
THEME_FRAME_COLOR_DEFAULT = 'rgba(229,230,232,1)'
THEME_PREVIEW_TOOLBAR_HEIGHT = 92 # The template toolbar is this height.
# Persona image sizes [preview, full]
PERSONA_IMAGE_SIZES = {
'header': [(680, 100), (3000, 200)],
'footer': [None, (3000, 100)],
'icon': [None, (32, 32)],
}
# Accepted image extensions and MIME-types
THEME_BACKGROUND_EXTS = ('.jpg', '.jpeg', '.png', '.apng', '.svg', '.gif')
IMG_TYPES = ('image/png', 'image/jpeg')
VIDEO_TYPES = ('video/webm',)
# The string concatenating all accepted image MIME-types with '|'
SUPPORTED_IMAGE_TYPES = '|'.join(IMG_TYPES)
# Acceptable Add-on file extensions.
# This is being used by `parse_addon` so please make sure we don't have
# to touch add-ons before removing anything from this list.
VALID_ADDON_FILE_EXTENSIONS = ('.crx', '.xpi', '.jar', '.xml', '.json', '.zip')
# These types don't maintain app compatibility in the db. Instead, we look at
# APP.types and APP_TYPE_SUPPORT to figure out where they are compatible.
NO_COMPAT = (ADDON_SEARCH, ADDON_DICT, ADDON_PERSONA)
HAS_COMPAT = {t: t not in NO_COMPAT for t in ADDON_TYPES}
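# Illustrative note (not part of the original constants): HAS_COMPAT is keyed
# by the types in ADDON_TYPES, e.g. HAS_COMPAT[ADDON_EXTENSION] is True while
# HAS_COMPAT[ADDON_DICT] is False because ADDON_DICT appears in NO_COMPAT.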
# Personas
PERSONAS_ADDON_ID = 10900 # Add-on ID of the Personas Plus Add-on
PERSONAS_FIREFOX_MIN = '3.6' # First Firefox version to support Personas
# Collections.
COLLECTION_NORMAL = 0
COLLECTION_SYNCHRONIZED = 1
COLLECTION_FEATURED = 2
COLLECTION_RECOMMENDED = 3
COLLECTION_FAVORITES = 4
COLLECTION_MOBILE = 5
COLLECTION_ANONYMOUS = 6
COLLECTIONS_NO_CONTRIB = (COLLECTION_SYNCHRONIZED, COLLECTION_FAVORITES)
COLLECTION_SPECIAL_SLUGS = {
COLLECTION_MOBILE: 'mobile',
COLLECTION_FAVORITES: 'favorites',
}
COLLECTION_CHOICES = {
COLLECTION_NORMAL: 'Normal',
COLLECTION_SYNCHRONIZED: 'Synchronized',
COLLECTION_FEATURED: 'Featured',
COLLECTION_RECOMMENDED: 'Generated Recommendations',
COLLECTION_FAVORITES: 'Favorites',
COLLECTION_MOBILE: 'Mobile',
COLLECTION_ANONYMOUS: 'Anonymous',
}
COLLECTION_SEARCH_CHOICES = [
COLLECTION_NORMAL,
COLLECTION_FEATURED,
COLLECTION_RECOMMENDED,
COLLECTION_MOBILE,
COLLECTION_ANONYMOUS,
]
# Validation.
# A skeleton set of passing validation results.
VALIDATOR_SKELETON_RESULTS = {
"errors": 0,
"warnings": 0,
"notices": 0,
"success": True,
"compatibility_summary": {"notices": 0, "errors": 0, "warnings": 0},
"metadata": {
"listed": True,
},
"messages": [],
"message_tree": {},
"ending_tier": 5,
}
# A skeleton set of validation results for a system error.
VALIDATOR_SKELETON_EXCEPTION_WEBEXT = {
"errors": 1,
"warnings": 0,
"notices": 0,
"success": False,
"compatibility_summary": {"notices": 0, "errors": 0, "warnings": 0},
"metadata": {
"listed": True,
"is_webextension": True,
},
"messages": [
{"id": ["validator", "unexpected_exception"],
"message": "Sorry, we couldn't load your WebExtension.",
"description": [
"Validation was unable to complete successfully due to an "
"unexpected error.",
"Check https://developer.mozilla.org/en-US/Add-ons/WebExtensions "
"to ensure your webextension is valid or file a bug at "
"http://bit.ly/1POrYYU"],
"type": "error",
"fatal": True,
"tier": 1,
"for_appversions": None,
"uid": "35432f419340461897aa8362398339c4"}
],
"message_tree": {},
"ending_tier": 5,
}
VERSION_SEARCH = re.compile(r'\.(\d+)$')
# For use in urls.
ADDON_ID = r"""(?P<addon_id>[^/<>"']+)"""
ADDON_UUID = r'(?P<uuid>[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12})'
# Default strict_min_version and strict_max_version for WebExtensions
DEFAULT_WEBEXT_MIN_VERSION = '42.0'
DEFAULT_WEBEXT_MAX_VERSION = '*'
# Android only started to support WebExtensions with version 48
DEFAULT_WEBEXT_MIN_VERSION_ANDROID = '48.0'
# The default version of Firefox that supports WebExtensions without an id
DEFAULT_WEBEXT_MIN_VERSION_NO_ID = '48.0'
# The default version of Firefox that supported `browser_specific_settings`
DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC = '48.0'
# The version of desktop Firefox that first supported static themes.
DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX = '53.0'
# The version of Android that first minimally supported static themes.
DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID = '65.0'
# The version of Firefox that first supported webext dictionaries. Note that
# on AMO at the moment, dicts have no compatibility exposed - ADDON_DICT is in
# NO_COMPAT. But this allows the compat information to be saved to the database
# in case we change our mind later.
# Dicts are not compatible with Firefox for Android, only desktop is relevant.
DEFAULT_WEBEXT_DICT_MIN_VERSION_FIREFOX = '61.0'
ADDON_GUID_PATTERN = re.compile(
    # Match {uuid} or something@host.tld ("something" being optional)
# guids. Copied from mozilla-central XPIProvider.jsm.
r'^(\{[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\}'
r'|[a-z0-9-\._]*\@[a-z0-9-\._]+)$', re.IGNORECASE)
SYSTEM_ADDON_GUIDS = (
u'@mozilla.org', u'@shield.mozilla.org', u'@pioneer.mozilla.org',
u'@mozilla.com')
MOZILLA_TRADEMARK_SYMBOLS = (
'mozilla', 'firefox')
ALLOWED_TRADEMARK_SUBMITTING_EMAILS = (
'@mozilla.com', '@mozilla.org')
# If you add/remove any sources, update the docs: /api/download_sources.html
# Note there are some additional sources here for historical/backwards compat.
DOWNLOAD_SOURCES_FULL = (
'addondetail', 'addon-detail-version', 'api', 'category', 'collection',
'creatured', 'developers', 'discovery-dependencies', 'discovery-upsell',
'discovery-video', 'email', 'find-replacement', 'fxcustomization',
'fxfirstrun', 'fxwhatsnew', 'homepagebrowse', 'homepagepromo',
'installservice', 'mostshared', 'oftenusedwith', 'prerelease-banner',
'recommended', 'rockyourfirefox', 'search', 'sharingapi',
'similarcollections', 'ss', 'userprofile', 'version-history',
'co-hc-sidebar', 'co-dp-sidebar',
'cb-hc-featured', 'cb-dl-featured', 'cb-hc-toprated', 'cb-dl-toprated',
'cb-hc-mostpopular', 'cb-dl-mostpopular', 'cb-hc-recentlyadded',
'cb-dl-recentlyadded',
'hp-btn-promo', 'hp-dl-promo', 'hp-hc-featured', 'hp-dl-featured',
'hp-hc-upandcoming', 'hp-dl-upandcoming', 'hp-hc-mostpopular',
'hp-dl-mostpopular', 'hp-contest-winners',
'dp-hc-oftenusedwith', 'dp-dl-oftenusedwith', 'dp-hc-othersby',
'dp-dl-othersby', 'dp-btn-primary', 'dp-btn-version', 'dp-btn-devchannel',
'dp-hc-dependencies', 'dp-dl-dependencies', 'dp-hc-upsell', 'dp-dl-upsell',
)
DOWNLOAD_SOURCES_PREFIX = (
'external-', 'mozcom-', 'discovery-', 'cb-btn-', 'cb-dl-')
|
|
from __future__ import print_function
import ply.lex as lex
from itertools import groupby
from datetime import datetime
import re
class ParseError(Exception):
def __init__(self, message, **kwargs):
super(Exception, self).__init__(message)
self.info = kwargs
reserved = set(['after', 'receive', 'yield', 'quit', 'window', 'repeat', 'in', 'foreach', 'to', 'merged', 'results',
'start_timestamp'])
tokens = [
'TIMEDELTA',
'TIMESTAMP', 'STRING', 'NUMBER',
'COMMA',
'WILDCARD', 'ARROW', 'EQ', 'LT', 'GT', 'LTE', 'GTE',
'SCALAR', 'HASH', 'SCALAR_RESULT', 'ARRAY', 'MULTISET', 'HLL',
'ID', 'WS', 'INDENT', 'NEWLINE', 'DEDENT', 'LBRACKET', 'RBRACKET',
'LPAREN', 'RPAREN'
] + [r.upper() for r in reserved]
type_names = {
'ID': 'identifier'
}
# Tokens
t_LT = r'<'
t_GT = r'>'
t_LTE = r'<='
t_GTE = r'>='
t_COMMA = r','
t_WILDCARD = r'\*'
t_ARROW = r'->'
t_EQ = r'='
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LPAREN = r'\('
t_RPAREN = r'\)'
#t_WS = r'[ \t]+'
def t_TIMEDELTA(t):
r'\d+(s|m|h|d)'
try:
t.value = int(t.value[:-1]), t.value[-1]
except ValueError:
        print("Integer value too large %d" % t.value)
t.value = 0
return t
def t_NUMBER(t):
r'\d+'
try:
t.value = int(t.value)
except ValueError:
        print("Integer value too large %d" % t.value)
t.value = 0
return t
def t_TIMESTAMP(t):
r'\'\d{4}-\d{2}-\d{2}\''
try:
t.value = int((datetime.strptime(t.value.strip("'"), '%Y-%m-%d') - datetime(1970, 1, 1)).total_seconds())
except ValueError:
print("Cannot parse datetime", t.value)
t.value = 0
return t
def t_ID(t):
r'[a-zA-Z_][a-zA-Z_0-9]*'
t.type = t.value.upper() if t.value in reserved else 'ID'
return t
def t_SCALAR(t):
r'%[a-zA-Z_][a-zA-Z_0-9]*'
return t
def t_HASH(t):
r'\#[a-zA-Z_][a-zA-Z_0-9]*'
return t
def t_MULTISET(t):
r'&[a-zA-Z_][a-zA-Z_0-9]*'
return t
def t_HLL(t):
r'\^[a-zA-Z_][a-zA-Z_0-9]*'
return t
def t_ARRAY(t):
r'@[a-zA-Z_][a-zA-Z_0-9]*'
return t
def t_SCALAR_RESULT(t):
r'\$[a-zA-Z_][a-zA-Z_0-9]*'
return t
def t_STRING(t):
r'("(\\"|[^"])*")|(\'(\\\'|[^\'])*\')'
t.value = t.value[1:-1]
return t
def t_comment(t):
r"[ ]*--[^\n]*"
pass
def t_ws(t):
r'[ ]+'
t.type = 'WS'
return t
def t_newline_escape(t):
r"\\\n"
pass
def t_newline(t):
r'\n'
t.lexer.lineno += t.value.count("\n")
t.type = 'NEWLINE'
t.value = ''
return t
#def t_indent(t):
# r'\n+[ \t]*'
# t.lexer.lineno += t.value.count("\n")
# t.type = 'INDENT'
# return t
def t_error(t):
if t.lineno == -1:
raise ParseError(message="Lexer error: unexpected EOF")
else:
raise ParseError(message="Lexer error at line %s position %s: invalid token %s" % (t.lineno, t.lexpos, t.value),
lineno=t.lineno,
lexpos=t.lexpos,
type=t.type,
value=t.value)
class IndentLexer:
def __init__(self, lexer):
self.lexer = lexer
self.gen = gen_dedents(gen_indents(skip_begin_newlines(lexer)))
def input(self, *args, **kwds):
self.lexer.input(*args, **kwds)
def token(self):
try:
            return next(self.gen)
except StopIteration:
return None
def __iter__(self):
return gen_dedents(gen_indents(skip_begin_newlines(self.lexer)))
def indent_level(v):
spaces = v.replace("\t", " ").replace("\n", "")
return len(spaces)
def skip_begin_newlines(lexer):
program_started = False
for token in lexer:
if program_started:
yield token
else:
if token.type not in ('NEWLINE', 'WS'):
program_started = True
yield token
def gen_indents(lexer):
prev = None
line_started = False
for token in lexer:
#print token
if token.type not in ('NEWLINE', 'WS'):
if not line_started:
line_started = True
                if prev:
yield _new_token('INDENT', token.lineno, value=prev.value)
yield token
prev = token
elif token.type == 'NEWLINE':
line_started = False
prev = token
elif token.type == 'WS':
prev = token
def gen_dedents(lexer):
stack = [0]
for token in lexer:
if token.type != 'INDENT':
yield token
else:
level = indent_level(token.value)
if level == stack[-1]:
yield _new_token('NEWLINE', token.lineno)
continue
elif level < stack[-1]:
while stack[-1] > level:
stack_level = stack.pop()
if stack_level > 0:
yield _new_token('DEDENT', token.lineno)
if stack[-1] != level:
raise ParseError("Indent level doesn't match earlier at %s: stack %s now %s" % (token.lineno, stack, level))
elif level > stack[-1]:
stack.append(level)
yield _new_token('INDENT', token.lineno)
while stack:
stack_level = stack.pop()
if stack_level > 0:
yield _new_token('DEDENT', -1)
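# Illustrative sketch (not part of the original lexer): gen_indents() emits an
# INDENT token carrying the leading whitespace of every non-blank line after
# the first, and gen_dedents() resolves those against a stack of indent widths
# into INDENT/DEDENT/NEWLINE tokens. Roughly, for the input
#
#   a ->
#       b
#   c
#
# the resulting stream is: ID(a) ARROW INDENT ID(b) DEDENT ID(c), with any
# still-open indent levels closed by DEDENT tokens at end of input.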
def _new_token(type, lineno, value=None):
tok = lex.LexToken()
tok.type = type
tok.lineno = lineno
tok.value = value
tok.lexpos = -100
return tok
def timedelta_to_seconds(n, unit):
if unit == 's':
return n
elif unit == 'm':
return n * 60
elif unit == 'h':
return n * 60 * 60
elif unit == 'd':
return n * 60 * 60 * 24
else:
raise ParseError("unknown time unit: %s" % unit)
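# Illustrative sketch (not part of the original parser): t_TIMEDELTA turns a
# literal such as '5m' into the tuple (5, 'm'), which timedelta_to_seconds()
# then converts, e.g. timedelta_to_seconds(5, 'm') == 300 and
# timedelta_to_seconds(1, 'd') == 86400.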
def p_program(p):
"""program : foreach_expr INDENT rules DEDENT
| rules"""
if len(p) > 2:
p[0] = {'rules' : p[3], 'groupby' : p[1]}
else:
p[0] = {'rules' : p[1]}
def p_foreach_expr(p):
""" foreach_expr : FOREACH vars IN ARRAY
| FOREACH vars IN ARRAY MERGED
| FOREACH vars IN ARRAY MERGED RESULTS """
p[0] = {'vars': p[2], 'values': p[4], "lineno": p.lineno(2)}
if len(p) > 5:
p[0]['merge_results'] = True
def p_foreach_expr_imp(p):
""" foreach_expr : FOREACH SCALAR
| FOREACH SCALAR MERGED
| FOREACH SCALAR MERGED RESULTS """
p[0] = {'vars': [p[2]], "lineno": p.lineno(2)}
if len(p) > 3:
p[0]['merge_results'] = True
def p_vars(p):
"""vars : vars COMMA var
| var """
if len(p) > 2:
p[0] = p[1] + [p[3]]
else:
p[0] = [p[1]]
def p_var(p):
""" var : HASH
| SCALAR
"""
p[0] = p[1]
def p_rules(p):
"""rules : rules rule
| rule """
if len(p) > 2:
p[0] = p[1] + [p[2]]
else:
p[0] = [p[1]]
def p_rule(p):
""" rule : ID ARROW INDENT rule_body DEDENT
"""
p[0] = {k : v for k, v in p[4].items() + [('name', p[1])]}
def p_rule_body(p):
""" rule_body : window_stmt
| receive_stmt
"""
p[0] = p[1]
def p_windowed_rule(p):
""" window_stmt : WINDOW INDENT rules DEDENT AFTER TIMEDELTA ARROW actions
"""
p[0] = {'rules' : p[3], 'after' : p[8], 'window' : timedelta_to_seconds(*p[6])}
def p_receive_rule(p):
""" receive_stmt : RECEIVE INDENT match_clauses DEDENT
"""
p[0] = {'clauses' : p[3]}
def p_receive_rule2(p):
""" receive_stmt : RECEIVE INDENT match_clauses DEDENT AFTER TIMEDELTA ARROW actions """
p[0] = {'clauses' : p[3], 'window' : timedelta_to_seconds(*p[6]), 'after' : p[8] }
def p_receive_rule3(p):
""" receive_stmt : RECEIVE INDENT match_clauses DEDENT AFTER ARROW actions """
p[0] = {'clauses' : p[3], 'after' : p[7] }
def p_match_clauses(p):
"""match_clauses : match_clauses NEWLINE match_clause
| match_clause """
if len(p) > 2:
p[0] = p[1] + [p[3]]
else:
p[0] = [p[1]]
def p_match_clause(p):
""" match_clause : conditions ARROW actions """
p[0] = {k:v for k, v in [("attrs", p[1]), ("lineno", p.lineno(2))] + p[3].items()}
def p_match_clause2(p):
""" match_clause : WILDCARD ARROW actions """
p[0] = {k:v for k, v in [("attrs", {}), ("lineno", p.lineno(2))] + p[3].items()}
# concatitems() flattens the values of an iterable of (key, value) pairs:
# concatitems([('a', [1]), ('a', [2])]) -> [1, 2]
# concatitems([('a', 1), ('a', [2])]) -> [1, 2]
# concatitems([('a', 1)]) -> [1]
def concatitems(items):
assert(items)
res = []
for k, v in items:
if isinstance(v, list):
res.extend(v)
else:
res.append(v)
return res
def p_conditions(p):
"""conditions : conditions COMMA condition
| condition """
if len(p) > 2:
p[0] = {k: concatitems(v) for k, v in groupby(sorted(p[1].items() + p[3].items()),
key=lambda x: x[0])}
else:
p[0] = p[1]
def p_condition(p):
""" condition : ID EQ STRING
| ID EQ SCALAR """
p[0] = {p[1]: [p[3]]}
def p_condition_hash(p):
""" condition : ID IN HASH"""
p[0] = {p[1]: [p[3]]}
def p_condition_ts(p):
""" condition : ID LT TIMESTAMP
| ID GT TIMESTAMP
| ID GTE TIMESTAMP
| ID LTE TIMESTAMP """
p[0] = {p[1]: [p[2] + str(p[3])]}
def p_condition_ts_2(p):
""" condition : ID LT NUMBER
| ID GT NUMBER
| ID GTE NUMBER
| ID LTE NUMBER """
p[0] = {p[1]: [p[2] + str(p[3])]}
def p_condition_ts_3(p):
""" condition : ID LT SCALAR
| ID GT SCALAR
| ID GTE SCALAR
| ID LTE SCALAR """
p[0] = {p[1]: [p[2] + str(p[3])]}
def p_actions(p):
""" actions : yields COMMA transition """
p[0] = {'yield' : p[1], 'action' : p[3]}
def p_actions_2(p):
""" actions : yields """
p[0] = {'yield' : p[1]}
def p_actions_3(p):
""" actions : transition """
p[0] = {'action' : p[1]}
def p_action_yields(p):
""" yields : yields COMMA YIELD yield_var
| YIELD yield_var """
if len(p) == 3:
p[0] = [p[2]]
else:
p[0] = p[1] + [p[4]]
def p_action_yield_var(p):
""" yield_var : SCALAR_RESULT
"""
p[0] = {'dst': p[1]}
def p_action_yield_set(p):
""" yield_var : ID TO HASH """
p[0] = {'dst': p[3], 'src': [{'_k': 'field', 'name': p[1]}]}
def p_action_yield_multiset(p):
""" yield_var : ID TO MULTISET """
p[0] = {'dst': p[3], 'src': [{'_k': 'field', 'name': p[1]}]}
def p_action_yield_hll(p):
""" yield_var : ID TO HLL """
p[0] = {'dst': p[3], 'src': [{'_k': 'field', 'name': p[1]}]}
def p_action_yield_set_tuple(p):
""" yield_var : ids TO HASH """
p[0] = {'dst': p[3], 'src': p[1]}
def p_action_yield_multiset_tuple(p):
""" yield_var : ids TO MULTISET """
p[0] = {'dst': p[3], 'src': p[1]}
def p_action_yield_hll_tuple(p):
""" yield_var : ids TO HLL """
p[0] = {'dst': p[3], 'src': p[1]}
def p_ids(p):
"""ids : ids COMMA yieldable
| yieldable """
if len(p) > 2:
p[0] = p[1] + [p[3]]
else:
p[0] = [p[1]]
def p_yieldable(p):
""" yieldable : ID """
p[0] = {'_k': 'field', 'name': p[1]}
def p_yieldable_start_ts(p):
""" yieldable : START_TIMESTAMP """
p[0] = {'_k': 'window_ref'}
def p_yieldable_fcall(p):
""" yieldable : fcall """
p[0] = p[1]
def p_yieldable_windowref(p):
""" yieldable : START_TIMESTAMP LBRACKET ID RBRACKET """
p[0] = {'_k': 'window_ref', 'ref': p[3]}
def p_fcall(p):
""" fcall : ID LPAREN arglist RPAREN """
p[0] = {'_k': 'fcall',
'name': p[1],
'args': p[3]}
def p_arglist(p):
""" arglist : arglist COMMA arg
| arg """
if len(p) == 2:
p[0] = [p[1]]
elif len(p) == 4:
p[0] = p[1] + [p[3]]
def p_arg_id(p):
""" arg : ID """
p[0] = {'_k': 'field', 'name': p[1]}
def p_arg_scalar(p):
""" arg : SCALAR """
p[0] = {'_k': 'param', 'name': p[1]}
def p_arg_fcall(p):
""" arg : fcall """
p[0] = p[1]
def p_arg_ts(p):
""" arg : START_TIMESTAMP LBRACKET ID RBRACKET """
p[0] = {'_k': 'window_ref', 'ref': p[3]}
def p_arg_literal(p):
""" arg : STRING
| NUMBER """
p[0] = {'_k': 'literal', 'value': p[1]}
def p_transition(p):
""" transition : ID """
p[0] = p[1]
def p_transition2(p):
""" transition : QUIT
| REPEAT"""
p[0] = p[1]
def p_error(p):
if p is None or p.lineno == -1:
raise ParseError(message="Syntax error: unexpected EOF")
else:
raise ParseError(message="Syntax error at line %s position %s: %s %s" % (p.lineno, p.lexpos, type_names.get(p.type, p.type), p.value),
lineno=p.lineno,
lexpos=p.lexpos,
type=p.type,
value=p.value)
# Convert a structure with nested window() statements to a flat list of rules
# Replace transitions with numeric labels. Use restart-from-next(%label)
# in rule matcher clauses, and restart-from-here(%label) in after actions.
def assign_numeric_labels(rules, n = 0):
for r in rules:
r['n'] = n
n += 1
if 'rules' in r:
n = assign_numeric_labels(r['rules'], n)
r['outer'] = n
return n
def flatten_rules(rules):
for r in rules:
nested = r.get('rules')
if nested:
del r['rules']
yield r
if nested:
for r in flatten_rules(nested):
yield r
reserved_actions = ['repeat', 'quit']
def convert_transitions(rules):
numeric = {r['name'] : r['n'] for r in rules}
for r in rules:
if 'after' in r:
if 'action' in r['after']:
action = r['after']['action']
if action not in reserved_actions:
r['after']['action'] = 'restart-from-here(%d)' % numeric[action]
else:
r['after']['action'] = 'restart-from-here'
for c in r.get('clauses', []):
if 'action' in c:
action = c['action']
if action not in reserved_actions:
if action not in numeric:
raise ParseError(message='Label not found: %s' % action, lineno=c.get('lineno'), lexpos=c.get('lexpos'))
c['action'] = 'restart-from-next(%d)' % numeric[action]
else:
if r['n'] >= 1:
raise ParseError(message='Consider adding repeat here', lineno=c.get('lineno'), lexpos=c.get('lexpos'))
else:
c['action'] = 'repeat'
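# Illustrative sketch (not part of the original compiler): given the
# hypothetical nested structure
#
#   rules = [{'name': 'outer', 'rules': [{'name': 'inner'}]}]
#
# assign_numeric_labels(rules) adds 'n': 0 to 'outer', 'n': 1 to 'inner' and
# 'outer': 2 to the enclosing rule, and list(flatten_rules(rules)) yields the
# outer rule (with its 'rules' key removed) followed by the inner rule, which
# convert_transitions() can then rewrite against the numeric labels.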
import ply.yacc as yacc
parser = yacc.yacc()
# Build the lexer
lexer = lex.lex()
lexer = IndentLexer(lexer)
import sys
import json
EXPR_TYPE_CONST = 'const'
EXPR_TYPE_IN_VAR = 'in_var'
EXPR_TYPE_TIMESTAMP_OP_VAR = 'timestamp_op_var'
EXPR_TYPE_TIMESTAMP_OP_CONST = 'timestamp_op_const'
def is_variable(n):
if n == '':
return False
return n[0] in '#&%$@'
def parse_expr(expr_string):
m = re.match('((>=)|(<=)|(==)|(<)|(>))(.+)', expr_string)
if m:
if is_variable(m.group(7)):
return (EXPR_TYPE_TIMESTAMP_OP_VAR, (m.group(1), m.group(7)))
else:
return (EXPR_TYPE_TIMESTAMP_OP_CONST, (m.group(1), m.group(7)))
if is_variable(expr_string):
return (EXPR_TYPE_IN_VAR, (expr_string,))
else:
return (EXPR_TYPE_CONST, (expr_string,))
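# Illustrative sketch (not part of the original module): parse_expr() splits a
# condition value into its kind and payload, e.g.
#   parse_expr('>=%start') == (EXPR_TYPE_TIMESTAMP_OP_VAR, ('>=', '%start'))
#   parse_expr('%uid')     == (EXPR_TYPE_IN_VAR, ('%uid',))
#   parse_expr('login')    == (EXPR_TYPE_CONST, ('login',))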
def get_var_fields(rules):
res = {}
for rule in rules:
for clause in rule.get('clauses', []):
for field, conditions in clause.get('attrs', {}).items():
for expr in conditions:
t, r = parse_expr(expr)
if t == EXPR_TYPE_IN_VAR:
res[r[0]] = field
elif t == EXPR_TYPE_TIMESTAMP_OP_VAR:
res[r[1]] = field
return res
def compile_tr(text):
lexer.input(text)
result = parser.parse(lexer = lexer)
assign_numeric_labels(result['rules'])
flat_rules = list(flatten_rules(result['rules']))
convert_transitions(flat_rules)
if 'groupby' in result:
return { 'rules' : flat_rules, 'groupby': result['groupby']}
else:
return {'rules' : flat_rules}
def syntax_check(text):
try:
parser = yacc.yacc()
# Build the lexer
lexer = lex.lex()
lexer = IndentLexer(lexer)
sys.stderr.write("text %s\n" % text)
lexer.input(text)
result = parser.parse(lexer=lexer)
assign_numeric_labels(result['rules'])
flat_rules = list(flatten_rules(result['rules']))
convert_transitions(flat_rules)
return []
except ParseError as e:
sys.stderr.write("exception %s %s\n" % (e.message, lexer.lexer.lineno))
return [{'message' : e.message, 'info' : e.info}]
if __name__ == '__main__':
if len(sys.argv) == 1:
flat_rules = compile_tr(sys.stdin.read())
print(json.dumps(flat_rules))
elif sys.argv[1] == 'lex':
lexer.input(sys.stdin.read())
for t in lexer:
print(t.lineno, t.type, t.value)
elif sys.argv[1] == 'gen':
pass
|
|
# -*- coding: UTF-8 -*-
# Copyright 2010-2020 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from django import VERSION
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.db.utils import DatabaseError
from django.core.exceptions import FieldDoesNotExist
from django.contrib.contenttypes.models import ContentType, models
from lino.core.roles import SiteStaff
from lino.api import dd, rt
from etgen.html import E
from lino.utils import join_elems
from lino.core.utils import get_models
if VERSION[0] > 2:
# restore Django 2 behaviour
def old__ct_str(self):
return self.name
ContentType.__str__ = old__ct_str
class ContentTypes(dd.Table):
model = 'contenttypes.ContentType'
required_roles = dd.login_required(SiteStaff)
detail_layout = """
id app_label model base_classes
HelpTextsByModel
BrokenGFKsByModel
"""
@dd.displayfield(_("Base classes"))
def base_classes(self, obj, ar):
if obj is None:
return ""
chunks = []
def add(cl):
for b in cl.__bases__:
add(b)
if issubclass(cl, dd.Model) and cl is not dd.Model \
and cl._meta.managed:
if getattr(cl, '_meta', False) and not cl._meta.abstract:
#~ logger.info("20120205 adding(%r)",cl)
ct = ContentType.objects.get_for_model(cl)
chunks.append(
ar.obj2html(ct, str(cl._meta.verbose_name)))
#~ add(obj.model_class())
cl = obj.model_class()
        # e.g. if the database is not synchronized
if cl is not None:
for b in cl.__bases__:
add(b)
return E.p(*join_elems(chunks, sep=', '))
class HelpText(dd.Model):
class Meta(object):
app_label = 'gfks'
verbose_name = _("Help Text")
verbose_name_plural = _("Help Texts")
content_type = dd.ForeignKey('contenttypes.ContentType',
verbose_name=_("Model"))
field = models.CharField(_("Field"), max_length=200)
help_text = dd.RichTextField(_("HelpText"),
blank=True, null=True, format='plain')
def __str__(self):
return self.content_type.app_label + '.' \
+ self.content_type.model + '.' + self.field
@dd.chooser(simple_values=True)
def field_choices(cls, content_type):
l = []
if content_type is not None:
model = content_type.model_class()
meta = model._meta
for f in meta.fields:
if not getattr(f, '_lino_babel_field', False):
l.append(f.name)
for f in meta.many_to_many:
l.append(f.name)
for f in meta.virtual_fields:
l.append(f.name)
for a in model.get_default_table().get_actions():
l.append(a.action.action_name)
l.sort()
return l
#~ def get_field_display(cls,fld):
#~ return fld
@dd.virtualfield(models.CharField(_("Verbose name"), max_length=200))
def verbose_name(self, request):
m = self.content_type.model_class()
de = m.get_default_table().get_data_elem(self.field)
if isinstance(de, models.Field):
return "%s (%s)" % (str(de.verbose_name),
str(_("database field")))
if isinstance(de, dd.VirtualField):
return str(de.return_type.verbose_name)
if isinstance(de, dd.Action):
return str(de.label)
return str(de)
class HelpTexts(dd.Table):
required_roles = dd.login_required(SiteStaff)
model = 'gfks.HelpText'
column_names = "field verbose_name help_text id content_type"
class HelpTextsByModel(HelpTexts):
master_key = 'content_type'
class BrokenGFKs(dd.VirtualTable):
label = _("Broken GFKs")
required_roles = dd.login_required(SiteStaff)
column_names = "database_model database_object message todo"
@classmethod
def get_data_rows(self, ar):
f = settings.SITE.kernel.get_broken_generic_related
for model in get_models(include_auto_created=True):
for obj in f(model):
yield obj
@dd.displayfield(_("Database object"))
def database_object(self, obj, ar):
return ar.obj2html(obj)
@dd.displayfield(_("Message"))
def message(self, obj, ar):
return obj._message
@dd.displayfield(_("Action"))
def todo(self, obj, ar):
return obj._todo
@dd.displayfield(_("Database model"))
def database_model(self, obj, ar):
ct = ContentType.objects.get_for_model(obj.__class__)
return ar.obj2html(ct)
class BrokenGFKsByModel(BrokenGFKs):
master = 'contenttypes.ContentType'
column_names = "database_object message todo"
@classmethod
def get_data_rows(self, ar):
mi = ar.master_instance
f = settings.SITE.kernel.get_broken_generic_related
if mi is not None:
for obj in f(mi.model_class()):
yield obj
@classmethod
def get_pk_field(self):
return settings.SITE.site_config._meta.get_field('id')
@classmethod
def get_row_by_pk(self, ar, pk):
mi = ar.master_instance
if mi is None:
return None
M = mi.model_class()
try:
return M.objects.get(pk=pk)
except ValueError:
return None
except M.DoesNotExist:
return None
@classmethod
def get_row_permission(cls, obj, ar, state, ba):
return True
if False: # disabled 20160712
@dd.receiver(dd.pre_ui_build)
def my_pre_ui_build(sender, **kw):
try:
HelpText = rt.models.gfks.HelpText
for ht in HelpText.objects.filter(help_text__isnull=False):
# dd.logger.info("20120629 %s.help_text", ht)
try:
dd.resolve_field(str(ht)).help_text = ht.help_text
except FieldDoesNotExist as e:
#~ logger.debug("No help texts : %s",e)
pass
except DatabaseError as e:
dd.logger.debug("No help texts : %s", e)
pass
# cause `inv mm` to generate translatable strings from Django's
# original module since those translations are not loaded.
_("content type")
_("content types")
|
|
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Spanish National Research Council.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# E0202: An attribute inherited from %s hide this method
# pylint: disable=E0202
import abc
import argparse
import logging
import os
import six
from stevedore import extension
from mistral.openstack.common.apiclient import exceptions
logger = logging.getLogger(__name__)
_discovered_plugins = {}
def discover_auth_systems():
"""Discover the available auth-systems.
This won't take into account the old style auth-systems.
"""
global _discovered_plugins
_discovered_plugins = {}
def add_plugin(ext):
_discovered_plugins[ext.name] = ext.plugin
ep_namespace = "mistral.openstack.common.apiclient.auth"
mgr = extension.ExtensionManager(ep_namespace)
mgr.map(add_plugin)
def load_auth_system_opts(parser):
"""Load options needed by the available auth-systems into a parser.
This function will try to populate the parser with options from the
available plugins.
"""
group = parser.add_argument_group("Common auth options")
BaseAuthPlugin.add_common_opts(group)
    for name, auth_plugin in six.iteritems(_discovered_plugins):
group = parser.add_argument_group(
"Auth-system '%s' options" % name,
conflict_handler="resolve")
auth_plugin.add_opts(group)
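# Illustrative note (not part of the original module): for every plugin option
# _parser_add_opt() below registers two spellings, e.g. for "auth_url" it adds
# a visible "--os-auth-url" argument defaulting to env[OS_AUTH_URL] plus a
# suppressed "--os_auth_url" variant.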
def load_plugin(auth_system):
try:
plugin_class = _discovered_plugins[auth_system]
except KeyError:
raise exceptions.AuthSystemNotFound(auth_system)
return plugin_class(auth_system=auth_system)
def load_plugin_from_args(args):
"""Load required plugin and populate it with options.
Try to guess auth system if it is not specified. Systems are tried in
alphabetical order.
:type args: argparse.Namespace
:raises: AuthorizationFailure
"""
auth_system = args.os_auth_system
if auth_system:
plugin = load_plugin(auth_system)
plugin.parse_opts(args)
plugin.sufficient_options()
return plugin
    for plugin_auth_system in sorted(six.iterkeys(_discovered_plugins)):
plugin_class = _discovered_plugins[plugin_auth_system]
plugin = plugin_class()
plugin.parse_opts(args)
try:
plugin.sufficient_options()
except exceptions.AuthPluginOptionsMissing:
continue
return plugin
raise exceptions.AuthPluginOptionsMissing(["auth_system"])
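# Usage sketch (not part of the original module; assumes an argparse parser
# populated via the helpers above and at least one discovered plugin):
#
#   discover_auth_systems()
#   parser = argparse.ArgumentParser()
#   load_auth_system_opts(parser)
#   args = parser.parse_args()
#   plugin = load_plugin_from_args(args)  # raises AuthPluginOptionsMissing
#                                         # if no plugin has enough options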
@six.add_metaclass(abc.ABCMeta)
class BaseAuthPlugin(object):
"""Base class for authentication plugins.
An authentication plugin needs to override at least the authenticate
method to be a valid plugin.
"""
auth_system = None
opt_names = []
common_opt_names = [
"auth_system",
"username",
"password",
"tenant_name",
"token",
"auth_url",
]
def __init__(self, auth_system=None, **kwargs):
self.auth_system = auth_system or self.auth_system
self.opts = dict((name, kwargs.get(name))
for name in self.opt_names)
@staticmethod
def _parser_add_opt(parser, opt):
"""Add an option to parser in two variants.
:param opt: option name (with underscores)
"""
dashed_opt = opt.replace("_", "-")
env_var = "OS_%s" % opt.upper()
arg_default = os.environ.get(env_var, "")
arg_help = "Defaults to env[%s]." % env_var
parser.add_argument(
"--os-%s" % dashed_opt,
metavar="<%s>" % dashed_opt,
default=arg_default,
help=arg_help)
parser.add_argument(
"--os_%s" % opt,
metavar="<%s>" % dashed_opt,
help=argparse.SUPPRESS)
@classmethod
def add_opts(cls, parser):
"""Populate the parser with the options for this plugin.
"""
for opt in cls.opt_names:
# use `BaseAuthPlugin.common_opt_names` since it is never
# changed in child classes
if opt not in BaseAuthPlugin.common_opt_names:
cls._parser_add_opt(parser, opt)
@classmethod
def add_common_opts(cls, parser):
"""Add options that are common for several plugins.
"""
for opt in cls.common_opt_names:
cls._parser_add_opt(parser, opt)
@staticmethod
def get_opt(opt_name, args):
"""Return option name and value.
:param opt_name: name of the option, e.g., "username"
:param args: parsed arguments
"""
return (opt_name, getattr(args, "os_%s" % opt_name, None))
def parse_opts(self, args):
"""Parse the actual auth-system options if any.
This method is expected to populate the attribute `self.opts` with a
dict containing the options and values needed to make authentication.
"""
self.opts.update(dict(self.get_opt(opt_name, args)
for opt_name in self.opt_names))
def authenticate(self, http_client):
"""Authenticate using plugin defined method.
The method usually analyses `self.opts` and performs
a request to authentication server.
:param http_client: client object that needs authentication
:type http_client: HTTPClient
:raises: AuthorizationFailure
"""
self.sufficient_options()
self._do_authenticate(http_client)
@abc.abstractmethod
def _do_authenticate(self, http_client):
"""Protected method for authentication.
"""
def sufficient_options(self):
"""Check if all required options are present.
:raises: AuthPluginOptionsMissing
"""
missing = [opt
for opt in self.opt_names
if not self.opts.get(opt)]
if missing:
raise exceptions.AuthPluginOptionsMissing(missing)
@abc.abstractmethod
def token_and_endpoint(self, endpoint_type, service_type):
"""Return token and endpoint.
:param service_type: Service type of the endpoint
:type service_type: string
:param endpoint_type: Type of endpoint.
Possible values: public or publicURL,
internal or internalURL,
admin or adminURL
:type endpoint_type: string
:returns: tuple of token and endpoint strings
:raises: EndpointException
"""
|
|
import os
import re
from bs4 import BeautifulSoup
from .utils import get_siteurl, on_no_errors
EXPORTKEYS = ['odt', 'pdf', 'epub', 'print']
class StablePage(object):
    '''A wrapper around mwclient's `Page` to make sure the latest revision
    is also the stable one'''
def __init__(self, site, title):
self.site = site
self.raw_title = title
self.errors = []
self.friendly_title = title.replace("_", " ")
self.wrapped_page = site.Pages[title]
self.text = self.wrapped_page.text()
if not self.text:
self.errors.append(
'No Page with this title was found: "%s"' % title)
self.find_stable_id()
self.compare_stable_and_current()
self.build_fullurl()
@on_no_errors
def build_fullurl(self):
site_url = get_siteurl(self.site)
self.fullurl = os.path.join(
site_url, 'w',
'index.php?title={0.raw_title}&stableid={0.stable_id}'.format(self))
@on_no_errors
def compare_stable_and_current(self):
latest_rev_id = list(self.wrapped_page.revisions())[0]['revid']
print("TT", self.raw_title, latest_rev_id, self.stable_id)
# import pdb; pdb.set_trace()
if self.stable_id != latest_rev_id:
self.errors.append(
'The stable revision for "%s" is outdated' % self.raw_title)
def find_stable_id(self):
r = self.site.api("query", titles=self.raw_title, prop="flagged")
pages = r['query']['pages']
# pages is a dict with one key: the page_id we don't know yet.
pid = list(pages.keys())[0]
try:
self.stable_id = int(pages[pid]['flagged']['stable_revid']) # jay!
except KeyError:
self.errors.append(
'No stable revision found for "%s"' % self.raw_title)
def __str__(self):
return "%s(%s)" % (self.raw_title, self.friendly_title)
class BaseBookTocItem(object):
'''An item in a book toc'''
def __init__(self, site, line):
'''expects line to be a wikitext list item with a link'''
self.is_valid = False
self.errors = []
try:
self.parse_tocline(line)
except AttributeError:
self.errors.append("Can't parse toc-line: %s" % line)
return
self.target = self.link_parts[0].replace(" ", "_")
if "#" in self.target:
return
self.text = self.link_parts[
1 if len(self.link_parts) > 1 else 0].strip()
self.is_valid = True
self.stable_page = StablePage(site, self.title())
self.errors += self.stable_page.errors
def parse_tocline(self, line):
match = re.match(self.item_re, line.strip())
list_part, link_part = match.groups()
self.depth = len(list_part)
self.link_parts = link_part.split('|')
def __str__(self):
return str(self.__dict__)
class VersionizedBookTocItem(BaseBookTocItem):
item_re = re.compile(r'((?:#|\*)+).*\[(.*)\]')
title_re = re.compile(r'.*title=(.*?)&')
def title(self):
return re.match(self.title_re, self.target).groups()[0]
class LiveBookTocItem(BaseBookTocItem):
item_re = re.compile(r'((?:#|\*)+).*\[\[(.*)\]\]')
def title(self):
return self.target
class Bookinfo(dict):
def __getitem__(self, key):
        return super().__getitem__(key.upper())
def __setitem__(self, key, val):
super().__setitem__(key.upper(), val)
def validation_errors(self):
errors = []
        if "AUTOREN" not in self and "HERAUSGEBER" not in self:
errors.append("AUTOREN oder HERAUSGEBER notwendig")
return errors
class ExistingBook(object):
    '''Holds data of an existing handbuch.io-`book`
Attempts to analyze the toc and the first template
on the book's start page
'''
def __init__(self, site, title, version):
'''
Args:
site (Site): Mwclient site object
title(str): Booktitle
version(str): Book version
'''
self.site = site
self.errors = []
self.version = version
# self.site_url = get_siteurl(site)
if self.version != "live":
title = "/".join((title, self.version))
self.book_page = StablePage(site, title)
self.errors += self.book_page.errors
self.toc = []
self.parse_toc()
self.parse_info()
@on_no_errors
def parse_toc(self):
soup = BeautifulSoup(self.book_page.text, 'html.parser')
try:
toctext = soup.find_all("div", class_="BookTOC")[0].text.strip()
except IndexError:
self.errors.append(
'No toc found for book "%s"' % self.book_page.friendly_title)
return
BookTocItemClz = (
LiveBookTocItem if self.version == "live"
else VersionizedBookTocItem)
# logging.debug("TOC: %s" % str(toctext))
for line in toctext.split(os.linesep):
if not line:
continue
item = BookTocItemClz(self.site, line)
self.errors += item.errors
if item.is_valid:
self.toc.append(item)
@on_no_errors
def parse_template_text(self, txt):
txt = txt[txt.find('|'):]
data = Bookinfo()
if not txt:
return data
while(True):
next_ = txt.find('|')
keyval = txt[:next_].split("=")
txt = txt[next_ + 1:]
if len(keyval) == 2:
k, v = keyval
data[k.strip()] = v.strip()
if next_ < 0:
break
return data
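    # Illustrative sketch (not part of the original class): for a template body
    # such as 'Buchinfo|AUTOREN=Jane Doe|STAND=2020\n' (the text between '{{'
    # and '}}', names here hypothetical), parse_template_text() returns a
    # Bookinfo with upper-cased keys, roughly
    # {'AUTOREN': 'Jane Doe', 'STAND': '2020'}.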
@on_no_errors
def parse_info(self):
txt = self.book_page.text
self.template_startpos = txt.find('{{')
self.template_endpos = txt.find('}}') + 2
inner = txt[self.template_startpos + 2: self.template_endpos - 2]
d = self.parse_template_text(inner)
if not d:
self.errors.append(
'No template found for "%s"' % self.book_page.friendly_title)
self.info = d
self.errors += self.info.validation_errors()
@on_no_errors
def get_pagetext_from_page(self, page):
txt = "\n\n=%s=\n\n" % page.friendly_title.replace(self.book_page.friendly_title+"/", "")
txt += page.text
        txt, numsubs = re.subn(
            r"(=+\s*Einzelnachweise?\s*=+|<references\s*/>)", "", txt)
if numsubs > 0:
self.found_references = True
return txt
@on_no_errors
def get_src(self):
self.found_references = False
txt = ''
for item in self.toc:
if item.depth > 1:
continue
ptxt = self.get_pagetext_from_page(item.stable_page)
txt += ptxt or ''
if self.found_references:
txt += "= Einzelnachweise =\n<references/>\n\n"
return txt
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import re
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from sahara import conductor as c
from sahara import context
from sahara import exceptions as e
from sahara.i18n import _LI
from sahara.utils.notification import sender
conductor = c.API
LOG = logging.getLogger(__name__)
NATURAL_SORT_RE = re.compile('([0-9]+)')
def find_dict(iterable, **rules):
"""Search for dict in iterable of dicts using specified key-value rules."""
for item in iterable:
# assert all key-value pairs from rules dict
ok = True
for k, v in six.iteritems(rules):
ok = ok and k in item and item[k] == v
if ok:
return item
return None
def find(lst, **kwargs):
for obj in lst:
match = True
for attr, value in kwargs.items():
if getattr(obj, attr) != value:
match = False
if match:
return obj
return None
def get_by_id(lst, id):
for obj in lst:
if obj.id == id:
return obj
return None
# Taken from http://stackoverflow.com/questions/4836710/does-
# python-have-a-built-in-function-for-string-natural-sort
def natural_sort_key(s):
return [int(text) if text.isdigit() else text.lower()
for text in re.split(NATURAL_SORT_RE, s)]
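# Illustrative sketch (not part of the original module): natural_sort_key()
# splits digit runs out of the string and compares them numerically, e.g.
# sorted(['ng-10', 'ng-2'], key=natural_sort_key) == ['ng-2', 'ng-10'].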
def change_cluster_status_description(cluster, status_description):
ctx = context.ctx()
cluster = conductor.cluster_get(ctx, cluster) if cluster else None
if cluster is None or cluster.status == "Deleting":
return cluster
return conductor.cluster_update(
ctx, cluster, {'status_description': status_description})
def change_cluster_status(cluster, status, status_description=None):
ctx = context.ctx()
# Update cluster status. Race conditions with deletion are still possible,
# but this reduces probability at least.
cluster = conductor.cluster_get(ctx, cluster) if cluster else None
# 'Deleting' is final and can't be changed
if cluster is None or cluster.status == 'Deleting':
return cluster
update_dict = {"status": status}
if status_description:
update_dict["status_description"] = status_description
cluster = conductor.cluster_update(ctx, cluster, update_dict)
LOG.info(_LI("Cluster status has been changed: id=%(id)s, New status="
"%(status)s"), {'id': cluster.id, 'status': cluster.status})
sender.notify(ctx, cluster.id, cluster.name, cluster.status,
"update")
return cluster
def count_instances(cluster):
return sum([node_group.count for node_group in cluster.node_groups])
def check_cluster_exists(cluster):
ctx = context.ctx()
# check if cluster still exists (it might have been removed)
cluster = conductor.cluster_get(ctx, cluster)
return cluster is not None
def get_instances(cluster, instances_ids=None):
inst_map = {}
for node_group in cluster.node_groups:
for instance in node_group.instances:
inst_map[instance.id] = instance
if instances_ids is not None:
return [inst_map[id] for id in instances_ids]
else:
return [v for v in six.itervalues(inst_map)]
def clean_cluster_from_empty_ng(cluster):
ctx = context.ctx()
for ng in cluster.node_groups:
if ng.count == 0:
conductor.node_group_remove(ctx, ng)
def generate_etc_hosts(cluster):
hosts = "127.0.0.1 localhost\n"
for node_group in cluster.node_groups:
for instance in node_group.instances:
hosts += "%s %s %s\n" % (instance.internal_ip,
instance.fqdn(),
instance.hostname())
return hosts
def generate_instance_name(cluster_name, node_group_name, index):
return ("%s-%s-%03d" % (cluster_name, node_group_name, index)).lower()
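# Illustrative sketch (not part of the original module): instance names are
# lower-cased and zero-padded, e.g.
# generate_instance_name('MyCluster', 'workers', 3) == 'mycluster-workers-003'.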
def generate_auto_security_group_name(node_group):
return ("%s-%s-%s" % (node_group.cluster.name, node_group.name,
node_group.id[:8])).lower()
def generate_aa_group_name(cluster_name):
return ("%s-aa-group" % cluster_name).lower()
def _get_consumed(start_time):
return timeutils.delta_seconds(start_time, timeutils.utcnow())
def get_obj_in_args(check_obj, *args, **kwargs):
for arg in args:
val = check_obj(arg)
if val is not None:
return val
for arg in kwargs.values():
val = check_obj(arg)
if val is not None:
return val
return None
def await_process(timeout, sleeping_time, op_name, check_object):
    """Await something in the cluster."""
def decorator(func):
@functools.wraps(func)
def handler(*args, **kwargs):
start_time = timeutils.utcnow()
cluster = get_obj_in_args(check_object, *args, **kwargs)
while _get_consumed(start_time) < timeout:
consumed = _get_consumed(start_time)
if func(*args, **kwargs):
LOG.info(
_LI("Operation %(op_name)s was successfully executed "
"in seconds: %(sec)s"), {'op_name': op_name,
'sec': consumed})
return
if not check_cluster_exists(cluster):
return
context.sleep(sleeping_time)
raise e.TimeoutException(timeout, op_name)
return handler
return decorator
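# Usage sketch (not part of the original module; the names below are
# hypothetical): await_process() builds a decorator that repeatedly calls the
# wrapped check until it returns a truthy value, the cluster disappears, or
# `timeout` seconds have passed, e.g.
#
#   @await_process(300, 5, "datanodes start",
#                  lambda arg: arg if hasattr(arg, 'node_groups') else None)
#   def _await_datanodes(cluster):
#       return _all_datanodes_registered(cluster)  # hypothetical helper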
|
|
#!/usr/bin/env python2
#
# Prepare a single ECMAScript testcase for execution and (optionally) execute
# it with Duktape or another engine. Interpret testcase results against the
# expect string and known issues.
#
# Currently no support for API testcases which require compilation and
# linking.
#
# XXX: encoding issues
# XXX: external minifier
# XXX: use strict support; perhaps via test metadata
# XXX: figure out command line options
# XXX: options to control diff printing, other summary data
# XXX: use logging or just prints? debug prints?
# XXX: nodejs comparison
import os
import sys
import traceback
import re
import optparse
import subprocess
from threading import Timer
import atexit
import shutil
import time
import datetime
import tempfile
import platform
import md5
import json
import yaml
import xml.etree.ElementTree as ET
#
# Platform detection
#
windows = platform.system() == 'Windows'
cygwin = 'CYGWIN' in platform.system()
istty = sys.stdout.isatty()
use_colors = istty and not windows
#
# Global variables, RegExp patterns, etc
#
# Parse a metadata block from source file.
re_meta = re.compile(r'/\*---\n^((.|\r|\n)*?)^---\*/', re.MULTILINE)
# Parse an expect block from source file. Initial newline is excluded but
# newline on last expect line is included.
re_expect = re.compile(r'/\*===\n^((?:.|\r|\n)*?)^===\*/|//>(.*?)$', re.MULTILINE)
# Parse a known issue: either a plain YAML file, or a YAML file separated by
# a '---' line, followed by expected output for the known issue.
re_knownissue = re.compile(r'((?:.|\n)*)\n---\n((?:.|\n)*)|((?:.|\n)*)', re.MULTILINE)
# Parse an include line.
re_include = re.compile(r'^/\*@include\s+(.*?)\s*@\*/$', re.MULTILINE)
# Parse a single line comment. Doesn't account for e.g. Regexps that may
# contain two successive slashes, so careful when using the built-in hacky
# minifier.
re_singlelinecomment = re.compile('//(.*?)$', re.MULTILINE)
# Tempdir for test running, shutil.rmtree()'d at exit.
tempdir = None
# Entry CWD and script path for relative resolution.
entry_cwd = None
script_path = None
# Optparse args and opts shamelessly via a shared global to avoid plumbing.
opts = {}
args = []
# Testcase filename, used for relative resolution.
testcase_filename = None
#
# ECMAScript test framework injected into ECMAScript test cases.
#
# Init code to run which allows the testcase to run on multiple engines.
ECMASCRIPT_TEST_FRAMEWORK=r'''
(function initTestFramework() {
var Test = {};
var G = new Function('return this')();
if (typeof G.Duktape === 'object' && G.Duktape !== null) {
Test.isDuktape = true;
Test.engine = 'duktape';
} else if (typeof G.Packages === 'object' && G.Packages !== null && String(Packages) === '[JavaPackage ]') {
Test.isRhino = true;
Test.engine = 'rhino';
} else if (typeof G.process === 'object' && G.process !== null && typeof G.process.version === 'string') {
Test.isV8 = true; // not exact, detects via Node.js
Test.engine = 'v8';
} else {
Test.engine = 'unknown';
}
Object.defineProperty(G, '__engine__', { value: Test.engine }); // XXX: to be removed, runtests compatibility
if (typeof G.print !== 'function') {
if (G.process && G.process.stdout && typeof G.process.stdout.write === 'function') {
G.print = function print() {
process.stdout.write(Array.prototype.map.call(arguments, String).join(' ') + '\n');
};
} else if (G.console && typeof G.console.log === 'function') {
G.print = function print() {
console.log(Array.prototype.map.call(arguments, String).join(' '));
};
}
}
if (Test.engine === 'duktape' && typeof G.console === 'undefined') {
G.console = {
log: print
};
}
})();
'''
#
# File I/O helpers.
#
# Read file.
def read_file(fn):
with open(fn, 'rb') as f:
return f.read()
# Write file.
def write_file(fn, data):
assert(isinstance(data, str))
with open(fn, 'wb') as f:
f.write(data)
# Convert to Windows path if necessary, used when running commands from Cygwin.
def path_to_platform(path):
if not cygwin: return path
return subprocess.check_output([ 'cygpath', '-w', path ]).strip()
#
# Text processing helpers.
#
# Apply ANSI coloring.
# http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
def ansi_color(text, color):
if use_colors:
return '\x1b[' + color + 'm' + text + '\x1b[0m'
return text
def green(text):
return ansi_color(text, '1;32;40')
def red(text):
return ansi_color(text, '1;37;41')
def blue(text):
return ansi_color(text, '0;37;44')
def yellow(text):
return ansi_color(text, '0;33;40')
def grey(text):
return ansi_color(text, '6;37;40')
# Parse lines. Split a text input into lines using LF as the line separator.
# Assume last line is terminated with a LF and ignore an "empty line" that
# follows it.
def parse_lines(data):
lines = data.split('\n')
if lines[-1] == '':
lines.pop()
else:
print('WARNING: last line of input did not contain a LF')
return lines
# Combine lines into text. Last line has trailing LF.
def combine_lines(lines):
return '\n'.join(lines) + '\n'
# Count lines in text input.
def count_lines(data):
return len(parse_lines(data))
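# Illustrative sketch (not part of the original script): these helpers assume
# LF-terminated text, e.g. parse_lines('foo\nbar\n') == ['foo', 'bar'],
# count_lines('foo\nbar\n') == 2, and combine_lines(['foo', 'bar']) returns
# 'foo\nbar\n'.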
# Indent lines.
def indent_lines(lines, count):
prepend = ' ' * count
return map(lambda x: prepend + x, lines)
# Clip text to maximum line count and column count.
def clip_lines(lines, start_idx, end_idx, column_limit=None):
def clipline(x):
if column_limit is not None and len(x) > column_limit:
return x[0:column_limit] + ' [... %d more chars]' % (len(x) - column_limit)
return x
res = [clipline(x) for x in lines[start_idx:end_idx]]
if len(lines) > end_idx:
res.append('[... %d more lines]' % (len(lines) - end_idx))
return res
# Remove carriage returns.
def remove_cr(data):
return data.replace('\r', '')
#
# Valgrind result processing
#
def parse_massif_result(f, res):
# Allocated bytes.
re_heap_b = re.compile(r'^mem_heap_B=(\d+)$')
    # Allocation overhead. This matters for desktop allocators; for efficient
    # zero-overhead pool allocators it is usually not a concern (the waste in
    # a pool allocator behaves very differently from a libc allocator).
re_heap_extra_b = re.compile(r'^mem_heap_extra_B=(\d+)$')
# Stacks.
re_stacks_b = re.compile(r'^mem_stacks_B=(\d+)$')
peak_heap = 0
peak_stack = 0
for line in f:
line = line.strip()
#print(line)
m1 = re_heap_b.match(line)
m2 = re_heap_extra_b.match(line)
m3 = re_stacks_b.match(line)
heap = None
if m1 is not None:
heap = int(m1.group(1))
stack = None
if m3 is not None:
stack = int(m3.group(1))
if heap is not None:
peak_heap = max(peak_heap, heap)
if stack is not None:
peak_stack = max(peak_stack, stack)
res['massif_peak_heap_bytes'] = peak_heap
res['massif_peak_stack_bytes'] = peak_stack
def parse_memcheck_result(f, res):
try:
tree = ET.parse(f)
except ET.ParseError:
res['errors'].append('memcheck-parse-failed')
return
root = tree.getroot()
if root.tag != 'valgrindoutput':
raise Exception('invalid valgrind xml format')
def parse_error(node):
err = {}
for child in node.findall('kind'):
err['kind'] = child.text
for child in node.findall('xwhat'):
for child2 in child.findall('text'):
err['text'] = child2.text
res['errors'].append(err['kind'])
# XXX: make res['errors'] structured rather than text list?
# 'err' is now ignored.
for child in root.findall('error'):
parse_error(child)
#
# Test execution and result interpretation helpers.
#
# Get a unified diff between 'a' and 'b'.
def get_diff(a, b):
if a == b:
return ''
    fn_a = os.path.abspath(os.path.join(tempdir, 'diff-a'))
    fn_b = os.path.abspath(os.path.join(tempdir, 'diff-b'))
write_file(fn_a, a)
write_file(fn_b, b)
cmd = None
try:
if windows:
cmd = [ 'fc', path_to_platform(fn_a), path_to_platform(fn_b) ]
else:
cmd = [ 'diff', '--text', '-u', fn_a, fn_b ]
#print('Executing: %r' % cmd)
proc = subprocess.Popen(cmd, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ret = proc.communicate(input=None)
# ignore proc.returncode: diff returns 0 if inputs are same, nonzero otherwise.
if len(ret[1]) > 0:
print('Unexpected diff/fc stderr output:')
print(ret[1])
return ret[0]
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print('Command execution failed for %r:\n%s' % (cmd, traceback.format_exc(exc_traceback)))
return '*** Failed to diff ***'
# Find testcase. Testcase can be given with full (absolute or relative) path
# or just as 'test-dev-mandel2-func.js' for convenience, in which case we'll
# try to find it relative to the script path.
def find_testcase(name):
if os.path.isfile(name):
return name
for dirname in [
os.path.join(script_path, '..', 'tests', 'ecmascript'),
os.path.join(entry_cwd, 'tests', 'ecmascript'),
os.path.join(script_path, '..', 'tests', 'api'),
os.path.join(entry_cwd, 'tests', 'api')
]:
abs_fn = os.path.abspath(os.path.join(dirname, name))
#print('Find testcase, try: %r' % abs_fn)
if os.path.isfile(abs_fn):
return abs_fn
raise Exception('cannot find testcase: %r' % name)
# Find duktape command for convenience if not given explicitly.
def find_duktape():
for fn in [
os.path.join('.', 'duk'),
os.path.join('.', 'duk.exe'),
os.path.join(script_path, '..', 'duk'),
os.path.join(script_path, '..', 'duk.exe'),
]:
abs_fn = os.path.abspath(fn)
#print('Find duk command, try: %r' % abs_fn)
if os.path.exists(abs_fn):
return abs_fn
raise Exception('failed to locate "duk" command')
# Parse metadata from testcase file.
def parse_metadata(data):
res = {}
for m in re_meta.finditer(data):
assert(m is not None)
doc = yaml.safe_load(m.group(1)) # YAML also accepts JSON
for k in doc.keys():
res[k] = doc[k]
return res
# Parse expect string from testcase file.
def parse_expected_result(data):
res = []
for m in re_expect.finditer(data):
assert(m is not None)
if m.group(1) is not None:
res.append(m.group(1))
elif m.group(2) is not None:
res.append(m.group(2) + '\n') # Single-line shorthand
else:
raise Exception('internal error')
return ''.join(res)
# Read include file, automatic path lookup.
def read_include_file(filename):
abs_fn = os.path.abspath(os.path.join(os.path.dirname(testcase_filename), filename))
#print('Include: %r -> %r' % (filename, abs_fn))
data = read_file(abs_fn)
return '/* Included: %r -> %r */ ' % (filename, abs_fn) + data
# Minify ECMAScript code either using an external minifier or a simple built-in
# minifier which replaces single line comments with /* */ comments and then
# replaces newlines with space. This works in most cases, but assumes that
# semicolons are used in the source and that RegExps don't contain '//'
# sequences (slashes can be escaped). The result is always a one-liner.
def minify_ecmascript(data):
if '\n' not in data:
return data
fn_in = os.path.abspath(os.path.join(tempdir, 'minify-input'))
fn_out = os.path.abspath(os.path.join(tempdir, 'minify-output'))
write_file(fn_in, data)
res = None
if opts.minify_closure is not None:
rc = subprocess.call([ 'java', '-jar', path_to_platform(opts.minify_closure),
'--js_output_file', path_to_platform(fn_out), path_to_platform(fn_in) ])
if rc != 0:
raise Exception('closure minify failed')
res = read_file(fn_out)
res = res.replace('\n', ' ') # for some reason closure sometimes outputs newlines
elif opts.minify_uglifyjs is not None:
rc = subprocess.call([ opts.minify_uglifyjs, '-o',
path_to_platform(fn_out), path_to_platform(fn_in) ])
if rc != 0:
raise Exception('uglifyjs minify failed')
res = read_file(fn_out)
elif opts.minify_uglifyjs2 is not None:
rc = subprocess.call([ opts.minify_uglifyjs2, '-o',
path_to_platform(fn_out), path_to_platform(fn_in) ])
if rc != 0:
raise Exception('uglifyjs2 minify failed')
res = read_file(fn_out)
else:
#print('Input is not minified, no minifier given, using built-in simple minifier')
def repl_comment(m):
return '/* ' + m.group(1) + '*/'
res = re.sub(re_singlelinecomment, repl_comment, data)
res = res.replace('\n', ' ')
res = res.strip()
assert('\n' not in res)
return res
# Inject utilities and other testing support functionality as one-liners
# into the testcase. Using a one-liner avoids disturbing line numbers in
# the testcase. The support code has ideally been already minified, but
# if not, try to minify it. If there's no minifier, simply assume semicolons
# have been used correctly and replace newlines with spaces.
def prepare_ecmascript_testcase(data, meta):
# Process includes.
def repl_include(m):
incfile = read_include_file(m.group(1))
return minify_ecmascript(incfile)
data = re.sub(re_include, repl_include, data)
# Inject shared engine prefix.
data = minify_ecmascript(ECMASCRIPT_TEST_FRAMEWORK) + ' ' + data
# If the testcase needs to run strict program level code, prepend a
# 'use strict' declaration once all the other preparations are done.
if meta.get('use_strict', False):
data = "'use strict'; " + data
# Manually enabled Promise hack.
if False:
data = data + '\n' + "if (typeof Promise === 'function' && typeof Promise.runQueue === 'function') { Promise.runQueue(); }"
return data
# Similar preparation for API testcases.
def prepare_api_testcase(data):
raise Exception('not implemented')
# Parse a known issue file.
def parse_known_issue(data):
m = re_knownissue.match(data)
if m is None:
raise Exception('failed to parse known issue file')
elif m.group(1) is not None and m.group(2) is not None:
meta = yaml.safe_load(m.group(1))
meta['output'] = m.group(2) # add expected (known issue, i.e. buggy) output as .output
elif m.group(3) is not None:
meta = yaml.safe_load(m.group(3))
else:
raise Exception('failed to parse known issue file')
return meta
# Find known issues directory.
def find_known_issues():
for dirname in [
os.path.join(os.path.dirname(testcase_filename), '..', 'knownissues'),
os.path.join(script_path, '..', 'tests', 'knownissues'),
os.path.join(entry_cwd, 'tests', 'knownissues')
]:
#print('Find known issues, try: %r' % dirname)
if os.path.isdir(dirname):
return dirname
raise Exception('failed to locate known issues')
# Check known issues against output data.
def check_known_issues(dirname, output):
    output_md5 = md5.md5(output).hexdigest()
files = sorted(os.listdir(dirname))
for fn in files:
abs_fn = os.path.abspath(os.path.join(dirname, fn))
#print('Check known issue: %r' % abs_fn)
try:
meta = parse_known_issue(read_file(abs_fn))
except:
print('Failed to parse known issue: %r' % abs_fn)
meta = {}
if meta.get('output', None) == output:
return meta
elif meta.get('md5', '').lower() == output_md5:
return meta
return None
#
# Testcase execution.
#
# Execute ECMAScript testcase with optional timeout and valgrind wrapping.
# http://stackoverflow.com/questions/1191374/using-module-subprocess-with-timeout
# For Cygwin the command name should use Unix path but argument paths
# should be Windows converted.
def execute_ecmascript_testcase(res, data, name, polyfills):
test_fn = os.path.abspath(os.path.join(tempdir, name))
write_file(test_fn, data)
valgrind_output = None
cmd = []
try:
start_time = time.time()
try:
if opts.valgrind:
res['valgrind'] = True
res['valgrind_tool'] = opts.valgrind_tool
cmd += [ 'valgrind' ]
cmd += [ '--tool=' + opts.valgrind_tool ]
valgrind_output = os.path.abspath(os.path.join(tempdir, 'valgrind.out'))
if opts.valgrind_tool == 'massif':
cmd += [ '--massif-out-file=' + path_to_platform(valgrind_output) ]
#cmd += [ '--peak-inaccuracy=0.0' ]
#cmd += [ '--stacks=yes' ]
elif opts.valgrind_tool == 'memcheck':
cmd += [ '--xml=yes', '--xml-file=' + path_to_platform(valgrind_output) ]
else:
raise Exception('invalid valgrind tool %r' % opts.valgrind_tool)
cmd += [ path_to_platform(os.path.abspath(opts.duk)) ]
else:
cmd += [ os.path.abspath(opts.duk) ]
for fn in polyfills:
cmd += [ path_to_platform(os.path.abspath(fn)) ]
cmd += [ path_to_platform(os.path.abspath(test_fn)) ]
res['command'] = cmd
#print('Executing: %r' % cmd)
proc = subprocess.Popen(cmd, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.path.abspath(tempdir))
timeout_sec = opts.timeout
def kill_proc(p):
print('Killing testcase process due to timeout (%d seconds)' % timeout_sec)
res['timeout'] = True
p.kill()
timer = Timer(timeout_sec, kill_proc, [proc])
try:
timer.start()
ret = proc.communicate(input=None)
finally:
timer.cancel()
res['stdout'] = remove_cr(ret[0])
res['stderr'] = remove_cr(ret[1])
res['returncode'] = proc.returncode
if opts.valgrind:
res['valgrind_output'] = ret[1]
res['stderr'] = '' # otherwise interpreted as an error
if valgrind_output is not None and os.path.exists(valgrind_output):
with open(valgrind_output, 'rb') as f:
res['valgrind_output'] += f.read()
with open(valgrind_output, 'rb') as f:
if opts.valgrind_tool == 'massif':
parse_massif_result(f, res)
elif opts.valgrind_tool == 'memcheck':
parse_memcheck_result(f, res)
else:
res['errors'].append('no-valgrind-output')
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print('Command execution failed for %r:\n%s' % (cmd, traceback.format_exc(exc_traceback)))
res['stdout'] = traceback.format_exc(exc_traceback)
res['stderr'] = traceback.format_exc(exc_traceback)
res['returncode'] = 1
finally:
end_time = time.time()
res['duration'] = end_time - start_time
# Execute API testcase with optional timeout and valgrind wrapping.
def execute_api_testcase(data):
raise Exception('unimplemented')
# Interpret test results against expected result and known issues.
# Return a JSON-compatible object providing test results.
def interpret_test_result(doc, expect):
meta = doc['metadata']
errors = doc['errors']
known_meta = check_known_issues(opts.known_issues, doc['stdout'])
success = True
if doc['stdout'] != expect:
errors.append('expect-mismatch')
success = False
if doc['returncode'] != 0:
if meta.get('intended_uncaught', False):
# Test case is intended to throw an uncaught error. This is
# necessary to test some errors that occur at the program level.
pass
else:
errors.append('returncode-nonzero')
success = False
if doc['stderr'] != '':
if meta.get('intended_uncaught', False):
pass
else:
errors.append('stderr-nonempty')
success = False
if doc['timeout']:
errors.append('exec-timeout')
if known_meta is not None:
errors.append('known-issue')
success = False
doc['knownissue'] = known_meta.get('summary', 'no summary')
doc['knownissue_meta'] = known_meta
if len(errors) > 0:
success = False
doc['success'] = success
doc['errors'] = errors
doc['diff_expect'] = get_diff(expect, doc['stdout'])
#
# Human readable summary.
#
# Print testcase summary from result JSON object.
def print_summary(doc):
meta = doc['metadata']
def fmt_time(x):
if x >= 60:
return '%.1f m' % (float(x) / 60.0)
else:
return '%.1f s' % float(x)
def fmt_size(x):
if x < 1024 * 1024:
return '%.2f k' % (float(x) / 1024.0)
else:
return '%.2f M' % (float(x) / (1024.0 * 1024.0))
parts = []
issues = []
test_result = '???'
test_name = doc['testcase_name'].ljust(50)
    print_diff = True # print diff if it is non-empty
test_time = fmt_time(doc['duration'])
test_time = '[%s]' % (test_time.rjust(6))
if doc['skipped']:
test_result = 'SKIPPED'
elif doc['success']:
if doc['timeout']:
test_result = yellow('TIMEOUT')
print_diff = False
else:
test_result = green('SUCCESS')
else:
if doc['timeout']:
test_result = yellow('TIMEOUT')
print_diff = False
elif doc['knownissue'] != '':
test_result = blue('KNOWN ')
print_diff = False
else:
test_result = red('FAILURE')
issues += [ '[%d diff lines]' % count_lines(doc['diff_expect']) ]
if doc['knownissue'] != '':
issues += [ '[known: ' + doc['knownissue'] + ']' ]
if len(doc['errors']) > 0:
issues += [ '[errors: ' + ','.join(doc['errors']) + ']' ]
parts += [ test_result, test_name ]
if doc['duration'] >= 60.0:
parts += [ blue(test_time) ]
elif doc['duration'] >= 5.0:
parts += [ yellow(test_time) ]
else:
parts += [ test_time ]
if doc.has_key('massif_peak_heap_bytes'):
tmp = []
tmp += [ '%s heap' % fmt_size(doc['massif_peak_heap_bytes']) ]
#tmp += [ '%s stack' % fmt_size(doc['massif_peak_stack_bytes']) ]
parts += [ '[%s]' % (', '.join(tmp).rjust(14)) ]
if doc.has_key('valgrind_tool'):
parts += [ grey('[%s]' % doc['valgrind_tool']) ]
parts += issues
print(' '.join(parts))
if doc['stderr'] != '' and not meta.get('intended_uncaught', False):
if True:
print('- Test wrote to stderr:')
stderr_lines = parse_lines(doc['stderr'])
stderr_lines = clip_lines(stderr_lines, 0, opts.clip_lines, opts.clip_columns)
stderr_lines = indent_lines(stderr_lines, 4)
sys.stdout.write(combine_lines(stderr_lines))
else:
pass
if doc['diff_expect'] != '' and print_diff:
if True:
print('- Diff to expected result:')
skip = 2 # skip a few uninteresting diff lines by default
if windows: skip = 0 # but not 'fc'
diff_lines = parse_lines(doc['diff_expect'])
diff_lines = clip_lines(diff_lines, skip, skip + opts.clip_lines, opts.clip_columns)
diff_lines = indent_lines(diff_lines, 4)
sys.stdout.write(combine_lines(diff_lines))
else:
pass
#
# Main program.
#
def main():
global tempdir, args, opts, entry_cwd, script_path, testcase_filename
exitcode = 0
# Get script path and current CWD for relative resolution. Plumbed
# through globals to minimize argument passing.
entry_cwd = os.getcwd()
script_path = sys.path[0] # http://stackoverflow.com/questions/4934806/how-can-i-find-scripts-directory-with-python
# Parse options.
parser = optparse.OptionParser(
usage='Usage: %prog [options] testcase',
        description='Prepare an ECMAScript or API testcase for execution and (optionally) execute the testcase, print a summary, and write a JSON result file for further use. Testcase can be given using a full path or using just the test name, in which case it is looked up from ../tests/ecmascript/ relative to the runtest.py script.'
)
parser.add_option('--known-issues', dest='known_issues', default=None, help='Path to known issues directory, default is autodetect')
parser.add_option('--ignore-skip', dest='ignore_skip', default=False, action='store_true', help='Ignore skip=true in metadata')
parser.add_option('--minify-uglifyjs2', dest='minify_uglifyjs2', default=None, help='Path to UglifyJS2 to be used for minifying')
parser.add_option('--minify-uglifyjs', dest='minify_uglifyjs', default=None, help='Path to UglifyJS to be used for minifying')
parser.add_option('--minify-closure', dest='minify_closure', default=None, help='Path to Closure compiler.jar to be used for minifying')
parser.add_option('--duk', dest='duk', default=None, help='Path to "duk" command, default is autodetect')
parser.add_option('--timeout', dest='timeout', type='int', default=15*60, help='Test execution timeout (seconds), default 15min')
parser.add_option('--polyfill', dest='polyfills', default=[], action='append', help='Polyfill script(s) for duk')
parser.add_option('--valgrind', dest='valgrind', action='store_true', default=False, help='Run test inside valgrind')
parser.add_option('--valgrind-tool', dest='valgrind_tool', default=None, help='Valgrind tool to use (implies --valgrind)')
parser.add_option('--memcheck', dest='memcheck', default=False, action='store_true', help='Shorthand for --valgrind-tool memcheck')
parser.add_option('--massif', dest='massif', default=False, action='store_true', help='Shorthand for --valgrind-tool massif')
parser.add_option('--prepare-only', dest='prepare_only', action='store_true', default=False, help='Only prepare a testcase without running it')
parser.add_option('--clip-lines', dest='clip_lines', type='int', default=15, help='Number of lines for stderr/diff summaries')
parser.add_option('--clip-columns', dest='clip_columns', type='int', default=160, help='Number of columns for stderr/diff summaries')
parser.add_option('--output-prepared', dest='output_prepared', default=None, help='Filename for prepared testcase')
parser.add_option('--output-result', dest='output_result', default=None, help='Filename for result JSON file')
parser.add_option('--output-stdout', dest='output_stdout', default=None, help='Filename for writing verbatim test stdout')
parser.add_option('--output-stderr', dest='output_stderr', default=None, help='Filename for writing verbatim test stderr')
parser.add_option('--output-diff', dest='output_diff', default=None, help='Filename for writing testcase expect-to-actual diff')
parser.add_option('--output-valgrind', dest='output_valgrind', default=None, help='Filename for writing valgrind output')
(opts, args) = parser.parse_args()
# Some option defaulting.
if opts.duk is None:
opts.duk = find_duktape()
#print('Autodetect "duk" command: %r' % opts.duk)
testcase_filename = find_testcase(args[0])
if opts.known_issues is None:
opts.known_issues = find_known_issues()
#print('Autodetect known issues directory: %r' % opts.known_issues)
if opts.memcheck:
opts.valgrind = True
opts.valgrind_tool = 'memcheck'
if opts.massif:
opts.valgrind = True
opts.valgrind_tool = 'massif'
if opts.valgrind_tool is not None:
opts.valgrind = True
if opts.valgrind and opts.valgrind_tool is None:
opts.valgrind_tool = 'memcheck'
# Create a temporary directory for anything test related, automatic
# atexit deletion. Plumbed through globals to minimize argument passing.
tempdir = tempfile.mkdtemp(prefix='tmp-duk-runtest-')
atexit.register(shutil.rmtree, tempdir)
#print('Using temporary directory: %r' % tempdir)
# Read testcase, scan metadata and expected result.
data = remove_cr(open(testcase_filename, 'rb').read())
name = os.path.basename(testcase_filename)
meta = parse_metadata(data)
expect = parse_expected_result(data)
# Prepare runnable testcase by injecting an ECMAScript test framework
# and processing @include lines.
data = prepare_ecmascript_testcase(data, meta)
if opts.output_prepared is not None:
write_file(opts.output_prepared, data)
print('Wrote prepared testcase to: %r' % opts.output_prepared)
# Initialize result object, filling fields with defaults so that calling
# code can (at least mostly) rely on all fields being present.
res = {}
res['testcase_file'] = os.path.abspath(testcase_filename)
res['testcase_name'] = name
res['expect'] = expect
res['metadata'] = meta
res['skipped'] = False
res['success'] = True
res['errors'] = []
res['diff_expect'] = ''
res['knownissue'] = ''
res['knownissue_meta'] = None
res['skipped'] = True
res['command'] = []
res['valgrind'] = False
res['valgrind_output'] = ''
res['stdout'] = ''
res['stderr'] = ''
res['returncode'] = 0
res['timeout'] = False
res['duration'] = 0
# Execute testcase unless requested to just prepare the testcase.
# Execution result is a JSON-compatible object which can be written
# out for further processing by the caller (e.g. to process results
# of running multiple tests). Print a maximally helpful, human readable
# summary based on the same JSON-compatible result object.
if not opts.prepare_only:
if meta.get('skip', False) and not opts.ignore_skip:
res['skipped'] = True
else:
res['skipped'] = False
execute_ecmascript_testcase(res, data, name, opts.polyfills)
interpret_test_result(res, expect)
print_summary(res)
if not res['success']:
exitcode = 1
else:
pass
# Write out requested output files: test result JSON, test raw
# stdout/stderr, etc.
if opts.output_result is not None:
write_file(opts.output_result, json.dumps(res, indent=4, ensure_ascii=True).encode('utf-8') + '\n')
print('Wrote test result JSON data to: %r' % opts.output_result)
if opts.output_stdout is not None:
write_file(opts.output_stdout, res['stdout'])
print('Wrote test stdout to: %r' % opts.output_stdout)
if opts.output_stderr is not None:
write_file(opts.output_stderr, res['stderr'])
print('Wrote test stderr to: %r' % opts.output_stderr)
if opts.output_diff is not None:
write_file(opts.output_diff, res['diff_expect'])
print('Wrote test expect diff to: %r' % opts.output_diff)
if opts.output_valgrind is not None:
write_file(opts.output_valgrind, res['valgrind_output'])
print('Wrote test valgrind output to: %r' % opts.output_valgrind)
# Exit with 0 if test was considered success, non-zero otherwise.
sys.exit(exitcode)
if __name__ == '__main__':
main()
|
|
import sys
from time import sleep
import pygame
from bullet import Bullet
from alien import Alien
def check_keydown_events(event, ai_settings, screen, ship, bullets):
if event.key == pygame.K_RIGHT:
ship.moving_right = True
elif event.key == pygame.K_LEFT:
ship.moving_left = True
elif event.key == pygame.K_SPACE:
fire_bullet(ai_settings, screen, ship, bullets)
elif event.key == pygame.K_q:
sys.exit()
def fire_bullet(ai_settings, screen, ship, bullets):
if len(bullets) < ai_settings.bullets_allowed:
new_bullet = Bullet(ai_settings, screen, ship)
bullets.add(new_bullet)
def check_keyup_events(event, ship):
if event.key == pygame.K_RIGHT:
ship.moving_right = False
elif event.key == pygame.K_LEFT:
ship.moving_left = False
def check_events(ai_settings, screen, stats, sb, play_button, ship, aliens,
bullets):
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
check_keydown_events(event, ai_settings, screen, ship, bullets)
elif event.type == pygame.KEYUP:
check_keyup_events(event, ship)
elif event.type == pygame.MOUSEBUTTONDOWN:
mouse_x, mouse_y = pygame.mouse.get_pos()
check_play_button(ai_settings, screen, stats, sb, play_button,
ship, aliens, bullets, mouse_x, mouse_y)
def check_play_button(ai_settings, screen, stats, sb, play_button, ship,
aliens, bullets, mouse_x, mouse_y):
button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)
if button_clicked and not stats.game_active:
ai_settings.initialize_dynamic_settings()
pygame.mouse.set_visible(False)
if play_button.rect.collidepoint(mouse_x, mouse_y):
stats.reset_stats()
stats.game_active = True
sb.prep_score()
sb.prep_high_score()
sb.prep_level()
sb.prep_ships()
aliens.empty()
bullets.empty()
create_fleet(ai_settings, screen, ship, aliens)
ship.center_ship()
def update_screen(ai_settings, screen, stats, sb, ship, aliens, bullets,
play_button):
screen.fill(ai_settings.bg_color)
for bullet in bullets.sprites():
bullet.draw_bullet()
ship.blitme()
aliens.draw(screen)
sb.show_score()
if not stats.game_active:
play_button.draw_button()
pygame.display.flip()
def update_bullets(ai_settings, screen, stats, sb, ship, aliens, bullets):
bullets.update()
for bullet in bullets.copy():
if bullet.rect.bottom <= 0:
bullets.remove(bullet)
check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens,
bullets)
def check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens,
bullets):
collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
if collisions:
        for hit_aliens in collisions.values():
            stats.score += ai_settings.alien_points * len(hit_aliens)
sb.prep_score()
check_high_score(stats, sb)
if len(aliens) == 0:
bullets.empty()
ai_settings.increase_speed()
stats.level += 1
sb.prep_level()
create_fleet(ai_settings, screen, ship, aliens)
def get_number_aliens_x(ai_settings, alien_width):
available_space_x = ai_settings.screen_width - 2 * alien_width
number_aliens_x = int(available_space_x / (2 * alien_width))
return number_aliens_x
def get_number_rows(ai_settings, ship_height, alien_height):
available_space_y = (
ai_settings.screen_height - (3 * alien_height) - ship_height)
number_rows = int(available_space_y / (2 * alien_height))
return number_rows
def create_alien(ai_settings, screen, aliens, alien_number, row_number):
alien = Alien(ai_settings, screen)
alien_width = alien.rect.width
alien.x = alien_width + 2 * alien_width * alien_number
alien.rect.x = alien.x
alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number
aliens.add(alien)
def create_fleet(ai_settings, screen, ship, aliens):
alien = Alien(ai_settings, screen)
number_aliens_x = get_number_aliens_x(ai_settings, alien.rect.width)
number_rows = get_number_rows(ai_settings, ship.rect.height,
alien.rect.height)
for row_number in range(number_rows):
for alien_number in range(number_aliens_x):
create_alien(ai_settings, screen, aliens, alien_number, row_number)
def check_fleet_edges(ai_settings, aliens):
for alien in aliens.sprites():
if alien.check_edges():
change_fleet_direction(ai_settings, aliens)
break
def change_fleet_direction(ai_settings, aliens):
for alien in aliens.sprites():
alien.rect.y += ai_settings.fleet_drop_speed
ai_settings.fleet_direction *= -1
def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):
if stats.ships_left > 0:
stats.ships_left -= 1
sb.prep_ships()
aliens.empty()
bullets.empty()
create_fleet(ai_settings, screen, ship, aliens)
ship.center_ship()
sleep(0.5)
else:
stats.game_active = False
pygame.mouse.set_visible(True)
def check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets):
screen_rect = screen.get_rect()
for alien in aliens.sprites():
if alien.rect.bottom >= screen_rect.bottom:
ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)
break
def update_aliens(ai_settings, screen, stats, sb, ship, aliens, bullets):
check_fleet_edges(ai_settings, aliens)
aliens.update()
if pygame.sprite.spritecollideany(ship, aliens):
        ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)
check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets)
def check_high_score(stats, sb):
if stats.score > stats.high_score:
stats.high_score = stats.score
sb.prep_high_score()
|
|
from collections.abc import Callable
from numbers import Real
import openmc
import openmc.model
import openmc.checkvalue as cv
_SCALAR_BRACKETED_METHODS = ['brentq', 'brenth', 'ridder', 'bisect']
def _search_keff(guess, target, model_builder, model_args, print_iterations,
print_output, guesses, results):
"""Function which will actually create our model, run the calculation, and
obtain the result. This function will be passed to the root finding
algorithm
Parameters
----------
guess : Real
Current guess for the parameter to be searched in `model_builder`.
    target : Real
        Value of keff to search for.
model_builder : collections.Callable
Callable function which builds a model according to a passed
parameter. This function must return an openmc.model.Model object.
model_args : dict
Keyword-based arguments to pass to the `model_builder` method.
print_iterations : bool
Whether or not to print the guess and the resultant keff during the
iteration process.
print_output : bool
Whether or not to print the OpenMC output during the iterations.
guesses : Iterable of Real
Running list of guesses thus far, to be updated during the execution of
this function.
results : Iterable of Real
Running list of results thus far, to be updated during the execution of
this function.
Returns
-------
float
Value of the model for the current guess compared to the target value.
"""
# Build the model
model = model_builder(guess, **model_args)
# Run the model and obtain keff
keff = model.run(output=print_output)
# Close the model to ensure HDF5 will allow access during the next
# OpenMC execution
model.close()
# Record the history
guesses.append(guess)
results.append(keff)
if print_iterations:
text = 'Iteration: {}; Guess of {:.2e} produced a keff of ' + \
'{:1.5f} +/- {:1.5f}'
print(text.format(len(guesses), guess, keff[0], keff[1]))
return (keff[0] - target)
def search_for_keff(model_builder, initial_guess=None, target=1.0,
bracket=None, model_args=None, tol=None,
bracketed_method='bisect', print_iterations=False,
print_output=False, **kwargs):
"""Function to perform a keff search by modifying a model parametrized by a
single independent variable.
Parameters
----------
model_builder : collections.Callable
Callable function which builds a model according to a passed
parameter. This function must return an openmc.model.Model object.
initial_guess : Real, optional
Initial guess for the parameter to be searched in
        `model_builder`. One of `initial_guess` or `bracket` must be provided.
target : Real, optional
keff value to search for, defaults to 1.0.
bracket : None or Iterable of Real, optional
Bracketing interval to search for the solution; if not provided,
        a generic non-bracketing method is used. Defaults to no brackets
        provided. One of `initial_guess` or `bracket` must be provided; if
        both are provided, the bracket is used preferentially.
model_args : dict, optional
Keyword-based arguments to pass to the `model_builder` method. Defaults
to no arguments.
tol : float
Tolerance to pass to the search method
bracketed_method : {'brentq', 'brenth', 'ridder', 'bisect'}, optional
Solution method to use; only applies if
`bracket` is set, otherwise the Secant method is used.
Defaults to 'bisect'.
print_iterations : bool
Whether or not to print the guess and the result during the iteration
process. Defaults to False.
print_output : bool
Whether or not to print the OpenMC output during the iterations.
Defaults to False.
**kwargs
All remaining keyword arguments are passed to the root-finding
method.
Returns
-------
zero_value : float
Estimated value of the variable parameter where keff is the
targeted value
guesses : List of Real
List of guesses attempted by the search
results : List of 2-tuple of Real
        List of keffs and uncertainties corresponding to the guesses attempted by
the search
"""
if initial_guess is not None:
cv.check_type('initial_guess', initial_guess, Real)
if bracket is not None:
cv.check_iterable_type('bracket', bracket, Real)
cv.check_length('bracket', bracket, 2)
cv.check_less_than('bracket values', bracket[0], bracket[1])
if model_args is None:
model_args = {}
else:
cv.check_type('model_args', model_args, dict)
cv.check_type('target', target, Real)
cv.check_type('tol', tol, Real)
cv.check_value('bracketed_method', bracketed_method,
_SCALAR_BRACKETED_METHODS)
cv.check_type('print_iterations', print_iterations, bool)
cv.check_type('print_output', print_output, bool)
cv.check_type('model_builder', model_builder, Callable)
# Run the model builder function once to make sure it provides the correct
# output type
if bracket is not None:
model = model_builder(bracket[0], **model_args)
elif initial_guess is not None:
model = model_builder(initial_guess, **model_args)
cv.check_type('model_builder return', model, openmc.model.Model)
import scipy.optimize as sopt
# Set the iteration data storage variables
guesses = []
results = []
# Set the searching function (for easy replacement should a later
    # generic function be added).
search_function = _search_keff
if bracket is not None:
# Generate our arguments
args = {'f': search_function, 'a': bracket[0], 'b': bracket[1]}
if tol is not None:
args['rtol'] = tol
# Set the root finding method
if bracketed_method == 'brentq':
root_finder = sopt.brentq
elif bracketed_method == 'brenth':
root_finder = sopt.brenth
elif bracketed_method == 'ridder':
root_finder = sopt.ridder
elif bracketed_method == 'bisect':
root_finder = sopt.bisect
elif initial_guess is not None:
# Generate our arguments
args = {'func': search_function, 'x0': initial_guess}
if tol is not None:
args['tol'] = tol
# Set the root finding method
root_finder = sopt.newton
else:
raise ValueError("Either the 'bracket' or 'initial_guess' parameters "
"must be set")
# Add information to be passed to the searching function
args['args'] = (target, model_builder, model_args, print_iterations,
print_output, guesses, results)
# Create a new dictionary with the arguments from args and kwargs
args.update(kwargs)
# Perform the search
zero_value = root_finder(**args)
return zero_value, guesses, results
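# --- Illustrative usage (not part of the original module) -------------------
# A minimal sketch of a typical search_for_keff call, assuming a hypothetical
# `build_model(radius)` callable that returns an openmc.model.Model
# parametrized by a single radius; the function name and bracket values below
# are placeholders, not definitions from this module.
#
#     def build_model(radius):
#         ...  # construct materials/geometry/settings for this radius
#         return openmc.model.Model(geometry, materials, settings)
#
#     crit_radius, guesses, keffs = search_for_keff(
#         build_model, bracket=[5.0, 10.0], tol=1e-4,
#         bracketed_method='bisect', print_iterations=True)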
|
|
#
# Copyright (c) 2015 Juniper Networks, Inc. All rights reserved.
#
from gevent import monkey
monkey.patch_all()
import os
import sys
import socket
import subprocess
import json
import time
import datetime
import platform
import select
import gevent
import ConfigParser
from nodemgr.common.event_manager import EventManager
from ConfigParser import NoOptionError
from supervisor import childutils
from pysandesh.sandesh_base import *
from pysandesh.sandesh_session import SandeshWriter
from pysandesh.gen_py.sandesh_trace.ttypes import SandeshTraceRequest
from sandesh_common.vns.ttypes import Module, NodeType
from sandesh_common.vns.constants import ModuleNames, NodeTypeNames,\
Module2NodeType, INSTANCE_ID_DEFAULT, SERVICE_CONTRAIL_DATABASE, \
RepairNeededKeyspaces
from subprocess import Popen, PIPE
from StringIO import StringIO
from database.sandesh.database.ttypes import \
NodeStatusUVE, NodeStatus, DatabaseUsageStats,\
DatabaseUsageInfo, DatabaseUsage
from database.sandesh.database.process_info.ttypes import \
ProcessStatus, ProcessState, ProcessInfo, DiskPartitionUsageStats
from database.sandesh.database.process_info.constants import \
ProcessStateNames
class DatabaseEventManager(EventManager):
def __init__(self, rule_file, discovery_server,
discovery_port, collector_addr,
hostip, minimum_diskgb, cassandra_repair_interval):
EventManager.__init__(
self, rule_file, discovery_server,
discovery_port, collector_addr)
self.node_type = "contrail-database"
self.module = Module.DATABASE_NODE_MGR
self.module_id = ModuleNames[self.module]
self.hostip = hostip
self.minimum_diskgb = minimum_diskgb
self.cassandra_repair_interval = cassandra_repair_interval
self.supervisor_serverurl = "unix:///tmp/supervisord_database.sock"
self.add_current_process()
# end __init__
def process(self):
        if self.rule_file == '':
self.rule_file = "/etc/contrail/" + \
"supervisord_database_files/contrail-database.rules"
json_file = open(self.rule_file)
self.rules_data = json.load(json_file)
node_type = Module2NodeType[self.module]
node_type_name = NodeTypeNames[node_type]
_disc = self.get_discovery_client()
sandesh_global.init_generator(
self.module_id, socket.gethostname(), node_type_name,
self.instance_id, self.collector_addr, self.module_id, 8103,
['database.sandesh'], _disc)
# sandesh_global.set_logging_params(enable_local_log=True)
self.sandesh_global = sandesh_global
try:
(linux_dist, x, y) = platform.linux_distribution()
if (linux_dist == 'Ubuntu'):
popen_cmd = "grep -A 1 'data_file_directories:'" + \
" /etc/cassandra/cassandra.yaml | grep '-' | cut -d'-' -f2"
else:
popen_cmd = "grep -A 1 'data_file_directories:'" + \
" /etc/cassandra/conf/cassandra.yaml | grep '-' | cut -d'-' -f2"
(cassandra_data_dir, error_value) = \
Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
cassandra_data_dir = cassandra_data_dir.strip()
analytics_dir = cassandra_data_dir + '/ContrailAnalytics'
if os.path.exists(analytics_dir):
self.stderr.write("analytics_dir is " + analytics_dir + "\n")
popen_cmd = "set `df -Pk " + analytics_dir + " | grep % | awk '{s+=$3}END{print s}'` && echo $1"
self.stderr.write("popen_cmd is " + popen_cmd + "\n")
(disk_space_used, error_value) = \
Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
popen_cmd = "set `df -Pk " + analytics_dir + " | grep % | awk '{s+=$4}END{print s}'` && echo $1"
self.stderr.write("popen_cmd is " + popen_cmd + "\n")
(disk_space_available, error_value) = \
Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
popen_cmd = "set `du -skL " + analytics_dir + " | awk '{s+=$1}END{print s}'` && echo $1"
self.stderr.write("popen_cmd is " + popen_cmd + "\n")
(analytics_db_size, error_value) = \
Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
disk_space_total = int(disk_space_used) + int(disk_space_available)
if (disk_space_total / (1024 * 1024) < self.minimum_diskgb):
cmd_str = "service " + SERVICE_CONTRAIL_DATABASE + " stop"
(ret_value, error_value) = Popen(
cmd_str, shell=True, stdout=PIPE).communicate()
self.fail_status_bits |= self.FAIL_STATUS_DISK_SPACE
self.fail_status_bits &= ~self.FAIL_STATUS_DISK_SPACE_NA
else:
self.fail_status_bits |= self.FAIL_STATUS_DISK_SPACE_NA
except:
sys.stderr.write("Failed to get database usage" + "\n")
self.fail_status_bits |= self.FAIL_STATUS_DISK_SPACE_NA
def send_process_state_db(self, group_names):
self.send_process_state_db_base(
group_names, ProcessInfo, NodeStatus, NodeStatusUVE)
def send_nodemgr_process_status(self):
self.send_nodemgr_process_status_base(
ProcessStateNames, ProcessState, ProcessStatus,
NodeStatus, NodeStatusUVE)
def get_process_state(self, fail_status_bits):
return self.get_process_state_base(
fail_status_bits, ProcessStateNames, ProcessState)
def get_failbits_nodespecific_desc(self, fail_status_bits):
description = ""
if fail_status_bits & self.FAIL_STATUS_DISK_SPACE:
            description += "Disk space for analytics db is too low," + \
" cassandra stopped."
if fail_status_bits & self.FAIL_STATUS_SERVER_PORT:
if description != "":
description += " "
description += "Cassandra state detected DOWN."
if fail_status_bits & self.FAIL_STATUS_DISK_SPACE_NA:
description += "Disk space for analytics db not retrievable."
return description
def database_periodic(self):
try:
(linux_dist, x, y) = platform.linux_distribution()
if (linux_dist == 'Ubuntu'):
popen_cmd = "grep -A 1 'data_file_directories:'" + \
" /etc/cassandra/cassandra.yaml | grep '-' | cut -d'-' -f2"
else:
popen_cmd = "grep -A 1 'data_file_directories:'" + \
" /etc/cassandra/conf/cassandra.yaml | grep '-' | cut -d'-' -f2"
(cassandra_data_dir, error_value) = \
Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
cassandra_data_dir = cassandra_data_dir.strip()
analytics_dir = cassandra_data_dir + '/ContrailAnalytics'
if os.path.exists(analytics_dir):
popen_cmd = "set `df -Pk " + analytics_dir + " | grep % | awk '{s+=$3}END{print s}'` && echo $1"
(disk_space_used, error_value) = \
Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
popen_cmd = "set `df -Pk " + analytics_dir + " | grep % | awk '{s+=$4}END{print s}'` && echo $1"
(disk_space_available, error_value) = \
Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
popen_cmd = "set `du -skL " + analytics_dir + " | awk '{s+=$1}END{print s}'` && echo $1"
(analytics_db_size, error_value) = \
Popen(popen_cmd, shell=True, stdout=PIPE).communicate()
self.fail_status_bits &= ~self.FAIL_STATUS_DISK_SPACE_NA
db_stat = DatabaseUsageStats()
db_info = DatabaseUsageInfo()
db_stat.disk_space_used_1k = int(disk_space_used)
db_stat.disk_space_available_1k = int(disk_space_available)
db_stat.analytics_db_size_1k = int(analytics_db_size)
db_info.name = socket.gethostname()
db_info.database_usage = [db_stat]
usage_stat = DatabaseUsage(data=db_info)
usage_stat.send()
else:
self.fail_status_bits |= self.FAIL_STATUS_DISK_SPACE_NA
except:
sys.stderr.write("Failed to get database usage" + "\n")
self.fail_status_bits |= self.FAIL_STATUS_DISK_SPACE_NA
cassandra_cli_cmd = "cassandra-cli --host " + self.hostip + \
" --batch < /dev/null | grep 'Connected to:'"
proc = Popen(cassandra_cli_cmd, shell=True, stdout=PIPE, stderr=PIPE)
(output, errout) = proc.communicate()
if proc.returncode != 0:
self.fail_status_bits |= self.FAIL_STATUS_SERVER_PORT
else:
self.fail_status_bits &= ~self.FAIL_STATUS_SERVER_PORT
self.send_nodemgr_process_status()
# Record cluster status and shut down cassandra if needed
subprocess.Popen(["contrail-cassandra-status",
"--log-file", "/var/log/cassandra/status.log",
"--debug"])
# end database_periodic
def cassandra_repair(self):
subprocess.Popen(["contrail-cassandra-repair",
"--log-file", "/var/log/cassandra/repair.log",
"--debug"])
#end cassandra_repair
def send_disk_usage_info(self):
self.send_disk_usage_info_base(
NodeStatusUVE, NodeStatus, DiskPartitionUsageStats)
def runforever(self, test=False):
prev_current_time = int(time.time())
while 1:
# we explicitly use self.stdin, self.stdout, and self.stderr
# instead of sys.* so we can unit test this code
headers, payload = self.listener_nodemgr.wait(
self.stdin, self.stdout)
# self.stderr.write("headers:\n" + str(headers) + '\n')
# self.stderr.write("payload:\n" + str(payload) + '\n')
pheaders, pdata = childutils.eventdata(payload + '\n')
# self.stderr.write("pheaders:\n" + str(pheaders)+'\n')
# self.stderr.write("pdata:\n" + str(pdata))
# check for process state change events
if headers['eventname'].startswith("PROCESS_STATE"):
self.event_process_state(pheaders, headers)
# check for flag value change events
if headers['eventname'].startswith("PROCESS_COMMUNICATION"):
self.event_process_communication(pdata)
# do periodic events
if headers['eventname'].startswith("TICK_60"):
self.database_periodic()
prev_current_time = self.event_tick_60(prev_current_time)
# Perform nodetool repair every cassandra_repair_interval hours
if self.tick_count % (60 * self.cassandra_repair_interval) == 0:
self.cassandra_repair()
self.listener_nodemgr.ok(self.stdout)
|
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from marshmallow import ValidationError as MarshmallowValidationError
from rest_framework import fields, serializers
from rest_framework.exceptions import ValidationError
from django.db import IntegrityError
from coredb import operations
from coredb.abstracts.getter import get_run_model
from coredb.api.base.cloning import CloningMixin
from coredb.api.base.is_managed import IsManagedMixin
from coredb.api.base.pipeline import PipelineMixin
from coredb.api.base.settings import SettingsMixin
from coredb.managers.runs import create_run
from polyaxon.exceptions import PolyaxonException
from polyaxon.polyaxonfile import OperationSpecification
from polyaxon.schemas import V1RunPending
class RunSerializer(
serializers.ModelSerializer, CloningMixin, PipelineMixin, SettingsMixin
):
uuid = fields.UUIDField(format="hex", read_only=True)
original = fields.SerializerMethodField()
pipeline = fields.SerializerMethodField()
started_at = fields.DateTimeField(read_only=True)
finished_at = fields.DateTimeField(read_only=True)
settings = fields.SerializerMethodField()
class Meta:
model = get_run_model()
fields = (
"uuid",
"name",
"created_at",
"updated_at",
"started_at",
"finished_at",
"wait_time",
"duration",
"kind",
"runtime",
"meta_info",
"status",
"pipeline",
"original",
"is_managed",
"pending",
"inputs",
"outputs",
"tags",
"settings",
)
extra_kwargs = {
"is_managed": {"read_only": True},
}
class OfflineRunSerializer(
serializers.ModelSerializer,
):
uuid = fields.UUIDField(format="hex")
created_at = fields.DateTimeField()
class Meta:
model = get_run_model()
fields = (
"uuid",
"name",
"description",
"tags",
"created_at",
"updated_at",
"started_at",
"finished_at",
"wait_time",
"duration",
"kind",
"runtime",
"meta_info",
"status",
"status_conditions",
"is_managed",
"inputs",
"outputs",
)
def create(self, validated_data):
try:
obj = self.Meta.model.objects.create(**validated_data)
except IntegrityError:
raise ValidationError(
f"A run with uuid {validated_data.get('uuid')} already exists."
)
# Override auto-field for created_at
created_at = validated_data.get("created_at")
if created_at:
obj.created_at = created_at
obj.save()
return obj
class OperationCreateSerializer(serializers.ModelSerializer, IsManagedMixin):
uuid = fields.UUIDField(format="hex", read_only=True)
is_approved = fields.BooleanField(write_only=True, allow_null=True, required=False)
class Meta:
model = get_run_model()
fields = (
"uuid",
"name",
"description",
"content",
"is_managed",
"pending",
"meta_info",
"tags",
"is_approved",
)
extra_kwargs = {
"is_approved": {"write_only": True},
}
def validate(self, attrs):
attrs = super().validate(attrs)
self.check_if_entity_is_managed(attrs=attrs, entity_name="Run")
return attrs
def create(self, validated_data):
is_managed = validated_data["is_managed"]
content = validated_data.get("content")
meta_info = validated_data.get("meta_info") or {}
if content:
is_managed = True if is_managed is None else is_managed
if is_managed and not content:
raise ValidationError(
"Managed runs require a content with valid specification"
)
project_id = validated_data["project"].id
user = validated_data.get("user")
name = validated_data.get("name")
description = validated_data.get("description")
tags = validated_data.get("tags")
pending = validated_data.get("pending")
# Check the deprecated `is_approved` flag
if pending is None:
is_approved = validated_data.get("is_approved")
if is_approved is False:
pending = V1RunPending.UPLOAD
if is_managed or content:
try:
op_spec = OperationSpecification.read(content)
except Exception as e:
raise ValidationError(e)
if op_spec.is_template():
raise ValidationError(
"Received a template polyaxonfile, "
"Please customize the specification or disable the template."
)
try:
return operations.init_and_save_run(
project_id=project_id,
user_id=user.id if user else None,
op_spec=op_spec,
name=name,
description=description,
tags=tags,
meta_info=meta_info,
is_managed=is_managed,
pending=pending,
supported_kinds=validated_data.get("supported_kinds"),
supported_owners=validated_data.get("supported_owners"),
)
except (MarshmallowValidationError, PolyaxonException, ValueError) as e:
raise ValidationError(e)
else:
return create_run(
project_id=project_id,
user_id=user.id if user else None,
name=name,
description=description,
tags=tags,
meta_info=meta_info,
)
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from six.moves import range
import json, os
from semantic_version import Version
import frappe
import requests
import subprocess # nosec
from frappe.utils import cstr
from frappe.utils.gitutils import get_app_branch
from frappe import _, safe_decode
import git
def get_change_log(user=None):
if not user: user = frappe.session.user
last_known_versions = frappe._dict(json.loads(frappe.db.get_value("User",
user, "last_known_versions") or "{}"))
current_versions = get_versions()
if not last_known_versions:
update_last_known_versions()
return []
change_log = []
def set_in_change_log(app, opts, change_log):
from_version = last_known_versions.get(app, {}).get("version") or "0.0.1"
to_version = opts["version"]
if from_version != to_version:
app_change_log = get_change_log_for_app(app, from_version=from_version, to_version=to_version)
if app_change_log:
change_log.append({
"title": opts["title"],
"description": opts["description"],
"version": to_version,
"change_log": app_change_log
})
for app, opts in current_versions.items():
if app != "frappe":
set_in_change_log(app, opts, change_log)
if "frappe" in current_versions:
set_in_change_log("frappe", current_versions["frappe"], change_log)
return change_log
def get_change_log_for_app(app, from_version, to_version):
change_log_folder = os.path.join(frappe.get_app_path(app), "change_log")
if not os.path.exists(change_log_folder):
return
from_version = Version(from_version)
to_version = Version(to_version)
# remove pre-release part
to_version.prerelease = None
major_version_folders = ["v{0}".format(i) for i in range(from_version.major, to_version.major + 1)]
app_change_log = []
for folder in os.listdir(change_log_folder):
if folder in major_version_folders:
for file in os.listdir(os.path.join(change_log_folder, folder)):
version = Version(os.path.splitext(file)[0][1:].replace("_", "."))
if from_version < version <= to_version:
file_path = os.path.join(change_log_folder, folder, file)
content = frappe.read_file(file_path)
app_change_log.append([version, content])
app_change_log = sorted(app_change_log, key=lambda d: d[0], reverse=True)
# convert version to string and send
return [[cstr(d[0]), d[1]] for d in app_change_log]
@frappe.whitelist()
def update_last_known_versions():
frappe.db.set_value("User", frappe.session.user, "last_known_versions",
json.dumps(get_versions()), update_modified=False)
@frappe.whitelist()
def get_versions():
"""Get versions of all installed apps.
Example:
{
"frappe": {
"title": "Frappe Framework",
"version": "5.0.0"
}
}"""
versions = {}
for app in frappe.get_installed_apps(sort=True):
app_hooks = frappe.get_hooks(app_name=app)
versions[app] = {
"title": app_hooks.get("app_title")[0],
"description": app_hooks.get("app_description")[0],
"branch": get_app_branch(app)
}
if versions[app]['branch'] != 'master':
try:
app_repo = git.Repo(os.path.join('..', 'apps', '{}'.format(app)))
branch_version = '-'.join(app_repo.git.describe().split('-')[:2])
branch_version = [branch_version.strip('v')]
except:
branch_version = app_hooks.get('{0}_version'.format(versions[app]['branch']))
if branch_version:
versions[app]['branch_version'] = branch_version[0] + ' ({0})'.format(get_app_last_commit_ref(app))
try:
versions[app]["version"] = frappe.get_attr(app + ".__version__")
except AttributeError:
versions[app]["version"] = '0.0.1'
return versions
def get_app_branch(app):
'''Returns branch of an app'''
try:
result = subprocess.check_output('cd ../apps/{0} && git rev-parse --abbrev-ref HEAD'.format(app),
shell=True)
result = safe_decode(result)
result = result.strip()
return result
except Exception as e:
return ''
def get_app_last_commit_ref(app):
try:
		result = subprocess.check_output('cd ../apps/{0} && git rev-parse --short=7 HEAD'.format(app),
shell=True)
result = safe_decode(result)
result = result.strip()
return result
except Exception as e:
return ''
def check_for_update():
updates = frappe._dict(major=[], minor=[], patch=[])
apps = get_versions()
for app in apps:
app_details = check_release_on_github(app)
if not app_details: continue
github_version, org_name = app_details
		# Get the local instance's current version of the app
instance_version = Version(apps[app]['branch_version'].split(' ')[0])
# Compare and popup update message
for update_type in updates:
if github_version.__dict__[update_type] > instance_version.__dict__[update_type]:
updates[update_type].append(frappe._dict(
current_version = str(instance_version),
available_version = str(github_version),
org_name = org_name,
app_name = app,
title = apps[app]['title'],
))
break
if github_version.__dict__[update_type] < instance_version.__dict__[update_type]: break
add_message_to_redis(updates)
def parse_latest_non_beta_release(response):
"""
	Parses the response JSON for all the releases and returns the latest non-prerelease release
Parameters
response (list): response object returned by github
Returns
json : json object pertaining to the latest non-beta release
"""
for release in response:
if release['prerelease'] == True: continue
return release
def check_release_on_github(app):
# Check if repo remote is on github
from subprocess import CalledProcessError
try:
remote_url = subprocess.check_output("cd ../apps/{} && git ls-remote --get-url".format(app), shell=True).decode()
except CalledProcessError:
		# Passing this since some apps may not have git initialized in them
return None
if isinstance(remote_url, bytes):
remote_url = remote_url.decode()
if "github.com" not in remote_url:
return None
# Get latest version from github
if 'https' not in remote_url:
return None
org_name = remote_url.split('/')[3]
r = requests.get('https://api.github.com/repos/{}/{}/releases'.format(org_name, app))
if r.status_code == 200 and r.json():
		latest_non_beta_release = parse_latest_non_beta_release(r.json())
		return Version(latest_non_beta_release['tag_name'].strip('v')), org_name
else:
# In case of an improper response or if there are no releases
return None
def add_message_to_redis(update_json):
# "update-message" will store the update message string
# "update-user-set" will be a set of users
cache = frappe.cache()
cache.set_value("update-info", json.dumps(update_json))
user_list = [x.name for x in frappe.get_all("User", filters={"enabled": True})]
system_managers = [user for user in user_list if 'System Manager' in frappe.get_roles(user)]
cache.sadd("update-user-set", *system_managers)
@frappe.whitelist()
def show_update_popup():
cache = frappe.cache()
user = frappe.session.user
update_info = cache.get_value("update-info")
if not update_info:
return
updates = json.loads(update_info)
current_versions = get_versions()
	# Check if user is in the set of users to send the update message to
update_message = ""
if cache.sismember("update-user-set", user):
for update_type in updates:
release_links = ""
for app in updates[update_type]:
app = frappe._dict(app)
release_links += "<a href='https://github.com/{org_name}/{app_name}/releases/tag/v{available_version}'><b>{title}</b>: v{available_version}</a><br>".format(
available_version = app.available_version,
org_name = app.org_name,
app_name = app.app_name,
title = app.title
)
if release_links:
update_message += _("New {} releases for the following apps are available".format(update_type)) + ":<br><br>{}".format(release_links)
if update_message:
frappe.msgprint(update_message, title=_("New updates are available"), indicator='green')
cache.srem("update-user-set", user)
|
|
""" makeCSHOREinput.py
makeCSHOREinput(inpth1, inpth2, outpth, transect_id, erod, d50, fb, dconv):
---------------------------------------------------------------------------
This code creates the CSHORE infile for the set of storms specified in the
input folder
infile: This file will be the input file for CSHORE. This script
will write out the infile, along with creating folders for
each transect.
MFS 08-12-2014
MFS 09-24-2014 - Modified CSHORE input params: GAMMA, SLP, BLP
- Modified to not include sed params for non-eroding cases
10-02-2014 - Remove first day of storm for ramping period
11-17-2014 - Modified to filter out timesteps with ice or when dry
J.Dorvinen 03/22/2017 - converted to Python and refactored.
--------------------------------------------------------------------------
INPUT
inpth1 - input file path for transect (req units in input file: meters)
inpth2 - input file path for storms
outpth - output file path
transect_id - transect ID (hydroid)
erod - indicator if erodible profile (1=true, 0=false)
d50 - mean sediment grain size diameter D50 (mm)
fb - bottom friction factor (used if>0, otherwise default is 0.002)
dconv - conversion factor in meters added to storm water levels
OUTPUT
err - error code (=1 if successful)
--------------------------------------------------------------------------
Inputs/Files needed
--------------------------------------------------------------------------
profile*.txt: Profile file for transect with specified id. Profile
 (inpth1)      starting from Station 0 (offshore) and going to the most
               inland point. The Stations and Elevations are in meters.
The elevations have been normalized so the shoreline has
the elevation 0 m.
stormlist.txt List of storms for which input files will be created
(inpth2)
StormName_ID.txt: This file was created using the hydrograph extraction process,
(inpth2) and has the time series of water elevation, Hs, and Tp
for the storm duration
Format: |Time (s) |Water ele(m) | Hs (m) | Tp(s) |
--------------------------------------------------------------------------
"""
# Import modules
from __future__ import division
import os
from time import localtime, strftime
import sys
import getopt
import numpy as np
#from cshore_transects import TRANSECTS
inpth1='//surly.mcs.local/flood/02/NY/Chautauqua_Co_36013C/STUDY__TO90/TECHNICAL/ENG_FLOOD_HAZ_DEV/COASTAL/WAVE_MODELING/CSHORE/CSHORE_Infile_Creater/input'
inpth2='//surly.mcs.local/flood/02/NY/Chautauqua_Co_36013C/STUDY__TO90/TECHNICAL/ENG_FLOOD_HAZ_DEV/COASTAL/WAVE_MODELING/CSHORE/Hydrograph_stretching/output_fixing'
outpth='//surly.mcs.local/flood/02/NY/Chautauqua_Co_36013C/STUDY__TO90/TECHNICAL/ENG_FLOOD_HAZ_DEV/COASTAL/WAVE_MODELING/CSHORE/CSHORE_Infile_Creater/output_test'
transect_id = int('35') #35, 38
d50 = float('0.7')
fb = float('0.015') # Default CSHORE value is 0.002, got recommendation for 0.015 from USACE
erod = int('0')
dconv = float('174') # Lake Erie: 174, Lake Ontario: 74.2
# CSHORE execution and physical params
ILINE = 1 # 1 = single line
IPROFL = erod # 0 = no morph, 1 = run morph
ISEDAV = 0 # 0 = unlimited sand, 1 = hard bottom
IPERM = 0 # 0 = no permeability, 1 = permeable
IOVER = 1 # 0 = no overtopping , 1 = include overtopping
INFILT = 0 # 1 = include infiltration landward of dune crest
IWTRAN = 0 # 0 = no standing water landward of crest,
# 1 = wave transmission due to overtopping
IPOND = 0 # 0 = no ponding seaward of SWL
IWCINT = 1 # 0 = no Wave & Current interaction , 1 = include W & C interaction
IROLL = 1 # 0 = no roller, 1 = roller
IWIND = 0 # 0 = no wind effect
ITIDE = 0 # 0 = no tidal effect on currents
DX = 0.5 # constant dx
GAMMA = 0.5 # shallow water ratio of wave height to water depth
#SPORO = 0 # sediment porosity
D50 = d50 # d_50 in mm
SG = 2.65 # specific gravity of sediment
EFFB = 0.005 # suspension efficiency due to breaking eB
EFFF = 0.01 # suspension efficiency due to friction ef
SLP = 0.4 # suspended load parameter
#SLPOT = .1 # overtopping suspended load parameter
TANPHI = 0.63 # tangent (sediment friction angle)
BLP = 0.002 # bedload parameter
RWH = 0.015 # numerical runup wire height
ILAB = 0 # controls the boundary condition timing; 0 = read the input wave and water level data separately.
FRIC_FAC = fb # bottom friction factor
# Define constants
G = 9.81 # (m/s/s)
T = 2 # (deg C) assumed average temperature for storm season (Nov-April)
S = 0 # (salinity) assume freshwater
RHOS = SG*1000 # sediment density in kg/m**3
# Define functions
def getDensity(temperature=2, salinity=0):
""" Estimate water density from temperature and salinity
Approximation from VanRijn, L.C. (1993) Handbook for Sediment Transport
by Currents and Waves
where rho = density of water (kg/(m**3))
T = temperature (deg C)
S = salinity (o/oo) """
_cl = (salinity-0.03)/1.805 #VanRijn
if _cl < 0:
_cl = 0.0
rho = 1000 + 1.455*_cl - 6.5*10**(-3)*(temperature - 4 + (0.4*_cl))**2 # from VanRijn (1993)
return rho
def getKinematicViscosity(temperature=2):
"""Kinematic viscosity of water approximation from
VanRijn, L.C. (1989) Handbook of Sediment Transport
valid range approximately 0-35 deg C - JD 3/21/2017
kvis = kinematic viscosity (m**2/sec)
T = temperature (C) """
kvis = 1*10**(-6)*(1.14 - 0.031*(temperature-15) + 6.8*10**(-4)*(temperature-15)**2)
return kvis
def getFallVelocity(rho, kvis, rhos, d50):
"""compute fall velocity from D50 based on Soulsby's (1997) optimization.
adapted from code by Jarrell Smith, USACE CHL, Vicksburg, MS
w = 10.36 * (kvis/d) * ((1 + 0.156*((s-1)*g*(d**3)))**0.5 - 1)
or
w = kvis/d* [sqrt(10.36^2 + 1.049 D^3) - 10.36]
where w = sediment fall velocity (m/s)
d = grain diameter (mm)
T = temperature (deg C)
S = Salinity (o/oo)"""
d50_m = d50/1000. #convert mm to m
s = rhos/rho
D = (G*(s-1) / kvis**2)**(1/3.) * d50_m
w = (kvis/d50_m) * ((10.36**2 + 1.049*D**3)**0.5 - 10.36)
return w
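# --- Illustrative sketch (not part of the original script) -----------------
# Shows how the three helpers above compose into the fall velocity later used
# by makeCSHOREinput(). It only reuses the module-level constants defined
# above (T, S, RHOS, d50); the example variables are throwaway.
_rho_example = getDensity(T, S)                  # water density (kg/m**3)
_kvis_example = getKinematicViscosity(T)         # kinematic viscosity (m**2/s)
_wf_example = getFallVelocity(_rho_example, _kvis_example, RHOS, d50)  # fall velocity (m/s)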
def makeCSHOREinput(inpth1, inpth2, outpth, transect_id, erod, d50, dconv=0):
""" Build CSHORE input file """
# Initialize 'err' variable
err = 1
# Standard error log string format
logline = "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\n"
# Calculate sediment fall velocity
rho = getDensity(T, S)
kvis = getKinematicViscosity(T)
wf = getFallVelocity(rho, kvis, RHOS, d50)
# Load in transect profile information that has been extracted from DEM
#profile = pd.read_csv(os.path.join(inpth1,'profile{}.txt'.format(transect_id)),
# delimiter=" ",
# header=None,
# names=["station", "elevation"])
# or could use numpy
profile = np.loadtxt(os.path.join(inpth1, 'profile{}.txt'.format(transect_id)))
# Create profile matrix, the third column is the bottom friction
# coefficient. Use default of 0.002 if not specified.
bottom_friction = np.full((profile.shape[0], 1), FRIC_FAC)
profile = np.append(profile, bottom_friction, axis=1)
# Load in storm/scenario list
storms = np.loadtxt(os.path.join(inpth2, 'stormlist.txt'))
with open(os.path.join(outpth, 'makeCSHOREinput.log'), 'a') as log:
log.write(logline.format('Datetime',
'Transect ID',
'Storm',
'Num of Timesteps',
'Valid Timesteps SWEL',
'Valid Timesteps Hs',
'Valid Timesteps Tp',
'Filtered Timesteps'))
# Initialize count
count = len(storms)
# Step through all storms
ii = 0
while ii < count and err == 1:
# For every scenario/storm, create the input files.
# Load hydrograph data
data = np.loadtxt(os.path.join(inpth2, '{}_{}.txt'.format(int(storms[ii]), transect_id)))
time = data[:, 0]
swel = data[:, 1] + dconv # Add conversion factor
height = data[:, 2]/np.sqrt(2) # convert Hs to Hrms
period = data[:, 3]
# Remove first day for ramping period
id_s = int(np.where(time == 86400)[0])
swel = swel[id_s:]
height = height[id_s:]
period = period[id_s:]
time = time[id_s:]-86400
# Check for good data values (> -100) in SWEL, Hs, and Tp. Filter out
# remaining
n_cnt = len(swel)
# Filter based on SWEL
ids_w = np.where(swel > -100)
swel = swel[ids_w]
height = height[ids_w]
period = period[ids_w]
time = time[ids_w]
# Filter based on height
ids_h = np.where(height > -100)
swel = swel[ids_h]
height = height[ids_h]
period = period[ids_h]
time = time[ids_h]
# Filter based on period
ids_t = np.where(period > -100)
swel = swel[ids_t]
height = height[ids_t]
period = period[ids_t]
time = time[ids_t]
# Find total dataseries length after filtering
filt_cnt = len(swel)
# If filtering has removed any data, print this in the log file
if filt_cnt < n_cnt:
tnow = localtime()
now = strftime("%Y-%m-%d %H:%M:%S", tnow)
log.write(logline.format(now,
transect_id,
storms[ii],
n_cnt,
len(ids_w[0]),
len(ids_h[0]),
len(ids_t[0]),
filt_cnt))
# If any valid time steps remain, write CSHORE input file
if filt_cnt > 0:
# Ensure first timestep is time=0 (required for CSHORE)
time[0] = 0
# SWEL data for CSHORE
cswel = np.vstack((time, swel)).T
# Move the wave data into variables in the format needed for CSHORE,
# timestep, wave period, wave height, wave direction (zeros)
cwave = np.vstack((time, period, height, height*0)).T
# Assign NSURGE: the length of the surge record minus 1.
nsurge = len(swel) -1
# Assign NWAVE: the length of the wave record minus 1.
nwave = len(cwave) -1
# Assign NBINP, it is the length of the profile record.
nbinp = len(profile)
# Write out some bits of the file header
str1 = '4\n'
str2 = '---------------------------------------------------------------------\n'
str3 = 'CSHORE input file for Transect{0}\n'.format(transect_id)
str4 = 'Storm: {0}, TR={1}\n'.format(int(storms[ii]), transect_id)
# assign standard heading
s01 = '{0:<42}->ILINE\n'.format(ILINE)
s02 = '{0:<42}->IPROFL\n'.format(IPROFL) # Movable bottom
s03 = '{0:<42}->ISEDAV\n'.format(ISEDAV) # unlimited sediment availability, if IPROFL = 1, ISEDAV must be specified.
s04 = '{0:<42}->IPERM \n'.format(IPERM) # Impermeable bottom
s05 = '{0:<42}->IOVER\n'.format(IOVER) # wave overtopping allowed
s06 = '{0:<42}->IWTRAN\n'.format(IWTRAN) # no standing water or wave transmission in a bay landward of dune. must be specified if IOVER = 1, although not applicable.
s07 = '{0:<42}->IPOND\n'.format(IPOND) #
s08 = '{0:<42}->INFILT\n'.format(INFILT)
s09 = '{0:<42}->IWCINT\n'.format(IWCINT) # wave and current interactions
s10 = '{0:<42}->IROLL\n'.format(IROLL) # roller effects in wet zone
s11 = '{0:<42}->IWIND\n'.format(IWIND) # No wind effects
s12 = '{0:<42}->ITIDE\n'.format(ITIDE)
s13 = '{0}{1:<37.3f}->DX\n'.format(' '*5, DX) # Constant nodal spacing
s14 = '{0}{1:<37.4f}->GAMMA\n'.format(' '*5, GAMMA) # empirical breaker ratio.
s15 = '{0}{1:.1f}{0}{2:.6f}{0}{3:.4f}{4}->D50 WF SG\n'.format(' '*5, D50, wf, SG, ' '*9) # mean sediment diameter, sediment fall velocity, sediment specific gravity.
s16 = '{0}{1:.4f}{0}{2:.4f}{0}{3:.4f}{0}{4:.4f}{5}->EFFB EFFF SLP\n'.format(' '*5, EFFB, EFFF, SLP, 0.1, ' '*14) #suspension efficiency due to wave breaking, suspension efficiency due to btm friction, suspension load parameter
s17 = '{0}{1:.4f}{0}{2:.4f}{3}->TANPHI BLP\n'.format(' '*5, TANPHI, BLP, ' '*20) # sediment limiting (maximum) slope, bedload parameter. needed if IPROFL = 1.
s18 = '{0}{1:.3f}{2}->RWH \n'.format(' '*5, RWH, ' '*32) # runup wire height
s19 = '{0:<42}->ILAB \n'.format(ILAB) # ILAB = 0: read the input wave and water level data separately.
# Create directory and infile for storm
tran_direct = os.path.join(outpth, 'TR{}'.format(transect_id))
if os.path.exists(tran_direct) is False:
os.mkdir(tran_direct)
directory = os.path.join(outpth, 'TR{}'.format(transect_id), str(int(storms[ii])))
if os.path.exists(directory) is False:
os.mkdir(directory)
with open(os.path.join(directory, 'infile'), 'w') as fid:
# Start writing out header to infile
fid.write(str1+str2+str3+str4+str2)
# Write standard heading
if erod == 1:
fid.write(s01+s02+s03+s04+s05+s06+s07+s08+s09+s10+s11+s12+s13+s14+s15+s16+s17+s18+s19)
else:
fid.write(s01+s02+s04+s05+s06+s07+s09+s10+s11+s12+s13+s14+s18+s19)
fid.write('{0:<42.0f}->NWAVE\n'.format(nwave))
fid.write('{0:<42.0f}->NSURGE\n'.format(nsurge))
# Print wave data
for j in range(len(cwave)):
fid.write('{0:>11.1f}{1:>11.2f}{2:>11.2f}{3:>11.2f}\n'.format(cwave[j, 0],
cwave[j, 1],
cwave[j, 2],
cwave[j, 3]))
# Print surge data
for j in range(len(cswel)):
fid.write('{0:.1f}{1:>11.2f} \n'.format(cswel[j, 0],
cswel[j, 1]))
# Print number of pts in transect file
fid.write('{0:>6.0f} -> NBINP \n'.format(nbinp))
# Print profile
for j in range(len(profile)):
fid.write('{0:.2f}{1:>15.2f}{2:>12.4f} \n'.format(profile[j, 0],
profile[j, 1],
profile[j, 2]))
# Increment counter
ii += 1
def main(argv):
""" main function """
makeCSHOREinput(inpth1, inpth2, outpth, transect_id, erod, d50, dconv)
if __name__ == '__main__':
main(sys.argv[1:])
|
|
"""The Fronius integration."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
import logging
from typing import Final, TypeVar
from pyfronius import Fronius, FroniusError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_MODEL, ATTR_SW_VERSION, CONF_HOST, Platform
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import DeviceInfo
from .const import DOMAIN, SOLAR_NET_ID_SYSTEM, FroniusDeviceInfo
from .coordinator import (
FroniusCoordinatorBase,
FroniusInverterUpdateCoordinator,
FroniusLoggerUpdateCoordinator,
FroniusMeterUpdateCoordinator,
FroniusOhmpilotUpdateCoordinator,
FroniusPowerFlowUpdateCoordinator,
FroniusStorageUpdateCoordinator,
)
_LOGGER: Final = logging.getLogger(__name__)
PLATFORMS: Final = [Platform.SENSOR]
FroniusCoordinatorType = TypeVar("FroniusCoordinatorType", bound=FroniusCoordinatorBase)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up fronius from a config entry."""
host = entry.data[CONF_HOST]
fronius = Fronius(async_get_clientsession(hass), host)
solar_net = FroniusSolarNet(hass, entry, fronius)
await solar_net.init_devices()
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = solar_net
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
solar_net = hass.data[DOMAIN].pop(entry.entry_id)
while solar_net.cleanup_callbacks:
solar_net.cleanup_callbacks.pop()()
return unload_ok
class FroniusSolarNet:
"""The FroniusSolarNet class routes received values to sensor entities."""
def __init__(
self, hass: HomeAssistant, entry: ConfigEntry, fronius: Fronius
) -> None:
"""Initialize FroniusSolarNet class."""
self.hass = hass
self.cleanup_callbacks: list[Callable[[], None]] = []
self.config_entry = entry
self.coordinator_lock = asyncio.Lock()
self.fronius = fronius
self.host: str = entry.data[CONF_HOST]
# entry.unique_id is either the logger uid or, if no logger is available, the first inverter uid,
# prefixed with "solar_net_" to get an individual device for the whole system (power_flow)
self.solar_net_device_id = f"solar_net_{entry.unique_id}"
self.system_device_info: DeviceInfo | None = None
self.inverter_coordinators: list[FroniusInverterUpdateCoordinator] = []
self.logger_coordinator: FroniusLoggerUpdateCoordinator | None = None
self.meter_coordinator: FroniusMeterUpdateCoordinator | None = None
self.ohmpilot_coordinator: FroniusOhmpilotUpdateCoordinator | None = None
self.power_flow_coordinator: FroniusPowerFlowUpdateCoordinator | None = None
self.storage_coordinator: FroniusStorageUpdateCoordinator | None = None
async def init_devices(self) -> None:
"""Initialize DataUpdateCoordinators for SolarNet devices."""
if self.config_entry.data["is_logger"]:
self.logger_coordinator = FroniusLoggerUpdateCoordinator(
hass=self.hass,
solar_net=self,
logger=_LOGGER,
name=f"{DOMAIN}_logger_{self.host}",
)
await self.logger_coordinator.async_config_entry_first_refresh()
# _create_solar_net_device uses data from self.logger_coordinator when available
self.system_device_info = await self._create_solar_net_device()
_inverter_infos = await self._get_inverter_infos()
for inverter_info in _inverter_infos:
coordinator = FroniusInverterUpdateCoordinator(
hass=self.hass,
solar_net=self,
logger=_LOGGER,
name=f"{DOMAIN}_inverter_{inverter_info.solar_net_id}_{self.host}",
inverter_info=inverter_info,
)
await coordinator.async_config_entry_first_refresh()
self.inverter_coordinators.append(coordinator)
self.meter_coordinator = await self._init_optional_coordinator(
FroniusMeterUpdateCoordinator(
hass=self.hass,
solar_net=self,
logger=_LOGGER,
name=f"{DOMAIN}_meters_{self.host}",
)
)
self.ohmpilot_coordinator = await self._init_optional_coordinator(
FroniusOhmpilotUpdateCoordinator(
hass=self.hass,
solar_net=self,
logger=_LOGGER,
name=f"{DOMAIN}_ohmpilot_{self.host}",
)
)
self.power_flow_coordinator = await self._init_optional_coordinator(
FroniusPowerFlowUpdateCoordinator(
hass=self.hass,
solar_net=self,
logger=_LOGGER,
name=f"{DOMAIN}_power_flow_{self.host}",
)
)
self.storage_coordinator = await self._init_optional_coordinator(
FroniusStorageUpdateCoordinator(
hass=self.hass,
solar_net=self,
logger=_LOGGER,
name=f"{DOMAIN}_storages_{self.host}",
)
)
async def _create_solar_net_device(self) -> DeviceInfo:
"""Create a device for the Fronius SolarNet system."""
solar_net_device: DeviceInfo = DeviceInfo(
configuration_url=self.fronius.url,
identifiers={(DOMAIN, self.solar_net_device_id)},
manufacturer="Fronius",
name="SolarNet",
)
if self.logger_coordinator:
_logger_info = self.logger_coordinator.data[SOLAR_NET_ID_SYSTEM]
# API v0 doesn't provide product_type
solar_net_device[ATTR_MODEL] = _logger_info.get("product_type", {}).get(
"value", "Datalogger Web"
)
solar_net_device[ATTR_SW_VERSION] = _logger_info["software_version"][
"value"
]
device_registry = await dr.async_get_registry(self.hass)
device_registry.async_get_or_create(
config_entry_id=self.config_entry.entry_id,
**solar_net_device,
)
return solar_net_device
async def _get_inverter_infos(self) -> list[FroniusDeviceInfo]:
"""Get information about the inverters in the SolarNet system."""
try:
_inverter_info = await self.fronius.inverter_info()
except FroniusError as err:
raise ConfigEntryNotReady from err
inverter_infos: list[FroniusDeviceInfo] = []
for inverter in _inverter_info["inverters"]:
solar_net_id = inverter["device_id"]["value"]
unique_id = inverter["unique_id"]["value"]
device_info = DeviceInfo(
identifiers={(DOMAIN, unique_id)},
manufacturer=inverter["device_type"].get("manufacturer", "Fronius"),
model=inverter["device_type"].get(
"model", inverter["device_type"]["value"]
),
name=inverter.get("custom_name", {}).get("value"),
via_device=(DOMAIN, self.solar_net_device_id),
)
inverter_infos.append(
FroniusDeviceInfo(
device_info=device_info,
solar_net_id=solar_net_id,
unique_id=unique_id,
)
)
return inverter_infos
@staticmethod
async def _init_optional_coordinator(
coordinator: FroniusCoordinatorType,
) -> FroniusCoordinatorType | None:
"""Initialize an update coordinator and return it if devices are found."""
try:
await coordinator.async_config_entry_first_refresh()
except ConfigEntryNotReady:
# ConfigEntryNotReady raised from FroniusError / KeyError in
# DataUpdateCoordinator if request not supported by the Fronius device
return None
# if no device for the request is installed an empty dict is returned
if not coordinator.data:
return None
return coordinator
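# --- Illustrative sketch (not part of the integration) ----------------------
# A minimal way to exercise the pyfronius calls used above outside of Home
# Assistant. Only Fronius(session, host) and inverter_info(), both already
# used in this module, are assumed; the host argument is a placeholder.
async def _example_query_inverter_info(host: str) -> None:
    """Log the raw inverter info reported by a Fronius device at `host`."""
    import aiohttp  # local import so the sketch stays self-contained

    async with aiohttp.ClientSession() as session:
        fronius = Fronius(session, host)
        _LOGGER.debug("inverter info: %s", await fronius.inverter_info())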
|
|
import re
import urlparse
from hubcheck.pageobjects.basepagewidget import BasePageWidget
from hubcheck.pageobjects.basepageelement import Link
from selenium.webdriver.common.action_chains import ActionChains
# from hubcheck.pageobjects.widgets.search import Search
class Header(BasePageWidget):
def __init__(self, owner, locatordict={}):
super(Header,self).__init__(owner,locatordict)
# load hub's classes
Header_Locators = self.load_class('Header_Locators')
# update this object's locator
self.locators.update(Header_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# setup page object's components
self.login = Link(self,{'base':'login'})
self.register = Link(self,{'base':'register'})
self.logout = Link(self,{'base':'logout'})
self.myaccount = Link(self,{'base':'myaccount'})
# self.search = Search(self,{'base':'search'})
# update the component's locators with this objects overrides
self._updateLocators()
def _checkLocatorsLoggedOut(self):
widgets = [self.login,self.register]
self._checkLocators(widgets=widgets,cltype='LoggedOut')
def _checkLocatorsLoggedIn(self):
widgets = [self.logout,self.myaccount]
self._checkLocators(widgets=widgets,cltype='LoggedIn')
def goto_login(self):
"""click the login link"""
return self.login.click()
def goto_register(self):
"""click the register link"""
return self.register.click()
def goto_logout(self):
"""click the logout link"""
self.logout.click()
message = 'logout button visible while trying to logout'
self.logout.wait_until_invisible(message)
return
def goto_myaccount(self):
"""click the link to go to the member's myaccount page"""
return self.myaccount.click()
def is_logged_in(self):
"""check if user is logged in, returns True or False"""
return self.logout.is_displayed()
def get_account_number(self):
"""return the user's account number based on the "My Account" url"""
url = self.myaccount.get_attribute('href')
if not url:
raise RuntimeError("link '%s' has no href" % (self.myaccount.locator))
path = urlparse.urlsplit(url)[2]
if not path:
raise RuntimeError("url '%s' has no path" % (url))
matches = re.search("/members/(\d+)",path)
if matches is None:
raise RuntimeError("path '%s' does not contain an account number" % (path))
account_number = matches.group(1)
return account_number
class Header_Locators_Base(object):
"""locators for Header object"""
locators = {
'base' : "css=#header",
'login' : "css=#login a",
'register' : "css=#register a",
'logout' : "css=#logout a",
'myaccount' : "css=#myaccount a",
'search' : "css=#searchform",
}
class Header_Locators_Base_2(object):
"""locators for Header object"""
# https://manufacturinghub.org/login
locators = {
'base' : "css=#header",
'login' : "css=#login a",
'register' : "css=#register a",
'logout' : "css=#logout a:nth-child(1)",
'myaccount' : "css=#logout a:nth-child(2)",
'search' : "css=#searchform",
}
class Header_Locators_Base_3(object):
"""
locators for Header object
used on polytechhub
"""
locators = {
'base' : "css=#top",
'login' : "css=#account-login",
'register' : "css=#account-login",
'logout' : "css=#account-logout",
'myaccount' : "css=#account-info",
'search' : "css=#searchword",
}
class Header_Locators_Base_4(object):
"""
locators for Header object
used on nanohub
"""
locators = {
'base' : "css=#header",
'login' : "css=#login",
'register' : "css=#register",
'logout' : "css=#logout",
'myaccount' : "css=#usersname",
'search' : "css=#searchword",
}
class Header1(Header):
def __init__(self, owner, locatordict={}):
super(Header1,self).__init__(owner,locatordict)
# setup page object's additional components
self.profile = Link(self,{'base':'profile'})
# update the component's locators with this objects overrides
self._updateLocators()
def _checkLocatorsLoggedIn(self):
widgets = [self.logout,self.myaccount,self.profile]
self._checkLocators(widgets=widgets,cltype='LoggedIn')
def goto_profile(self):
"""click the link to go to the member's profile page"""
return self.profile.click()
class Header1_Locators_Base(object):
"""locators for Header object"""
locators = {
'base' : "css=#header",
'login' : "css=#login a",
'register' : "css=#register a",
'logout' : "css=#logout a",
'myaccount' : "css=#myaccount a",
'profile' : "css=#username a",
'search' : "css=#searchform",
}
class Header1_Locators_Base_2(object):
"""locators for Header object"""
locators = {
'base' : "css=#header",
'login' : "css=#account-login",
'register' : "css=#account-register",
'logout' : "css=#account-logout",
'myaccount' : "css=#account-dashboard",
'profile' : "css=#account-profile",
'search' : "css=#searchform",
}
class Header1_Locators_Base_4(object):
"""locators for geoshareproject"""
locators = {
'base' : "css=#header",
'login' : "css=#searchlogin > p > a:nth-child(1)",
'register' : "css=#searchlogin > p > a:nth-child(2)",
'logout' : "css=#searchlogin > p > a:nth-child(3)",
'myaccount' : "css=#searchlogin > p > a:nth-child(2)",
'profile' : "css=#searchlogin > p > a:nth-child(1)",
'search' : "css=#searchForm",
}
class Header1_Locators_Base_5(object):
"""locators for Header object
login and register are the same link
"""
locators = {
'base' : "css=#header",
'login' : "css=#register a",
'register' : "css=#register a",
'logout' : "css=#logout a",
'myaccount' : "css=#myaccount a",
'profile' : "css=#username a",
'search' : "css=#searchform",
}
class Header2(BasePageWidget):
"""
represents header on hubs that use a javascripty dropdown
menu to hold account links for dashboard, profile, messages
and logout.
"""
def __init__(self, owner, locatordict={}):
super(Header2,self).__init__(owner,locatordict)
# load hub's classes
Header_Locators = self.load_class('Header_Locators')
# update this object's locator
self.locators.update(Header_Locators.locators)
# update the locators with those from the owner
self.update_locators_from_owner()
# setup page object's components
self.login = Link(self,{'base':'login'})
self.register = Link(self,{'base':'register'})
self.logout = Link(self,{'base':'logout'})
self.details = Link(self,{'base':'details'})
self.dashboard = Link(self,{'base':'dashboard'})
self.messages = Link(self,{'base':'messages'})
self.profile = Link(self,{'base':'profile'})
# self.search = Search(self,'search')
self._links = ['details','dashboard','messages','profile','logout']
# update the component's locators with this objects overrides
self._updateLocators()
def _checkLocatorsLoggedOut(self,widgets=None,cltype=""):
widgets = [self.login]
self._checkLocators(widgets=widgets,cltype='LoggedOut')
def _checkLocatorsLoggedIn(self,widgets=None,cltype=""):
widgets = [self.logout,self.dashboard,
self.messages,self.profile]
base = self.owner.find_element(self.locators['acctbase'])
# hover mouse over the group manager toolbar to expand it
actionProvider = ActionChains(self.owner._browser)\
.move_to_element(base)
actionProvider.perform()
# check for locators
self._checkLocators(widgets=widgets,cltype='LoggedIn')
def get_options_items(self):
return self._links
def goto_options_item(self,link):
"""this function does selenium specific stuff"""
if link not in self._links:
raise ValueError("invalid link name: '%s'" % link)
# hover mouse over the account toolbar to expand it
# move the mouse to the correct link and click it
menu = self.find_element(self.locators['acctbase'])
loc = self.locators[link]
menu_item = self.find_element(loc)
self.logger.debug("moving mouse over account dropdown")
self.logger.debug("clicking drowdown menu option '%s': %s" % (link,loc))
actionProvider = ActionChains(self.owner._browser)\
.move_to_element(menu)\
.move_to_element(menu_item)\
.click()
actionProvider.perform()
def goto_login(self):
return self.login.click()
def goto_register(self):
return self.register.click()
def goto_logout(self):
lockey = 'logout'
self.goto_options_item(lockey)
# wait until the element is no longer visible (ie. the menu has closed)
# before proceeding to the next task
loc = self.locators[lockey]
self.wait_until_not_present(locator=loc)
def goto_myaccount(self):
# deprecated function, use goto_dashboard() instead
return self.goto_options_item('dashboard')
def goto_dashboard(self):
return self.goto_options_item('dashboard')
def goto_messages(self):
return self.goto_options_item('messages')
def goto_profile(self):
return self.goto_options_item('profile')
def is_logged_in(self):
"""check if user is logged in, returns True or False"""
# return not self.login.is_displayed()
return self.logout.is_present()
def get_account_number(self):
"""return the user's account number based on the "Username" url"""
url = None
# use dashboard instead of details because some hubs (like catalyzecare)
# don't make details a link.
url = self.dashboard.get_attribute('href')
if url is None:
raise RuntimeError("link '%s' has no href" \
% (self.details.locators['base']))
path = urlparse.urlsplit(url)[2]
if not path:
raise RuntimeError("url '%s' has no path" % (url))
# the url looks something like:
# https://hubname.org/members/1234/dashboard
matches = re.search("/members/(\d+)",path)
if matches is None:
raise RuntimeError("path '%s' does not contain an account number" \
% (path))
account_number = matches.group(1)
return account_number
class Header2_Locators_Base(object):
"""locators for Header2 object"""
locators = {
'base' : "css=#header",
'acctbase' : "css=#account",
'login' : "css=#account-login",
'register' : "css=#account-register",
'logout' : "css=#account-logout",
'details' : "css=#account-details",
'dashboard' : "css=#account-dashboard",
'messages' : "css=#account-messages",
'profile' : "css=#account-profile",
'search' : "css=#searchform",
}
class Header2_Locators_Base_2(object):
"""locators for Header2 object"""
locators = {
'base' : "css=#masthead",
'acctbase' : "css=#account",
'login' : "css=#account-login",
'register' : "css=#account-register",
'logout' : "css=#account-logout",
'details' : "css=.account-details",
'dashboard' : "css=#account-dashboard",
'messages' : "css=#account-messages",
'profile' : "css=#account-profile",
'search' : "css=#searchform",
}
class Header2_Locators_Base_3(object):
"""locators for Header2 object
these are used in afrl
"""
locators = {
'base' : "css=#utilities",
'acctbase' : "css=#account",
'login' : "css=#account-login",
'register' : "css=#account-register",
'logout' : "css=#account-logout",
'details' : "css=.account-details",
'dashboard' : "css=#account-dashboard",
'messages' : "css=#account-messages",
'profile' : "css=#account-profile",
'search' : "css=#searchform",
}
class Header2_Locators_Base_4(object):
"""locators for Header2 object"""
locators = {
'base' : "css=#masthead",
'acctbase' : "css=#account",
'login' : "css=#login",
'register' : "css=#register",
'logout' : "css=#account-logout",
'details' : "css=.account-details",
'dashboard' : "css=#account-dashboard",
'messages' : "css=#account-messages",
'profile' : "css=#account-profile",
'search' : "css=#searchform",
}
class Header2_Locators_Base_5(object):
"""locators for Header2 object
login and register is one link
"""
locators = {
'base' : "css=#masthead",
'acctbase' : "css=#account",
'login' : "css=#account-login",
'register' : "css=#account-login",
'logout' : "css=#account-logout",
'details' : "css=.account-details",
'dashboard' : "css=#account-dashboard",
'messages' : "css=#account-messages",
'profile' : "css=#account-profile",
'search' : "css=#searchform",
}
class Header2_Locators_Base_6(object):
"""locators for Header2 object
login and register is one link
updated locators to include anchor
"""
locators = {
'base' : "css=#masthead",
'acctbase' : "css=#account",
'login' : "css=#account-login",
'register' : "css=#account-login",
'logout' : "css=#account-logout a",
'details' : "css=.account-details",
'dashboard' : "css=#account-dashboard a",
'messages' : "css=#account-messages a",
'profile' : "css=#account-profile a",
'search' : "css=#searchform",
}
class Header2_Locators_Base_7(object):
"""locators for Header2 object
separate login and register links
updated locators to include anchor
"""
locators = {
'base' : "css=#masthead",
'acctbase' : "css=#account",
'login' : "css=#login",
'register' : "css=#register",
'logout' : "css=#account-logout a",
'details' : "css=.account-details",
'dashboard' : "css=#account-dashboard a",
'messages' : "css=#account-messages a",
'profile' : "css=#account-profile a",
'search' : "css=#searchform",
}
class Header3(Header):
"""
represents header on hubs where the username and my account links
lead to the my account/dashboard page, and there is no profile link.
generally found in older templates. here we use the username link
to get the account number
"""
def __init__(self, owner, locatordict={}):
super(Header3,self).__init__(owner,locatordict)
# setup page object's additional components
self.username = Link(self,{'base':'username'})
# update the component's locators with this objects overrides
self._updateLocators()
def _checkLocatorsLoggedIn(self):
widgets = [self.logout,self.myaccount,self.username]
self._checkLocators(widgets=widgets,cltype='LoggedIn')
def goto_username(self):
"""click the username link to go to the member's account page"""
return self.username.click()
def get_account_number(self):
"""return the user's account number based on the "Username" link"""
url = self.username.get_attribute('href')
if not url:
raise RuntimeError("link '%s' has no href" % (self.username.locator))
path = urlparse.urlsplit(url)[2]
if not path:
raise RuntimeError("url '%s' has no path" % (url))
matches = re.search("/members/(\d+)",path)
if matches is None:
raise RuntimeError("path '%s' does not contain an account number" % (path))
account_number = matches.group(1)
return account_number
class Header3_Locators_Base_1(object):
"""locators for Header object"""
locators = {
'base' : "css=#header",
'login' : "css=#login a",
'register' : "css=#register a",
'logout' : "css=#logout a",
'myaccount' : "css=#myaccount a",
'username' : "css=#username a",
'search' : "css=#searchform",
}
class Header3_Locators_Base_2(object):
"""locators for Header object"""
locators = {
'base' : "css=#header",
'login' : "css=#login a",
'register' : "css=#register a",
'logout' : "css=#logout a",
'myaccount' : "css=#myaccount a",
'username' : "css=#usersname a",
'search' : "css=#searchform",
}
class Header3_Locators_Base_3(object):
"""locators for Header object"""
locators = {
'base' : "css=#header",
'login' : "css=#who > a:nth-child(1)",
'register' : "css=#who > a:nth-child(2)",
'logout' : "css=#account > a:nth-child(1)",
'myaccount' : "css=#account > a:nth-child(2)",
'username' : "css=#who > a:nth-child(1)",
'search' : "css=#sitesearch",
}
class Header3_Locators_Base_4(object):
"""locators for Header object"""
locators = {
'base' : "css=#header",
'login' : "css=#account-login",
'register' : "css=#account-register",
'logout' : "css=#account-logout",
'myaccount' : "css=#account-dashboard",
'username' : "css=#username",
'search' : "css=#sitesearch",
}
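# Illustrative usage sketch (not part of hubcheck): a test typically obtains
# the header widget from a page object and drives it roughly like this, where
# `po` is a hypothetical owner page object:
#
#     header = Header(po)
#     if not header.is_logged_in():
#         header.goto_login()
#     account_number = header.get_account_number()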
|
|
# /test/testutil.py
#
# Some utility functions which make testing easier
#
# See /LICENCE.md for Copyright information
"""Some utility functions which make testing easier."""
import atexit
import os
import platform
import shutil
import socket
import subprocess
import sys
import tempfile
from contextlib import contextmanager
import ciscripts.bootstrap as bootstrap
import ciscripts.util as util
from nose_parameterized import parameterized
from six import StringIO
from testtools import TestCase
from testtools.content import text_content
from testtools.matchers import Mismatch
__file__ = os.path.abspath(__file__)
# Disabled task caching in the util module - if these tests
# are run in parallel we don't want other tests to cause our
# tests to skip certain (important!) tasks or return the
# wrong container dir.
setattr(util, "_NO_TASK_CACHING", True)
class CapturedOutput(object): # suppress(too-few-public-methods)
"""Represents the captured contents of stdout and stderr."""
def __init__(self):
"""Initialize the class."""
super(CapturedOutput, self).__init__()
self.stdout = ""
self.stderr = ""
self._stdout_handle = None
self._stderr_handle = None
def __enter__(self):
"""Start capturing output."""
self._stdout_handle = sys.stdout
self._stderr_handle = sys.stderr
sys.stdout = StringIO()
sys.stderr = StringIO()
return self
def __exit__(self, exc_type, value, traceback):
"""Finish capturing output."""
del exc_type
del value
del traceback
sys.stdout.seek(0)
self.stdout = sys.stdout.read()
sys.stderr.seek(0)
self.stderr = sys.stderr.read()
sys.stdout = self._stdout_handle
self._stdout_handle = None
sys.stderr = self._stderr_handle
self._stderr_handle = None
@contextmanager
def environment_copy():
"""Execute scope with its own os.environ.
os.environ will be restored after the scope
exits.
"""
environ_copy = os.environ.copy()
try:
yield os.environ
finally:
os.environ = environ_copy
@contextmanager
def in_dir(directory):
"""Execute in the context of directory."""
last_cwd = os.getcwd()
os.chdir(directory)
try:
yield directory
finally:
os.chdir(last_cwd)
@contextmanager
def in_tempdir(parent, prefix):
"""Create a temporary directory as a context manager."""
directory = tempfile.mkdtemp(prefix, dir=parent)
try:
with in_dir(directory):
yield directory
finally:
util.force_remove_tree(directory)
@contextmanager
def server_in_tempdir(parent, prefix):
"""Create a server in a temporary directory, shutting down on exit."""
import threading
from six.moves import socketserver # suppress(import-error)
from six.moves import SimpleHTTPServer # suppress(import-error)
with in_tempdir(parent, prefix) as temp_dir:
class QuietHTTPHandler(SimpleHTTPServer.SimpleHTTPRequestHandler,
object):
"""Custom SimpleHTTPRequestHandler, does not log messages."""
def log_message(self, message, *args):
"""Ignore message."""
pass
def do_GET(self): # suppress(N802)
"""Change into temp_dir and then chain up.
The reason why we need to do this is that the
underlying SimpleHTTPRequestHandler object queries the
directory that we are currently in, as opposed to the
directory that the server was created in. If the user
changes their active directory (as is done in the tests)
then requests will be resolved relative to that directory,
which is an error.
"""
with in_dir(temp_dir):
return super(QuietHTTPHandler, self).do_GET()
server = socketserver.TCPServer(("localhost", 0), QuietHTTPHandler)
thread = threading.Thread(target=server.serve_forever)
thread.start()
try:
yield (temp_dir, "{0}:{1}".format(server.server_address[0],
server.server_address[1]))
finally:
server.shutdown()
thread.join()
def _build_http_connection(superclass, resolver):
"""Build a connection handler for superclass, resolving with resolver."""
class Connection(superclass): # suppress(too-few-public-methods)
"""A connection that resolves with resolver."""
def __init__(self, *args, **kwargs):
"""Initialize this connection object."""
superclass.__init__(self, *args, **kwargs)
self.sock = None
def connect(self):
"""Create a connection, resolving using resolver."""
self.sock = socket.create_connection(resolver(self.host,
self.port),
self.timeout)
return Connection
@contextmanager
def overridden_dns(dns_map):
"""Context manager to override the urllib HTTP DNS resolution."""
from six.moves import http_client # suppress(import-error)
from six.moves import urllib # suppress(import-error)
def resolver(host, port):
"""If host is in dns_map, use host from map, otherwise pass through."""
try:
entry = dns_map[host].split(":")
if len(entry) == 1:
return (entry[0], port)
else:
assert len(entry) == 2
return (entry[0], entry[1])
except KeyError:
return (host, port)
http_connection = _build_http_connection(http_client.HTTPConnection,
resolver)
https_connection = _build_http_connection(http_client.HTTPSConnection,
resolver)
http_hnd = type("HTTPHandler",
(urllib.request.HTTPHandler, object),
{"http_open": lambda s, r: s.do_open(http_connection, r)})
https_hnd = type("HTTPHandler",
(urllib.request.HTTPSHandler, object),
{"https_open": lambda s, r: s.do_open(https_connection,
r)})
custom_opener = urllib.request.build_opener(http_hnd, https_hnd)
urllib.request.install_opener(custom_opener)
try:
yield
finally:
urllib.request.install_opener(urllib.request.build_opener())
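# --- Illustrative sketch (not part of the original utilities) ---------------
# How server_in_tempdir and overridden_dns combine: start a throwaway local
# HTTP server and resolve a made-up hostname to it, so urllib requests never
# leave this machine. The hostname "example.test" is a placeholder.
def _example_overridden_dns_usage():
    """Pair server_in_tempdir with overridden_dns (sketch only)."""
    from six.moves import urllib  # suppress(import-error)
    with server_in_tempdir(os.getcwd(), "dns_example") as (_, address):
        with overridden_dns({"example.test": address}):
            urllib.request.urlopen("http://example.test/").close()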
class IsInSubdirectoryOf(object): # suppress(too-few-public-methods)
"""Match if a path is a subdirectory of the specified path."""
def __init__(self, path):
"""Initialize mather for this path."""
super(IsInSubdirectoryOf, self).__init__()
self._path = path
def __str__(self):
"""Represent matcher as string."""
return "IsInSubdirectoryOf({0})".format(repr(self._path))
def match(self, candidate):
"""Return Mismatch if candidate is not in a subdirectory of path."""
if not candidate:
return Mismatch("""None passed to match""")
path = self._path
if os.path.commonprefix([os.path.realpath(path).lower(),
os.path.realpath(candidate).lower()]):
return None
else:
return Mismatch("{0} is not in a subdir of {1}".format(candidate,
path))
class SubprocessExitWithMismatch(object):
"""Detail of a SubprocessExitsWith mismatch."""
# suppress(too-many-arguments)
def __init__(self, popen_command, code, expected, stdout, stderr):
"""Initialize this mismatch detail object."""
super(SubprocessExitWithMismatch, self).__init__()
command = " ".join(popen_command)
self._msg = "{0} exited with {1}, but expected {2}".format(command,
code,
expected)
self._details = {
"Output": text_content(stdout),
"Errors": text_content(stderr)
}
def describe(self):
"""Return message."""
return self._msg
def get_details(self):
"""Return details."""
return self._details
class SubprocessExitsWith(object): # suppress(too-few-public-methods)
"""Match if the subprocess to be executed exits with the expected code."""
def __init__(self, expected_code):
"""Initialize matcher for this expected code."""
super(SubprocessExitsWith, self).__init__()
self._expected_code = expected_code
def __str__(self):
"""Convert matcher to string."""
return "SubprocessExitsWith({0})".format(self._expected_code)
def match(self, subprocess_args):
"""Fail if subprocess exits with unexpected code."""
process = subprocess.Popen(subprocess_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
code = process.wait()
if code != self._expected_code:
return SubprocessExitWithMismatch(subprocess_args,
code,
self._expected_code,
process.stdout.read().decode(),
process.stderr.read().decode())
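# Illustrative use in a testtools TestCase (sketch only): assertThat passes the
# command line to match(), which runs it and reports a mismatch when the exit
# code differs, e.g.
#     self.assertThat([sys.executable, "--version"], SubprocessExitsWith(0))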
class CIScriptExitsWith(object): # suppress(too-few-public-methods)
"""Match if the specified ci-script runs with its arguments."""
def __init__(self, expected_status, container, util_mod, *args, **kwargs):
"""Initialize matcher with the arguments we run the script with."""
super(CIScriptExitsWith, self).__init__()
self._expected_status = expected_status
self._args = args
self._kwargs = kwargs
self._container = container
self._util = util_mod
def __str__(self):
"""Represent this matcher as a string."""
return "CIScriptExitsWith({0})".format(", ".join(self._args))
def match(self, script):
"""Match if this script runs successfully."""
captured_output = CapturedOutput()
assert self._container.return_code() == 0
with captured_output:
with environment_copy():
run_args = [
self._container,
self._util,
None,
list(self._args)
]
run_kwargs = self._kwargs
self._container.fetch_and_import(script).run(*run_args,
**run_kwargs)
result = self._container.return_code()
if result != self._expected_status:
return SubprocessExitWithMismatch(["python", script] +
list(self._args),
result,
self._expected_status,
captured_output.stdout,
captured_output.stderr)
def format_with_args(*args):
"""Return a function that formats a docstring."""
def formatter(func, _, params):
"""Return formatted docstring with argument numbers in args."""
pa = params.args
format_args = [pa[i] for i in range(0, len(pa)) if i in args]
return func.__doc__.format(*format_args)
return formatter
_ROOT = os.path.abspath(os.path.join(os.path.dirname(bootstrap.__file__),
".."))
WHICH_SCRIPT = ("import sys;sys.path.append('" + _ROOT.replace("\\",
"/") + "');"
"import ciscripts.util;assert ciscripts.util.which('{0}')")
def _copytree_ignore_notfound(src, dst):
"""Copy an entire directory tree.
This is effectively a workaround for situations where shutil.copytree
is unable to copy some files. Just shell out to rsync, as rsync
usually gets it right in complicated cases.
Where rsync isn't available, we fall back to shutil.
"""
if util.which("rsync"):
subprocess.check_call(["rsync",
"-az",
"--chmod=ugo=rwX",
src + os.path.sep,
dst + os.path.sep])
else:
try:
shutil.copytree(src, dst)
except shutil.Error: # suppress(pointless-except)
pass
def copy_scripts_to_directory(target):
"""Utility method to copy CI script to current directory.
They will be located at /ciscripts/.
"""
parent = os.path.realpath(os.path.join(os.path.dirname(__file__),
".."))
assert "ciscripts" in os.listdir(parent)
shutil.copytree(os.path.join(parent, "ciscripts"),
os.path.join(target, "ciscripts"))
_ACCEPTANCE_TEST_DIRS_TO_REMOVE = set()
def _remove_acceptance_test_dirs():
"""Remove all acceptance tests directories.
Designed to be called on an atexit handler.
"""
for directory in _ACCEPTANCE_TEST_DIRS_TO_REMOVE:
util.force_remove_tree(directory)
def acceptance_test_for(project_type, expected_programs):
"""Generate acceptance test class for :project_type:.
Includes tests to ensure that :expected_programs: are
installed in the container.
"""
class AcceptanceTestForProject(TestCase):
"""Test cases for setting up a project container."""
def __init__(self, *args, **kwargs):
"""Initialize the instance attributes for this test case."""
super(AcceptanceTestForProject, self).__init__(*args, **kwargs)
self.project_dir = None
@contextmanager
def in_parent_context(self, command):
"""Get script to run command in parent context.
The 'parent context' in this case is a shell script where the
standard output of the container's setup script has been evaluated,
eg, all environment variables are exported.
"""
directory = tempfile.mkdtemp(prefix=os.path.join(os.getcwd(),
"parent_script"))
script_path = os.path.abspath(os.path.join(directory,
"script.ps1"))
script_path_for_shell = os.path.abspath(script_path)
if platform.system() == "Windows":
shell = ["powershell", "-ExecutionPolicy", "Bypass"]
script_path_for_shell = "\"{}\"".format(script_path_for_shell)
else:
shell = ["bash"]
script = ("{cls.setup_container_output.stdout}"
"{command}").format(cls=self.__class__, command=command)
try:
with util.in_dir(directory):
with open(script_path, "w") as script_file:
script_file.write(script)
# powershell requires that paths with spaces be
# quoted, even when passed as part of the command line
# arguments, so we use script_path here as it is formatted
# above
yield shell + [script_path_for_shell]
finally:
script_file.close()
util.force_remove_tree(directory)
@classmethod
def setup_script(cls):
"""Setup script for this acceptance test fixture."""
return "setup/cmake/setup.py"
@classmethod
def _setup_container(cls,
container_temp_dir,
scripts_dir):
"""Create a container and return it and its util module."""
if platform.system() == "Windows":
shell = bootstrap.construct_parent_shell("powershell",
sys.stdout)
else:
shell = bootstrap.construct_parent_shell("bash",
sys.stdout)
container = bootstrap.ContainerDir(shell,
container_temp_dir,
scripts_directory=scripts_dir)
util_mod = container.fetch_and_import("util.py")
# Look up where to print messages to at the time messages
# are printed, such that we get the redirected messages
# from sys.stderr
util_mod.PRINT_MESSAGES_TO = None
return (container, util_mod, shell)
@classmethod
def maybe_copy_from_existing_container(cls, target):
"""Copy from an any pre-existing container if we have one."""
if os.environ.get("CONTAINER_DIR"):
container_dir = os.environ["CONTAINER_DIR"]
util.force_remove_tree(target)
_copytree_ignore_notfound(container_dir, target)
# Delete ciscripts in the copied container
try:
util.force_remove_tree(os.path.join(target, "_scripts"))
except (shutil.Error, OSError): # suppress(pointless-except)
pass
@classmethod
def _register_removal_atexit(cls, temp_dir): # suppress(N802)
"""Register the atexit handler to remove directories."""
if len(_ACCEPTANCE_TEST_DIRS_TO_REMOVE) == 0:
atexit.register(_remove_acceptance_test_dirs)
_ACCEPTANCE_TEST_DIRS_TO_REMOVE.add(temp_dir)
@classmethod
def setUpClass(cls): # suppress(N802)
"""Call container setup script."""
temp_dir_prefix = "{}_acceptance_test".format(project_type)
cls.container_temp_dir = tempfile.mkdtemp(dir=os.getcwd(),
prefix=temp_dir_prefix)
cls._register_removal_atexit(cls.container_temp_dir)
cls._environ_backup = os.environ.copy()
cls.maybe_copy_from_existing_container(cls.container_temp_dir)
scripts_directory = os.path.join(cls.container_temp_dir,
"_scripts")
copy_scripts_to_directory(scripts_directory)
setup_script = "setup/{type}/setup.py".format(type=project_type)
cls.setup_container_output = CapturedOutput()
try:
with cls.setup_container_output:
(cls.container,
cls.util,
shell) = cls._setup_container(cls.container_temp_dir,
scripts_directory)
extra_args = list()
# Don't install mdl on AppVeyor - installing any gem is
# far too slow and will cause the job to time out.
if os.environ.get("APPVEYOR", None):
extra_args.append("--no-mdl")
setup_module = cls.container.fetch_and_import(setup_script)
cls.lang_container = setup_module.run(cls.container,
util,
shell,
extra_args)
assert cls.container.return_code() == 0
except: # suppress(blind-except,B901)
stdout = cls.setup_container_output.stdout  # plain str after __exit__
stderr = cls.setup_container_output.stderr  # plain str after __exit__
msg = ("""Exception occurred: {}\n"""
"""Output\n======\n{}\n\n"""
"""Errors\n======\n{}\n\n""").format(sys.exc_info()[0],
stdout,
stderr)
raise RuntimeError(msg)
@classmethod
def tearDownClass(cls): # suppress(N802)
"""Remove container."""
os.environ = cls._environ_backup
def _get_project_template(self): # suppress(no-self-use)
"""Get template of project type from /sample."""
parent = os.path.realpath(os.path.join(os.path.dirname(__file__),
".."))
assert "sample" in os.listdir(parent)
assert project_type in os.listdir(os.path.join(parent, "sample"))
return os.path.join(parent, "sample", project_type)
def setUp(self): # suppress(N802)
"""Create a copy of and enter sample project directory."""
super(AcceptanceTestForProject, self).setUp()
# Create copy of the sample project
current_directory = os.getcwd()
temp_dir_prefix = "{}_project_copy".format(project_type)
project_copy_temp_dir = tempfile.mkdtemp(dir=current_directory,
prefix=temp_dir_prefix)
self.addCleanup(util.force_remove_tree, project_copy_temp_dir)
self.project_dir = os.path.join(project_copy_temp_dir,
project_type)
shutil.copytree(self._get_project_template(), self.project_dir)
os.chdir(self.project_dir)
self.addCleanup(os.chdir, current_directory)
self.__class__.container.reset_failure_count()
_PROGRAMS = [
"polysquare-generic-file-linter"
]
if not os.environ.get("APPVEYOR", None):
_PROGRAMS.append("mdl")
_PROGRAMS.extend(expected_programs)
@parameterized.expand(_PROGRAMS, testcase_func_doc=format_with_args(0))
def test_program_is_available_in_python_script(self, program):
"""Executable {0} is available after running setup."""
temp_dir = self.__class__.container_temp_dir
with self.__class__.lang_container.activated(util):
self.assertThat(util.which(program),
IsInSubdirectoryOf(temp_dir))
return AcceptanceTestForProject
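# Illustrative usage (hypothetical project type and program list): a concrete
# test module would typically build its TestCase at import time, e.g.
#
#     TestCMakeProject = acceptance_test_for("cmake", ["cmake"])
#
# so that the generated AcceptanceTestForProject class is picked up by the
# test runner.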
|
|
from __future__ import absolute_import
import atexit
import copy
import logging
import threading
import time
from ..client_async import KafkaClient
from ..structs import TopicPartition
from ..partitioner.default import DefaultPartitioner
from ..protocol.message import Message, MessageSet
from .. import errors as Errors
from .future import FutureRecordMetadata, FutureProduceResult
from .record_accumulator import AtomicInteger, RecordAccumulator
from .sender import Sender
log = logging.getLogger(__name__)
PRODUCER_CLIENT_ID_SEQUENCE = AtomicInteger()
class KafkaProducer(object):
"""A Kafka client that publishes records to the Kafka cluster.
The producer is thread safe and sharing a single producer instance across
threads will generally be faster than having multiple instances.
The producer consists of a pool of buffer space that holds records that
haven't yet been transmitted to the server as well as a background I/O
thread that is responsible for turning these records into requests and
transmitting them to the cluster.
The send() method is asynchronous. When called it adds the record to a
buffer of pending record sends and immediately returns. This allows the
producer to batch together individual records for efficiency.
The 'acks' config controls the criteria under which requests are considered
complete. The "all" setting will result in blocking on the full commit of
the record, the slowest but most durable setting.
If the request fails, the producer can automatically retry, unless
'retries' is configured to 0. Enabling retries also opens up the
possibility of duplicates (see the documentation on message
delivery semantics for details:
http://kafka.apache.org/documentation.html#semantics
).
The producer maintains buffers of unsent records for each partition. These
buffers are of a size specified by the 'batch_size' config. Making this
larger can result in more batching, but requires more memory (since we will
generally have one of these buffers for each active partition).
By default a buffer is available to send immediately even if there is
additional unused space in the buffer. However if you want to reduce the
number of requests you can set 'linger_ms' to something greater than 0.
This will instruct the producer to wait up to that number of milliseconds
before sending a request in hope that more records will arrive to fill up
the same batch. This is analogous to Nagle's algorithm in TCP. Note that
records that arrive close together in time will generally batch together
even with linger_ms=0 so under heavy load batching will occur regardless of
the linger configuration; however setting this to something larger than 0
can lead to fewer, more efficient requests when not under maximal load at
the cost of a small amount of latency.
The buffer_memory controls the total amount of memory available to the
producer for buffering. If records are sent faster than they can be
transmitted to the server then this buffer space will be exhausted. When
the buffer space is exhausted additional send calls will block.
The key_serializer and value_serializer instruct how to turn the key and
value objects the user provides into bytes.
Keyword Arguments:
bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
strings) that the producer should contact to bootstrap initial
cluster metadata. This does not have to be the full node list.
It just needs to have at least one broker that will respond to a
Metadata API Request. Default port is 9092. If no servers are
specified, will default to localhost:9092.
client_id (str): a name for this client. This string is passed in
each request to servers and can be used to identify specific
server-side log entries that correspond to this client.
Default: 'kafka-python-producer-#' (appended with a unique number
per instance)
key_serializer (callable): used to convert user-supplied keys to bytes
If not None, called as f(key), should return bytes. Default: None.
value_serializer (callable): used to convert user-supplied message
values to bytes. If not None, called as f(value), should return
bytes. Default: None.
acks (0, 1, 'all'): The number of acknowledgments the producer requires
the leader to have received before considering a request complete.
This controls the durability of records that are sent. The
following settings are common:
0: Producer will not wait for any acknowledgment from the server.
The message will immediately be added to the socket
buffer and considered sent. No guarantee can be made that the
server has received the record in this case, and the retries
configuration will not take effect (as the client won't
generally know of any failures). The offset given back for each
record will always be set to -1.
1: Wait for leader to write the record to its local log only.
Broker will respond without awaiting full acknowledgement from
all followers. In this case should the leader fail immediately
after acknowledging the record but before the followers have
replicated it then the record will be lost.
all: Wait for the full set of in-sync replicas to write the record.
This guarantees that the record will not be lost as long as at
least one in-sync replica remains alive. This is the strongest
available guarantee.
If unset, defaults to acks=1.
compression_type (str): The compression type for all data generated by
the producer. Valid values are 'gzip', 'snappy', 'lz4', or None.
Compression is of full batches of data, so the efficacy of batching
will also impact the compression ratio (more batching means better
compression). Default: None.
retries (int): Setting a value greater than zero will cause the client
to resend any record whose send fails with a potentially transient
error. Note that this retry is no different than if the client
resent the record upon receiving the error. Allowing retries will
potentially change the ordering of records because if two records
are sent to a single partition, and the first fails and is retried
but the second succeeds, then the second record may appear first.
Default: 0.
batch_size (int): Requests sent to brokers will contain multiple
batches, one for each partition with data available to be sent.
A small batch size will make batching less common and may reduce
throughput (a batch size of zero will disable batching entirely).
Default: 16384
linger_ms (int): The producer groups together any records that arrive
in between request transmissions into a single batched request.
Normally this occurs only under load when records arrive faster
than they can be sent out. However in some circumstances the client
may want to reduce the number of requests even under moderate load.
This setting accomplishes this by adding a small amount of
artificial delay; that is, rather than immediately sending out a
record the producer will wait for up to the given delay to allow
other records to be sent so that the sends can be batched together.
This can be thought of as analogous to Nagle's algorithm in TCP.
This setting gives the upper bound on the delay for batching: once
we get batch_size worth of records for a partition it will be sent
immediately regardless of this setting, however if we have fewer
than this many bytes accumulated for this partition we will
'linger' for the specified time waiting for more records to show
up. This setting defaults to 0 (i.e. no delay). Setting linger_ms=5
would have the effect of reducing the number of requests sent but
would add up to 5ms of latency to records sent in the absence of
load. Default: 0.
partitioner (callable): Callable used to determine which partition
each message is assigned to. Called (after key serialization):
partitioner(key_bytes, all_partitions, available_partitions).
The default partitioner implementation hashes each non-None key
using the same murmur2 algorithm as the java client so that
messages with the same key are assigned to the same partition.
When a key is None, the message is delivered to a random partition
(filtered to partitions with available leaders only, if possible).
buffer_memory (int): The total bytes of memory the producer should use
to buffer records waiting to be sent to the server. If records are
sent faster than they can be delivered to the server the producer
will block up to max_block_ms, raising an exception on timeout.
In the current implementation, this setting is an approximation.
Default: 33554432 (32MB)
max_block_ms (int): Number of milliseconds to block during send() and
partitions_for(). These methods can be blocked either because the
buffer is full or metadata unavailable. Blocking in the
user-supplied serializers or partitioner will not be counted against
this timeout. Default: 60000.
max_request_size (int): The maximum size of a request. This is also
effectively a cap on the maximum record size. Note that the server
has its own cap on record size which may be different from this.
This setting will limit the number of record batches the producer
will send in a single request to avoid sending huge requests.
Default: 1048576.
metadata_max_age_ms (int): The period of time in milliseconds after
which we force a refresh of metadata even if we haven't seen any
partition leadership changes to proactively discover any new
brokers or partitions. Default: 300000
retry_backoff_ms (int): Milliseconds to backoff when retrying on
errors. Default: 100.
request_timeout_ms (int): Client request timeout in milliseconds.
Default: 30000.
receive_buffer_bytes (int): The size of the TCP receive buffer
(SO_RCVBUF) to use when reading data. Default: None (relies on
system defaults). Java client defaults to 32768.
send_buffer_bytes (int): The size of the TCP send buffer
(SO_SNDBUF) to use when sending data. Default: None (relies on
system defaults). Java client defaults to 131072.
reconnect_backoff_ms (int): The amount of time in milliseconds to
wait before attempting to reconnect to a given host.
Default: 50.
max_in_flight_requests_per_connection (int): Requests are pipelined
to kafka brokers up to this number of maximum requests per
broker connection. Default: 5.
security_protocol (str): Protocol used to communicate with brokers.
Valid values are: PLAINTEXT, SSL. Default: PLAINTEXT.
ssl_context (ssl.SSLContext): pre-configured SSLContext for wrapping
socket connections. If provided, all other ssl_* configurations
will be ignored. Default: None.
ssl_check_hostname (bool): flag to configure whether ssl handshake
should verify that the certificate matches the broker's hostname.
default: true.
ssl_cafile (str): optional filename of ca file to use in certificate
verification. default: none.
ssl_certfile (str): optional filename of file in pem format containing
the client certificate, as well as any ca certificates needed to
establish the certificate's authenticity. default: none.
ssl_keyfile (str): optional filename containing the client private key.
default: none.
api_version (str): specify which kafka API version to use.
If set to 'auto', will attempt to infer the broker version by
probing various APIs. Default: auto
Note:
Configuration parameters are described in more detail at
https://kafka.apache.org/090/configuration.html#producerconfigs
"""
_DEFAULT_CONFIG = {
'bootstrap_servers': 'localhost',
'client_id': None,
'key_serializer': None,
'value_serializer': None,
'acks': 1,
'compression_type': None,
'retries': 0,
'batch_size': 16384,
'linger_ms': 0,
'partitioner': DefaultPartitioner(),
'buffer_memory': 33554432,
'connections_max_idle_ms': 600000, # not implemented yet
'max_block_ms': 60000,
'max_request_size': 1048576,
'metadata_max_age_ms': 300000,
'retry_backoff_ms': 100,
'request_timeout_ms': 30000,
'receive_buffer_bytes': None,
'send_buffer_bytes': None,
'reconnect_backoff_ms': 50,
'max_in_flight_requests_per_connection': 5,
'security_protocol': 'PLAINTEXT',
'ssl_context': None,
'ssl_check_hostname': True,
'ssl_cafile': None,
'ssl_certfile': None,
'ssl_keyfile': None,
'api_version': 'auto',
}
def __init__(self, **configs):
log.debug("Starting the Kafka producer") # trace
self.config = copy.copy(self._DEFAULT_CONFIG)
for key in self.config:
if key in configs:
self.config[key] = configs.pop(key)
# Only check for extra config keys in top-level class
assert not configs, 'Unrecognized configs: %s' % configs
if self.config['client_id'] is None:
self.config['client_id'] = 'kafka-python-producer-%s' % \
PRODUCER_CLIENT_ID_SEQUENCE.increment()
if self.config['acks'] == 'all':
self.config['acks'] = -1
client = KafkaClient(**self.config)
# Check Broker Version if not set explicitly
if self.config['api_version'] == 'auto':
self.config['api_version'] = client.check_version()
assert self.config['api_version'] in ('0.9', '0.8.2', '0.8.1', '0.8.0')
# Convert api_version config to tuple for easy comparisons
self.config['api_version'] = tuple(
map(int, self.config['api_version'].split('.')))
if self.config['compression_type'] == 'lz4':
assert self.config['api_version'] >= (0, 8, 2), 'LZ4 Requires >= Kafka 0.8.2 Brokers'
self._accumulator = RecordAccumulator(**self.config)
self._metadata = client.cluster
self._sender = Sender(client, self._metadata, self._accumulator,
**self.config)
self._sender.daemon = True
self._sender.start()
self._closed = False
atexit.register(self.close, timeout=0)
log.debug("Kafka producer started")
def __del__(self):
self.close(timeout=0)
def close(self, timeout=None):
"""Close this producer."""
if not hasattr(self, '_closed') or self._closed:
log.info('Kafka producer closed')
return
if timeout is None:
timeout = 999999999
assert timeout >= 0
log.info("Closing the Kafka producer with %s secs timeout.", timeout)
#first_exception = AtomicReference() # this will keep track of the first encountered exception
invoked_from_callback = bool(threading.current_thread() is self._sender)
if timeout > 0:
if invoked_from_callback:
log.warning("Overriding close timeout %s secs to 0 in order to"
" prevent useless blocking due to self-join. This"
" means you have incorrectly invoked close with a"
" non-zero timeout from the producer call-back.",
timeout)
else:
# Try to close gracefully.
if self._sender is not None:
self._sender.initiate_close()
self._sender.join(timeout)
if self._sender is not None and self._sender.is_alive():
log.info("Proceeding to force close the producer since pending"
" requests could not be completed within timeout %s.",
timeout)
self._sender.force_close()
# Only join the sender thread when not calling from callback.
if not invoked_from_callback:
self._sender.join()
try:
self.config['key_serializer'].close()
except AttributeError:
pass
try:
self.config['value_serializer'].close()
except AttributeError:
pass
self._closed = True
log.debug("The Kafka producer has closed.")
def partitions_for(self, topic):
"""Returns set of all known partitions for the topic."""
max_wait = self.config['max_block_ms'] / 1000.0
return self._wait_on_metadata(topic, max_wait)
def send(self, topic, value=None, key=None, partition=None):
"""Publish a message to a topic.
Arguments:
topic (str): topic where the message will be published
value (optional): message value. Must be type bytes, or be
serializable to bytes via configured value_serializer. If value
is None, key is required and message acts as a 'delete'.
See kafka compaction documentation for more details:
http://kafka.apache.org/documentation.html#compaction
(compaction requires kafka >= 0.8.1)
partition (int, optional): optionally specify a partition. If not
set, the partition will be selected using the configured
'partitioner'.
key (optional): a key to associate with the message. Can be used to
determine which partition to send the message to. If partition
is None (and producer's partitioner config is left as default),
then messages with the same key will be delivered to the same
partition (but if key is None, partition is chosen randomly).
Must be type bytes, or be serializable to bytes via configured
key_serializer.
Returns:
FutureRecordMetadata: resolves to RecordMetadata
Raises:
KafkaTimeoutError: if unable to fetch topic metadata, or unable
to obtain memory buffer prior to configured max_block_ms
"""
assert value is not None or self.config['api_version'] >= (0, 8, 1), (
'Null messages require kafka >= 0.8.1')
assert not (value is None and key is None), 'Need at least one: key or value'
try:
# first make sure the metadata for the topic is
# available
self._wait_on_metadata(topic, self.config['max_block_ms'] / 1000.0)
key_bytes, value_bytes = self._serialize(topic, key, value)
partition = self._partition(topic, partition, key, value,
key_bytes, value_bytes)
message_size = MessageSet.HEADER_SIZE + Message.HEADER_SIZE
if key_bytes is not None:
message_size += len(key_bytes)
if value_bytes is not None:
message_size += len(value_bytes)
self._ensure_valid_record_size(message_size)
tp = TopicPartition(topic, partition)
log.debug("Sending (key=%s value=%s) to %s", key, value, tp)
result = self._accumulator.append(tp, key_bytes, value_bytes,
self.config['max_block_ms'])
future, batch_is_full, new_batch_created = result
if batch_is_full or new_batch_created:
log.debug("Waking up the sender since %s is either full or"
" getting a new batch", tp)
self._sender.wakeup()
return future
# handling exceptions and record the errors;
# for API exceptions return them in the future,
# for other exceptions raise directly
except Errors.KafkaTimeoutError:
raise
except AssertionError:
raise
except Exception as e:
log.debug("Exception occurred during message send: %s", e)
return FutureRecordMetadata(
FutureProduceResult(TopicPartition(topic, partition)),
-1).failure(e)
def flush(self, timeout=None):
"""
Invoking this method makes all buffered records immediately available
to send (even if linger_ms is greater than 0) and blocks on the
completion of the requests associated with these records. The
post-condition of flush() is that any previously sent record will have
completed (e.g. Future.is_done() == True). A request is considered
completed when either it is successfully acknowledged according to the
'acks' configuration for the producer, or it results in an error.
Other threads can continue sending messages while one thread is blocked
waiting for a flush call to complete; however, no guarantee is made
about the completion of messages sent after the flush call begins.
"""
log.debug("Flushing accumulated records in producer.") # trace
self._accumulator.begin_flush()
self._sender.wakeup()
self._accumulator.await_flush_completion(timeout=timeout)
def _ensure_valid_record_size(self, size):
"""Validate that the record size isn't too large."""
if size > self.config['max_request_size']:
raise Errors.MessageSizeTooLargeError(
"The message is %d bytes when serialized which is larger than"
" the maximum request size you have configured with the"
" max_request_size configuration" % size)
if size > self.config['buffer_memory']:
raise Errors.MessageSizeTooLargeError(
"The message is %d bytes when serialized which is larger than"
" the total memory buffer you have configured with the"
" buffer_memory configuration." % size)
def _wait_on_metadata(self, topic, max_wait):
"""
Wait for cluster metadata including partitions for the given topic to
be available.
Arguments:
topic (str): topic we want metadata for
max_wait (float): maximum time in secs for waiting on the metadata
Returns:
set: partition ids for the topic
Raises:
TimeoutException: if partitions for topic were not obtained before
specified max_wait timeout
"""
# add topic to metadata topic list if it is not there already.
self._sender.add_topic(topic)
begin = time.time()
elapsed = 0.0
metadata_event = None
while True:
partitions = self._metadata.partitions_for_topic(topic)
if partitions is not None:
return partitions
if not metadata_event:
metadata_event = threading.Event()
log.debug("Requesting metadata update for topic %s", topic)
metadata_event.clear()
future = self._metadata.request_update()
future.add_both(lambda e, *args: e.set(), metadata_event)
self._sender.wakeup()
metadata_event.wait(max_wait - elapsed)
elapsed = time.time() - begin
if not metadata_event.is_set():
raise Errors.KafkaTimeoutError(
"Failed to update metadata after %s secs.", max_wait)
elif topic in self._metadata.unauthorized_topics:
raise Errors.TopicAuthorizationFailedError(topic)
else:
log.debug("_wait_on_metadata woke after %s secs.", elapsed)
def _serialize(self, topic, key, value):
# pylint: disable-msg=not-callable
if self.config['key_serializer']:
serialized_key = self.config['key_serializer'](key)
else:
serialized_key = key
if self.config['value_serializer']:
serialized_value = self.config['value_serializer'](value)
else:
serialized_value = value
return serialized_key, serialized_value
def _partition(self, topic, partition, key, value,
serialized_key, serialized_value):
if partition is not None:
assert partition >= 0
assert partition in self._metadata.partitions_for_topic(topic), 'Unrecognized partition'
return partition
all_partitions = list(self._metadata.partitions_for_topic(topic))
available = list(self._metadata.available_partitions_for_topic(topic))
return self.config['partitioner'](serialized_key,
all_partitions,
available)
|
|
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
from abc import ABCMeta, abstractmethod, abstractproperty
from eventlet import greenthread
from neutron.common import exceptions as n_exc
try:
from neutron.extensions import loadbalancer
except ImportError:
from neutron_lbaas.extensions import loadbalancer
from neutron.plugins.common import constants
from vnc_api.vnc_api import NoIdError, RefsExistError
import six
import uuid
class LoadbalancerMethodInvalid(n_exc.BadRequest):
message = "Method %(lb_method)s not supported for pool %(pool_id)s"
@six.add_metaclass(ABCMeta)
class ResourceManager(object):
_max_project_read_attempts = 3
def __init__(self, api):
self._api = api
@abstractproperty
def property_type_mapping(self):
""" Mapping from property name to neutron dict key.
"""
pass
@abstractmethod
def make_properties(self, resource):
""" Returns the properties for the specified resource.
"""
pass
@abstractmethod
def make_dict(self, resource, fields):
""" Return the contrail api resource in the dictionary format
expected by neutron.
"""
pass
@abstractmethod
def resource_read(self, id):
""" Read the specified resource from the api server.
"""
pass
@abstractmethod
def resource_list(self, tenant_id):
""" Returns the list of objects from the api server.
"""
pass
@abstractmethod
def resource_update(self, obj):
""" Call the update method.
"""
pass
@abstractmethod
def resource_delete(self, id):
""" Delete the specified resource from the api server.
"""
pass
@abstractproperty
def get_exception_notfound(self, id):
""" Returns the correct NotFound exception.
"""
pass
@abstractproperty
def get_exception_inuse(self, id):
""" Returns the correct NotFound exception.
"""
pass
@abstractproperty
def neutron_name(self):
""" Resource name in a request from neutron.
"""
pass
@abstractproperty
def resource_name_plural(self):
""" Resource list name in a list response from api server.
"""
pass
@abstractmethod
def create(self, context, resource):
""" Create resource.
"""
pass
@abstractmethod
def update_properties(self, obj, id, resource):
""" Update the resource properties
"""
return False
def update_object(self, obj, id, resource):
""" Update object metadata other than properties
"""
return False
def _get_tenant_id_for_create(self, context, resource):
if context.is_admin and 'tenant_id' in resource:
tenant_id = resource['tenant_id']
elif ('tenant_id' in resource and
resource['tenant_id'] != context.tenant_id):
reason = 'Cannot create resource for another tenant'
raise n_exc.AdminRequired(reason=reason)
else:
tenant_id = context.tenant_id
return tenant_id
def _get_resource_name(self, resource, parent, name, uuid):
""" Generate an unique name. This is display name if there are
no conflicts or display_name + uuid.
"""
fq_name = list(parent.fq_name)
fq_name.append(name)
try:
self._api.fq_name_to_id(resource, fq_name)
except NoIdError:
return name
return name + '-' + uuid
def _is_authorized(self, context, resource):
return context.is_admin or context.tenant_id == resource['tenant_id']
def _project_read(self, project_id):
""" Reads the project from the api server. The project will be
created if it does not yet exist.
"""
for i in range(self._max_project_read_attempts):
try:
return self._api.project_read(id=str(uuid.UUID(project_id)))
except NoIdError:
pass
greenthread.sleep(1)
raise n_exc.TenantNetworksDisabled()
def _fields(self, resource, fields):
if fields:
return dict(((key, item) for key, item in resource.items()
if key in fields))
return resource
def _apply_filter(self, resource, filters):
if filters is None:
return True
for key, value in filters.iteritems():
if key in resource and not resource[key] in value:
return False
return True
def _get_object_status(self, obj):
id_perms = obj.get_id_perms()
if id_perms and id_perms.enable:
return constants.ACTIVE
return constants.PENDING_DELETE
def _get_object_description(self, obj):
id_perms = obj.get_id_perms()
if id_perms is None:
return None
return id_perms.description
def _get_object_tenant_id(self, obj):
proj_fq_name = obj.get_fq_name()[0:2]
try:
proj = self._api.project_read(fq_name=proj_fq_name)
except NoIdError:
return None
return proj.uuid
def get_resource(self, context, id, fields=None):
""" Implement GET by uuid.
"""
try:
obj = self.resource_read(id=id)
except NoIdError:
raise self.get_exception_notfound(id=id)
tenant_id = str(uuid.UUID(context.tenant_id))
project_id = self._get_object_tenant_id(obj)
if not context.is_admin and tenant_id != project_id:
raise self.get_exception_notfound(id=id)
return self.make_dict(obj, fields)
def _get_resource_dict(self, uuid, filters, fields):
try:
obj = self.resource_read(id=uuid)
except NoIdError:
return None
res = self.make_dict(obj, None)
if not self._apply_filter(res, filters):
return None
return self._fields(res, fields)
def get_collection(self, context, filters=None, fields=None):
""" Generic implementation of list command.
"""
response = []
if filters and 'id' in filters:
for v in filters['id']:
res = self._get_resource_dict(v, filters, fields)
if res is not None and self._is_authorized(context, res):
response.append(res)
return response
tenant_id = None
if not context.is_admin:
tenant_id = context.tenant_id
obj_list = self.resource_list(tenant_id=tenant_id)
if self.resource_name_plural not in obj_list:
return response
for v in obj_list[self.resource_name_plural]:
res = self._get_resource_dict(v['uuid'], filters, fields)
if res is not None:
response.append(res)
return response
def delete(self, context, id):
if not context.is_admin:
try:
obj = self.resource_read(id=id)
except NoIdError:
raise self.get_exception_notfound(id=id)
tenant_id = str(uuid.UUID(context.tenant_id))
project_id = self._get_object_tenant_id(obj)
if tenant_id != project_id:
raise n_exc.NotAuthorized()
try:
self.resource_delete(id=id)
except NoIdError:
raise self.get_exception_notfound(id=id)
except RefsExistError:
raise self.get_exception_inuse(id=id)
def update_properties_subr(self, props, resource):
""" Update the DB properties object from the neutron parameters.
"""
change = False
for key, mapping in self.property_type_mapping.iteritems():
if mapping not in resource:
continue
if getattr(props, key) != resource[mapping]:
setattr(props, key, resource[mapping])
change = True
return change
def update(self, context, id, resource):
""" Update the resource.
"""
try:
obj = self.resource_read(id=id)
except NoIdError:
raise self.get_exception_notfound(id=id)
id_perms = obj.get_id_perms()
if not id_perms or not id_perms.enable:
raise loadbalancer.StateInvalid(id=id,
state=constants.PENDING_DELETE)
r = resource[self.neutron_name]
if r:
update = False
if 'description' in r and id_perms.description != r['description']:
id_perms.description = r['description']
obj.set_id_perms(id_perms)
update = True
if self.update_properties(obj, id, r):
update = True
if self.update_object(obj, id, r):
update = True
if update:
self.resource_update(obj)
return self.make_dict(obj)
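# Illustrative sketch only (not part of the original module): outline of a
# concrete subclass, showing how property_type_mapping drives
# update_properties_subr. The attribute/key names and the vnc accessor used
# here are hypothetical, and the remaining abstract methods are omitted:
#
#   class ExamplePoolManager(ResourceManager):
#       @property
#       def property_type_mapping(self):
#           # vnc property attribute -> key in the neutron request body
#           return {'admin_state': 'admin_state_up',
#                   'protocol': 'protocol'}
#
#       def update_properties(self, obj, id, resource):
#           props = obj.get_example_pool_properties()  # hypothetical accessor
#           if self.update_properties_subr(props, resource):
#               obj.set_example_pool_properties(props)
#               return True
#           return False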
|
|
# ---------------------------
# Imports
# ---------------------------
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
import discord
import sys
import subprocess
import asyncio
import commands
import personalvars
# ---------------------------
# Personal Variables
# ---------------------------
TokenLocation = personalvars.token_location()
DESTServ = personalvars.rise_server()
DESTChan = personalvars.rise_channel()
riseMSG = personalvars.rise_message()
MUTE_MESSAGE_TEXT = personalvars.mute_cmd_msg()
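# Illustrative sketch (not part of this repository): personalvars is the
# author's local configuration module; a minimal stand-in could look like the
# following, where every returned value is a placeholder:
#
#   # personalvars.py
#   def token_location(): return '/path/to/discord-token.txt'
#   def rise_server(): return 'Example Server'
#   def rise_channel(): return 'general'
#   def rise_message(): return 'Yawgmoth rises.'
#   def mute_cmd_msg(): return 'You have been muted.'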
# ---------------------------
# Initialization
# ---------------------------
yawgmoth = discord.Client()
# ---------------------------
# Event: Ready
# ---------------------------
@yawgmoth.event
@asyncio.coroutine
def on_ready():
riseServer = ""
riseChannel= ""
print("Destination server for rising: " + DESTServ)
serverlist = list(yawgmoth.servers)
for server in serverlist:
if server.name.lower() == DESTServ.lower():
riseServer = server
print("Rise server located")
if not riseServer:
print("No server found with name " + DESTServ)
else:
modsetup = commands.setup_mods(riseServer)
print("Setting up yawgmods...\n{}".format(modsetup))
print("Destination channel for rising: " + DESTChan)
channellist = list(riseServer.channels)
for channel in channellist:
if channel.name.lower() == DESTChan.lower():
riseChannel = channel
print("Rise channel located")
print('User:' + '\t\t' + yawgmoth.user.name)
print('ID:' + '\t\t' + yawgmoth.user.id)
if riseServer:
print('Server:' + '\t\t' + riseServer.name + ", " + riseServer.id)
if riseChannel:
print('Channel:' + '\t' + riseChannel.name)
yield from yawgmoth.send_message(riseChannel, riseMSG)
# ---------------------------
# Event: Message
# ---------------------------
@yawgmoth.event
@asyncio.coroutine
def on_message(message):
response = ''
if message.author not in commands.ignored_users:
if message.author in commands.muted_users:
yield from yawgmoth.delete_message(message)
else:
response = commands.cmd_fetch(message)
##############
# Card Specs #
##############
if message.content.startswith('!details'):
response += commands.cmd_details(message)
if message.content.startswith('!rulings'):
response += commands.cmd_rulings(message)
if message.content.startswith('!image'):
response += commands.cmd_image(message)
if message.content.startswith('!price'):
response += commands.cmd_price(message)
############
# Banlists #
############
if message.content.startswith('!standardban'):
response += commands.cmd_standardban(message)
if message.content.startswith('!modernban'):
response += commands.cmd_modernban(message)
if message.content.startswith('!legacyban'):
response += commands.cmd_legacyban(message)
if message.content.startswith('!vintageban'):
response += commands.cmd_vintageban(message)
if message.content.startswith('!edhban'):
response += commands.cmd_edhban(message)
############
# Misc MTG #
############
if message.content.startswith('!rules'):
response += 'http://media.wizards.com/2016/docs/MagicCompRules_04082016.pdf'
############
# Bot Info #
############
if message.content.startswith('!git'):
response += commands.cmd_git(message)
if message.content.startswith('!version'):
response += commands.cmd_version(message)
##############
# Just 4 Fun #
##############
if message.content.startswith('!obey'):
response += commands.cmd_obey(message)
if message.content.startswith('!moon'):
response += commands.cmd_moon(message)
if message.content.startswith('!sun'):
response += ':sun_with_face:'
if message.content.startswith('!blush'):
response += ':yawgblush:'
if message.content.startswith('!sheep'):
response += ':sheep:'
if message.content.startswith('!pingme'):
response += commands.cmd_ping(message)
if message.content.startswith('!temp'):
response += commands.cmd_temp(message)
#####################
# Role Change Block #
#####################
if message.content.startswith('!lfg') or \
message.content.startswith('!cockatrice') or \
message.content.startswith('!gauntlet') or \
message.content.startswith('!ranked') or \
message.content.startswith('!shitposter') or \
message.content.startswith('!foodforthonk'):
todo = ['n/a', 'How did you even get to this place in the code?']
if message.content.startswith('!lfg'):
todo = commands.cmd_rolelfg(message)
if message.content.startswith('!cockatrice'):
todo = commands.cmd_rolech(message, 'Cockatrice')
if message.content.startswith('!gauntlet'):
todo = commands.cmd_rolech(message, 'Gauntlet')
if message.content.startswith('!ranked'):
todo = commands.cmd_rolech(message, 'Ranked')
if message.content.startswith('!shitposter'):
todo = commands.cmd_rolech(message, 'Shitposter')
if message.content.startswith('!foodforthonk'):
todo = commands.cmd_rolech(message, 'Member')
if todo[0] == 'n/a':
response += todo[1]
if todo[0] == 'Add':
yield from yawgmoth.add_roles(todo[1], todo[2])
response += todo[3]
if todo[0] == 'Remove':
yield from yawgmoth.remove_roles(todo[1], todo[2])
response += todo[3]
if todo[0] == 'AddLfg':
yield from yawgmoth.add_roles(todo[1], *todo[2])
response += todo[3]
if todo[0] == 'RemoveLfg':
yield from yawgmoth.remove_roles(todo[1], *todo[2])
response += todo[3]
################
# Mod Commands #
################
if message.content.startswith('!ignore'):
response += commands.cmd_ignore(message)
if message.content.startswith('!yawgmod'):
response += commands.cmd_yawgmod(message)
if message.content.startswith('!clearignore'):
response += commands.cmd_clearignore(message)
##################
# Admin Commands #
##################
if message.content.startswith('!reset'):
response += commands.cmd_reset(message)
if message.content.startswith('!reboot') or message.content.startswith('!nogitreset'):
response += commands.cmd_reboot(message)
if message.content.startswith('!shutdown'):
response += commands.cmd_shutdown(message)
if message.content.startswith('!mute'):
mute_resp = commands.cmd_mute(message)
if mute_resp[0]:
response += mute_resp[1] + mute_resp[2]
#yield from yawgmoth.send_message(mute_resp[1], MUTE_MESSAGE_TEXT)
else:
response += mute_resp[1]
####################
# Admin Just 4 Fun #
####################
if message.content.startswith('!gametime'):
gn = commands.cmd_gametime(message)
if gn: #If a non-admin tries this command, gn will be blank
if gn == 'CLEAR':
yield from yawgmoth.change_presence()
else:
yield from yawgmoth.change_presence(game=discord.Game(name=gn))
if message.content.startswith('!typing'):
yield from yawgmoth.send_typing(message.channel)
yield from yawgmoth.delete_message(message)
if message.content.startswith('!echo'):
eResp = commands.cmd_echo(message)
if eResp: #If a non-admin tries this command, eResp will be blank
response += eResp
yield from yawgmoth.delete_message(message)
if response:
yield from yawgmoth.send_message(message.channel, response)
# ---------------------------
# Startup
# ---------------------------
with open(TokenLocation, "r") as myfile:
token = myfile.read()
yawgmoth.run(token)
|
|
#!/usr/bin/env python
# Copyright (c) 2004-2010 ActiveState Software Inc.
# Written by: Trent Mick <[email protected]>
# License: MIT License (http://www.opensource.org/licenses/mit-license.php)
"""
platinfo.py -- standardized determination of platform info
>>> from platinfo import PlatInfo
>>> pi = PlatInfo()
>>> pi.os
'linux'
>>> pi.arch
'x86'
# A number of pieces of info gathered (some of them plat-dependent).
>>> pi.as_dict()
{'arch': 'x86',
'distro': 'SuSE',
'distro_desc': 'SuSE Linux 9.0 (i586)',
'distro_ver': '9.0',
'libcpp': 'libcpp5',
'lsb_version': '1.3',
'name': 'linux-x86',
'os': 'linux',
'os_ver': '2.4.21'}
# The default name is "<os>-<arch>"...
>>> pi.name() # default
'linux-x86'
>>> print(pi)
linux-x86
# ...but that can be customized with some rules.
>>> pi.name('os', 'distro', 'arch')
'linux-suse-x86'
>>> pi.name('os', 'distro+distro_ver', 'arch')
'linux-suse9-x86'
>>> pi.name('os+os_ver[:2]', 'arch')
'linux2.4-x86'
>>> pi.name('os', 'arch', sep='/')
'linux/x86'
# The full name provides a little bit more info.
>>> pi.fullname()
'linux2.4-suse9-x86'
# platname() is a shortcut for PlatInfo.name(...).
>>> from platinfo import platname
>>> platname('os', 'distro', 'arch')
'linux-suse-x86'
This module determines and returns standardized names for
platforms, where the "standard" is Trent Mick's reasoning :)
from experience building ActivePython on a fairly large number of
platforms.
The driving goal is to provide platform names that are:
- relatively short
- readable (as much as possible making matching the given name to an
actual machine self-explanatory)
- be capable enough to produce all names needed to distinguish all
platform-specific application builds
- generally safe for usage in filenames
- not ugly (e.g. "MSWin32" is ugly)
Generally some of these names match those used for ActiveTcl and
ActivePerl where that makes sense (for example, "windows" is used
instead of Perl's burdensome "MSWin32"). See the particular method
docstrings for more details.
"""
# Development notes:
# - The name of this module is intentionally not "platform" to not
# conflict with (Marc-Andre Lemburg's?) platform.py in the stdlib.
# - YAGNI: Having a "quick/terse" mode. Will always gather all possible
# information unless we come up with a case NOT to do so.
__version_info__ = (2, 0, 0)
__version__ = '.'.join(map(str, __version_info__))
import os
import sys
import re
import tempfile
import logging
import errno
import subprocess
from pprint import pprint
from os.path import exists
import warnings
log = logging.getLogger("platinfo")
#---- exceptions
class Error(Exception):
pass
class InternalError(Error):
def __str__(self):
return Error.__str__(self) + """
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Please report this error by adding a bug here:
* http://code.google.com/p/platinfo/issues/list
* or, by sending an email to <[email protected]>.
*
* I'd like to keep improving `platinfo2.py' to cover as many platforms
* as possible. Please be sure to include the error message above and
* any addition information you think might be relevant. Thanks!
* -- Trent
*
* platinfo version: %s
* python version: %s
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *""" % (
__version_info__, sys.version_info)
class LinuxDistroVersionWarning(RuntimeWarning):
pass
warnings.simplefilter("once", LinuxDistroVersionWarning)
#---- public API
class PlatInfo(object):
"""Platform information for the current machine."""
#TODO: allow incoming 'win32' and 'win64'?
_known_oses = set(
"windows hpux linux macosx aix solaris freebsd openbsd".split())
# Note: `ia64_32` is the name being used for 32-bit builds on
# HP-UX IA64. This is a hacky name (mixing plat and build info), but
# it isn't the first one.
_known_archs = set(
"x86 powerpc powerpc64 ppc x64 x86_64 ia64 ia64_32 sparc sparc64 parisc".split())
@classmethod
def from_name(cls, name):
"""Create a PlatInfo instance from a platname string.
This only knows how to deal with "os[os_ver]-arch[arch_ver]".
For example:
GOOD: windows-x86, hpux-parisc2.0, aix5-powerpc
BAD: linux-libcpp5-x86
"""
parts = name.split('-')
if len(parts) != 2:
raise Error("cannot parse a platname that doesn't match "
"'os[os_ver]-arch[arch_ver]': %r" % name)
data = {}
if parts[0] in cls._known_oses:
data["os"] = parts[0]
else:
for known_os in cls._known_oses:
if parts[0].startswith(known_os):
data["os"] = known_os
data["os_ver"] = parts[0][len(known_os):]
break
else:
raise Error("could not part os-part of platform name: %r"
% parts[0])
if parts[1] in cls._known_archs:
data["arch"] = parts[1]
else:
for known_arch in cls._known_archs:
if parts[1].startswith(known_arch):
data["arch"] = known_arch
data["arch_ver"] = parts[1][len(known_arch):]
break
else:
raise Error("could not part arch-part of platform name: %r"
% parts[1])
return cls(**data)
def __init__(self, **kwargs):
"""If created with no arguments, all available data for the current
platform will be determined. If called with arguments, the PlatInfo
will just use those as all platform info. E.g.,
>>> p = PlatInfo(os='windows', arch='x86')
>>> p.name()
'windows-x86'
"""
if kwargs:
self.__dict__ = kwargs
elif sys.platform == "win32":
self._init_windows()
elif sys.platform.startswith("linux"):
self._init_linux()
elif sys.platform.startswith("sunos"):
self._init_solaris()
elif sys.platform.startswith("hp-ux"):
self._init_hpux()
elif sys.platform.startswith("aix"):
self._init_aix()
elif sys.platform == "darwin":
self._init_mac()
elif sys.platform.startswith("freebsd"):
self._init_freebsd()
elif sys.platform.startswith("openbsd"):
self._init_openbsd()
elif sys.platform.startswith("netbsd"):
self._init_netbsd()
else:
raise InternalError("unknown platform: '%s'" % sys.platform)
def __str__(self):
return self.name()
def __repr__(self):
args = ['%s=%r' % item for item in self.__dict__.items()]
class_parts = [self.__class__.__name__]
if self.__class__.__module__ != "__main__":
class_parts.insert(0, self.__class__.__module__)
return "%s(%s)" % ('.'.join(class_parts), ', '.join(args))
def match(self, **kwargs):
for name, value in kwargs.items():
if getattr(self, name) != value:
return False
else:
return True
def name(self, *rules, **kwargs):
"""name([rules...]) --> platform name
Return a string representation for this platform.
Keyword args:
'sep' is a string to use for joining platform data.
'errors' is a string defining what to do if a given platform
data does not exist for this platform. E.g. if the rule
"distro" is given for Windows. Valid values are:
"ignore" - just skip that datum (this is the default)
"strict" - raise an Error
'filesafe' is a boolean (default False) indicating if the
returned name should be made safe for usage in a filename.
'lower' is a boolean (default True) indicating if the returned
name should be lowercased.
Rule Syntax:
os # just a datum name
os+os_ver # use '+' to join without the 'sep'
"""
sep = kwargs.get("sep", "-")
errors = kwargs.get("errors", "ignore")
filesafe = kwargs.get("filesafe", False)
if filesafe:
raise InternalError("name(..., filesafe=True) not yet implemented")
lower = kwargs.get("lower", True)
if not rules:
rules = ("os", "arch")
#print "RULES:", rules
bits = []
for rule in rules:
bit = self._eval_rule(rule, errors=errors)
if bit:
bits.append(bit)
if lower:
return sep.join(bits).lower()
else:
return sep.join(bits)
def fullname(self):
parts = []
if sys.platform == "win32":
parts = ["os", "os_name"]
else:
parts = ["os+os_ver[:2]"]
parts += ["distro+distro_ver", "libc", "glibc+glibc_ver[:2]",
"libcpp", "arch+arch_ver"]
return self.name(*parts)
_token_parser = re.compile(r"^([\w]+)(\[[\d:]+\])?$")
def _eval_rule(self, rule, errors):
assert errors in ("strict", "ignore")
bits = []
for token in rule.split('+'):
m = self._token_parser.search(token)
if not m:
if errors == "strict":
raise Error("illegal token: '%s'" % token)
elif errors == "ignore":
continue
item_name, slice_str = m.groups()
try:
value = getattr(self, item_name)
except AttributeError:
if errors == "strict":
raise Error("no '%s' info for this platform" % item_name)
elif errors == "ignore":
continue
if slice_str and not item_name.endswith("_ver"):
if errors == "strict":
raise Error("slicing only allowed on '*_ver' items: '%s'"
% token)
elif errors == "ignore":
continue
elif slice_str:
parts = _split_ver(value)
value = _join_ver( eval(str(parts)+slice_str) )
bits.append(value)
return ''.join(bits)
def as_dict(self):
"""Return a dict representation of the platform info."""
d = self.__dict__.copy()
assert "name" not in d, "conflict with `name` datum"
d["name"] = self.name()
return d
def as_xml(self):
from xml.sax.saxutils import escape
indent = ' '*2
s = '<platinfo version="%s">\n' % __version__
for key, value in self.as_dict().items():
s += indent + '<%s>%s</%s>\n' % (key, escape(value), key)
s += '</platinfo>'
return s
def as_yaml(self):
# prefix with '---\n' YAML doc-separator?
s = '--- platinfo version="%s"\n' % __version__
parts = ["%s: %s" % i for i in self.as_dict().items()]
s += '\n'.join(parts)
return s
def _init_windows(self):
#XXX Right answer here is GetSystemInfo().
#XXX Does this work on all Windows flavours?
self.os = "windows"
PROCESSOR_ARCHITECTURE = os.environ.get("PROCESSOR_ARCHITECTURE")
if PROCESSOR_ARCHITECTURE == "IA64":
self.arch = "ia64"
elif PROCESSOR_ARCHITECTURE == "x86":
self.arch = "x86"
elif PROCESSOR_ARCHITECTURE == "AMD64":
self.arch = "x64"
else:
raise InternalError("unknown Windows PROCESSOR_ARCHITECTURE: %r"
% PROCESSOR_ARCHITECTURE)
# Get some additional info from Python's core platform.py, if
# available.
#XXX Would be nice to extend platform.py's win32_ver to use
# the extra OSVERSIONINFOEX structure elements (esp. service
# package version).
try:
import platform
except ImportError:
log.debug("cannot get extra windows os info: no platform.py")
else:
release, version, csd, ptype = platform.win32_ver()
if not release:
log.debug("platform.py could not get extra windows os info")
if release: self.os_name = release
if version: self.os_ver = version
if csd: self.os_csd = csd
def _init_freebsd(self):
self.os = "freebsd"
uname = os.uname()
self.os_ver = uname[2].split('-', 1)[0]
arch = uname[-1]
if re.match(r"i\d86", arch):
self.arch = "x86"
else:
raise InternalError("unknown FreeBSD architecture: '%s'" % arch)
def _init_openbsd(self):
self.os = "openbsd"
uname = os.uname()
self.os_ver = uname[2].split('-', 1)[0]
arch = uname[-1]
if re.match(r"i\d86", arch):
self.arch = "x86"
elif arch == "amd64":
self.arch = "x86_64"
else:
raise InternalError("unknown OpenBSD architecture: '%s'" % arch)
def _init_netbsd(self):
self.os = "netbsd"
uname = os.uname()
self.os_ver = uname[2].split('-', 1)[0]
arch = uname[-1]
if re.match(r"i\d86", arch):
self.arch = "x86"
else:
raise InternalError("unknown NetBSD architecture: '%s'" % arch)
def _init_linux(self):
self.os = "linux"
uname = os.uname()
self.os_ver = uname[2].split('-', 1)[0]
# Determine hardware type from 'uname -m' -- os.uname() is not
# reliable: reports "i686" on iron (a Linux/IA64 box).
o = os.popen('uname -m 2> /dev/null')
arch = o.read().strip()
o.close()
if arch == "ia64":
self.arch = "ia64"
elif re.match("i\d86", arch):
self.arch = "x86"
elif arch == "x86_64":
self.arch = "x86_64"
elif arch == "ppc":
self.arch = "ppc"
else:
raise InternalError("unknown Linux architecture: '%s'" % arch)
self._set_linux_distro_info()
lib_info = _get_linux_lib_info()
if "libstdc++" in lib_info:
self.libcpp = "libcpp" + lib_info["libstdc++"]
if "libc" in lib_info:
# For now, only the major 'libc' version number is used.
self.libc = "libc" + lib_info["libc"].split('.')[0]
if "glibc" in lib_info:
self.glibc = "glibc"
self.glibc_ver = lib_info["glibc"]
def _init_solaris(self):
self.os = "solaris"
uname = os.uname()
if uname[2].startswith("5."):
self.os_ver = uname[2].split(".", 1)[1]
else:
raise InternalError("unknown Solaris version: '%s'" % uname[2])
if uname[4].startswith("sun4"):
self.arch = "sparc"
arch_ver = _get_sparc_arch_ver()
if arch_ver is not None:
self.arch_ver = arch_ver
if int(arch_ver) >= 9:
self.arch = "sparc64"
elif uname[4].startswith("i86pc"):
self.arch = "x86"
else:
raise InternalError("unknown Solaris architecture: '%s'" % uname[4])
def _init_hpux(self):
self.os = "hpux"
uname = os.uname()
if uname[4] == "ia64":
self.arch = "ia64"
elif uname[4] == "9000/800":
self.arch = "parisc"
self.arch_ver = _get_hpux_parisc_arch_ver()
else:
raise InternalError("unknown HP-UX architecture: '%s'" % uname[4])
try:
self.os_ver = uname[2].split('.', 1)[1] # e.g. "B.11.00"
except IndexError:
raise InternalError("unknown HP-UX version: could not "
"parse '%s' from uname" % uname[2])
def _init_aix(self):
uname = os.uname()
self.os = "aix"
self.os_ver = "%s.%s" % (uname[3], uname[2])
# Determine processor type from 'uname -p' -- os.uname() does not
# have this.
o = os.popen('uname -p 2> /dev/null')
arch = o.read().strip()
o.close()
# 32-bit or 64-bit?
# Ideas from http://www.stata.com/support/faqs/win/64bit.html
o = os.popen('getconf -v KERNEL_BITMODE 2> /dev/null')
kbitmode = o.read().strip()
o.close()
if kbitmode:
sixtyfour = '64' in kbitmode
else:
o = os.popen('file /usr/lib/boot/unix* 2> /dev/null')
listing = o.read().strip()
o.close()
sixtyfour = '64-bit XCOFF executable' in listing
self.arch = arch + (sixtyfour and '64' or '')
def _init_mac(self):
# Determine processor type from 'uname -p' -- os.uname() does not
# have this.
o = os.popen('uname -p 2> /dev/null')
self.arch = {"powerpc": "powerpc",
"i386": "x86"}[o.read().strip()]
o.close()
# Historically Python code (e.g. platform.py) has used the
# Gestalt Manager to retrieve Mac OS system info. However, this
# has problems on macosx-x86. As well,
# <http://tinyurl.com/9ssrn> says this:
#
# A better way to obtain version information in Mac OS X is to
# read the system version information from the following file:
#
# /System/Library/CoreServices/SystemVersion.plist
#
# Note: If there end up being problems using this, then try
# getting info from `sw_vers`.
getters = [
self._get_macos_ver_info_from_plist,
self._get_macos_ver_info_from_gestalt,
]
for getter in getters:
ver_info = getter()
if ver_info:
assert "os_ver" in ver_info
for n,v in ver_info.items():
setattr(self, n, v)
if self.os_ver.startswith("10."):
self.os = "macosx"
else:
self.os = "macos"
break
else:
self.os = "macos"
import warnings
warnings.warn("could not determine Mac OS version info: "
"consider adding support for parsing `sw_vers`")
self.darwin_ver = os.uname()[2]
def _get_macos_ver_info_from_gestalt(self):
import gestalt
import MacOS
try:
sysv = gestalt.gestalt("sysv")
except MacOS.Error:
# On Mac OS X/Intel (at least on the early release dev
# boxes) the Gestalt Manager does not seem to be initialized
# with the standard selectors -- or the equivalent.
# Requesting information on "sysv", "sysu", etc. all return:
# gestaltUndefSelectorErr (-5551)
# Specifies an undefined selector was passed to the
# Gestalt Manager.
pass
else:
def bcd2int(bcd): return int(hex(bcd)[2:])
major = bcd2int((sysv & 0xFF00) >> 8)
minor = (sysv & 0x00F0) >> 4
patch = (sysv & 0x000F)
return {
"os_ver": "%d.%d.%d" % (major, minor, patch),
}
def _get_macos_ver_info_from_plist(self):
"""Retrive Mac OS system information from
/System/Library/CoreServices/SystemVersion.plist
as suggested here:
http://tinyurl.com/9ssrn
"""
plist_path = "/System/Library/CoreServices/SystemVersion.plist"
if not exists(plist_path):
return
try:
from plistlib import Plist
except ImportError:
return
plist = Plist.fromFile(plist_path)
return {
"os_ver": plist["ProductVersion"],
"os_build": plist["ProductBuildVersion"],
"os_name": plist["ProductName"],
}
def _get_linux_lsb_release_info(self):
"""Some Linux distros have a `lsb_release` that provides some
data about the distro.
"""
try:
try:
import subprocess
except ImportError:
i,o,e = os.popen3("lsb_release --all")
i.close()
stdout = o.read()
stderr = e.read()
o.close()
retval = e.close()
else:
p = subprocess.Popen(["lsb_release", "--all"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
retval = p.wait()
stdout = stdout.decode('utf-8')
except OSError:
# Can happen if "lsb_release" did not exist, bug 82403.
retval = 1 # an error
d = {}
if retval:
return {} # Running lsb_release failed
patterns = {
"distro": re.compile("^Distributor ID:\s+(.*?)\s*$"),
"distro_desc": re.compile("^Description:\s+(.*?)\s*$"),
"distro_ver": re.compile("^Release:\s+(.*?)\s*$"),
"distro_codename": re.compile("^Codename:\s+(.*?)\s*$"),
"lsb_version": re.compile("^LSB Version:\s+(.*?)\s*$"),
}
for line in stdout.splitlines(0):
for name, pattern in patterns.items():
match = pattern.search(line)
if match:
value = match.group(1)
if value != "n/a":
d[name] = value
return d
def _get_linux_release_file_info(self):
try:
etc_files = os.listdir("/etc")
except EnvironmentError:
return {}
# A good list of release-files for various Linux distros is
# here: http://linuxmafia.com/faq/Admin/release-files.html
d = {}
release_file_pat = re.compile(r'(\w+)[-_](release|version)')
candidate_etc_files = []
for etc_file in etc_files:
m = release_file_pat.match(etc_file)
if m:
candidate_etc_files.append((m.group(1), "/etc/"+etc_file))
if not candidate_etc_files:
return {}
patterns = {
"redhat": re.compile("^Red Hat Linux release ([\d\.]+)"),
# As of release 7, "Fedora Core" is just called "Fedora".
"fedora": re.compile("^Fedora release ([\d\.]+)"),
"fedoracore": re.compile("^Fedora Core release ([\d\.]+)"),
"mandrake": re.compile("^Mandrake Linux release ([\d\.]+)"),
# Ignoring the different RHEL flavours (AS, ES, WS) for now.
"rhel": re.compile("^Red Hat Enterprise Linux \w{2} release ([\d\.]+)"),
"suse": re.compile("^SuSE Linux ([\d\.]+)"),
"opensuse": re.compile("^openSUSE ([\d\.]+)"),
"debian": re.compile("^([\d\.]+)"),
"slackware": re.compile("^Slackware ([\d\.]+)"),
"gentoo": re.compile("^Gentoo Base System release ([\d\.]+)"),
}
errmsgs = []
for distro_family, etc_path in candidate_etc_files:
f = open(etc_path, 'r')
first_line = f.readline().rstrip()
f.close()
for distro, pattern in patterns.items():
m = pattern.search(first_line)
if m:
d["distro_family"] = distro_family
d["distro"] = distro
d["distro_ver"] = m.group(1)
if first_line.strip() != m.group(1):
d["distro_desc"] = first_line.strip()
break
errmsgs.append("first line of '%s' (%s)" % (etc_path, first_line))
if d:
break
else:
# If we have a release-file, just fill in "distro_family"
# and "distro" and move on. For example, Arch Linux's
# release file is *empty*.
if candidate_etc_files:
d["distro_family"] = distro_family = candidate_etc_files[0][0]
d["distro"] = distro_family.lower()
warnings.warn("could not determine linux distro_ver %s"
% " or ".join(errmsgs),
LinuxDistroVersionWarning)
return d
def _set_linux_distro_info(self):
"""Determine the following Linux distribution information:
distro
distro_ver (maybe)
distro_family (maybe)
Distro families are "redhat", "debian", and "suse". For
example, Mandrake Linux is a member of the "redhat"
distro family, Ubuntu is a member of the "debian" family.
distro_codename (maybe)
distro_description (maybe)
"""
assert sys.platform.startswith("linux")
# First try to use `lsb_release`.
# - Ubuntu Linux includes "/etc/debian_version" but has its
# useful version info in "/etc/lsb-release" (best parsed from
# `lsb_release`).
# - Is there reason to prefer "/etc/foo[-_](release|version)"
# info over `lsb_release` for any Linux distro?
d = self._get_linux_lsb_release_info()
if "distro" in d and "distro_ver" in d:
for k, v in d.items():
setattr(self, k, v)
return
# Then try to find a release/version file in "/etc".
# - Algorithm borrows from platform.py.
d = self._get_linux_release_file_info()
if "distro" in d:
for k, v in d.items():
setattr(self, k, v)
return
# Then try to use Python's platform.py to help.
try:
import platform
except ImportError:
pass
else:
distro, distro_ver, distro_id = platform.dist()
if distro and distro_ver:
self.distro = distro
self.distro_ver = distro_ver
return
raise InternalError("unknown Linux distro: no `lsb_release`, "
"couldn't find a '/etc/*[-_](version|release)' "
"file, and Python's platform.py couldn't "
"identify the distro either")
def platname(*rules, **kwargs):
"""platname([rules...]) --> platform name
Return a string representation for this platform.
Keyword args:
'sep' is a string to use for joining platform data.
'errors' is a string defining what to do if a given platform
data does not exist for this platform. E.g. if the rule
"distro" is given for Windows. Valid values are:
"ignore" - just skip that datum (this is the default)
"strict" - raise an Error
'filesafe' is a boolean (default False) indicating if the
returned name should be made safe for usage in a filename.
'lower' is a boolean (default True) indicating if the returned
name should be lowercased.
Rule Syntax:
os # just a datum name
os+os_ver # use '+' to join without the 'sep'
"""
return PlatInfo().name(*rules, **kwargs)
#---- internal support stuff
# Note: Not using `subprocess.CalledProcessError` because that isn't in
# Python 2.4.
class RunError(Exception): pass
class ExecutableNotFoundError(RunError): pass
class NonZeroReturnCodeError(RunError): pass
def _run(args, ignore_stderr=False):
"""Run the given command.
@param args {str|list} Command string or sequence of program arguments. The
program to execute is normally the first item in the args
sequence or the string if a string is given.
@param ignore_stderr {bool} If True, return only stdout; otherwise
return both stdout and stderr combined (2>&1)
@returns {str} The program output.
@raises {RunError} `ExecutableNotFoundError` or `NonZeroReturnCodeError`.
"""
if ignore_stderr:
stderr_pipe = subprocess.PIPE
else:
stderr_pipe = subprocess.STDOUT
try:
p = subprocess.Popen(args=args,
shell=False, # prevent obtrusive shell warnings
stdout=subprocess.PIPE, stderr=stderr_pipe)
except OSError:
_, e, _ = sys.exc_info()
if e.errno == errno.ENOENT:
# `exe` not found
raise ExecutableNotFoundError('The command "%s" cannot be run: %s'
% (args, e))
raise
stdout, stderr = p.communicate()
if p.returncode:
raise NonZeroReturnCodeError('"%s" returned non-zero return code (%d)'
% (args, p.returncode))
return stdout
# Recipe: ver (0.1) in C:\trentm\tm\recipes\cookbook
def _split_ver(ver_str):
"""Parse the given version into a tuple of "significant" parts.
>>> _split_ver("4.1.0")
('4', '1', '0')
>>> _split_ver("1.3a2")
('1', '3', 'a', '2')
"""
bits = [b for b in re.split("(\.|[a-z])", ver_str) if b != '.']
return tuple(bits)
def _join_ver(ver_tuple):
"""Join the given version-tuple, inserting '.' as appropriate.
>>> _join_ver( ('4', '1', '0') )
"4.1.0"
>>> _join_ver( ('1', '3', 'a', '2') )
"1.3a2"
"""
def isint(s):
try:
int(s)
except ValueError:
return False
else:
return True
dotted = []
for bit in ver_tuple:
if dotted and isint(dotted[-1]) and isint(bit):
dotted.append('.')
dotted.append(str(bit))
return ''.join(dotted)
def _get_linux_lib_info():
"""Return a dict of default lib versions for a build on linux.
For example:
{"libstdc++": "5", "libc": "6", "glibc": "2.3.3"}
The 'glibc' version is only returned if 'libc' is >=6.
Some notes on Linux libc versions
---------------------------------
From http://sourceware.org/glibc/glibc-faq.html#s-2.1
libc-4 a.out libc
libc-5 original ELF libc
libc-6 GNU libc
But what are libc.so.7 and libc.so.8 that I see in Google searches (but
not yet in any Linux installs I have access to)?
"""
assert sys.platform.startswith("linux")
tmpdir = _create_temp_dir()
try:
# Compile a test C++ file and get its object dump.
cxxfile = os.path.join(tmpdir, "lib-info.cxx")
f = open(cxxfile, 'w')
try:
f.write("""
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char **argv) { exit(0); }
""")
finally:
f.close()
currdir = os.getcwd()
os.chdir(tmpdir)
try:
try:
_run(['g++', cxxfile], ignore_stderr=True)
except RunError:
_, e, _ = sys.exc_info()
log.debug("could not compile test C++ file with g++: %s", e)
return {}
objdump = os.popen('objdump -p a.out').read()
finally:
os.chdir(currdir)
# Parse the lib versions from the object dump.
# e.g.: libstdc++-libc6.2-2.so.3
patterns = {
"libstdc++": re.compile(r'NEEDED\s+libstdc\+\+(-libc\d+\.\d+\-\d+)?\.so\.(?P<ver>.*)'),
"libc": re.compile(r'NEEDED\s+libc\.so\.(?P<ver>.*)'),
}
lib_info = {}
for name, pattern in patterns.items():
match = pattern.search(objdump)
if not match:
raise InternalError("could not find 'NEEDED %s...' in "
"objdump of compiled test C++ file"
% name)
lib_info[name] = match.group("ver")
finally:
_rmtree(tmpdir)
# If this is glibc, get its version.
if int(_split_ver(lib_info["libc"])[0]) >= 6:
libc_so = os.path.join("/lib", "libc.so."+lib_info["libc"])
o = os.popen(libc_so)
try:
libc_so_ver_line = o.readline().strip()
finally:
retval = o.close()
if retval:
raise InternalError("error running '%s'" % libc_so)
# e.g.:
# GNU C Library stable release version 2.3.3 (20040917), by R...
# GNU C Library stable release version 2.5, by Roland McGrath et al.
pattern = re.compile(r"^GNU C Library.*?(\d+\.\d+(\.\d+)?)")
match = pattern.search(libc_so_ver_line)
if not match:
raise InternalError("error determining glibc version from '%s'"
% libc_so_ver_line)
lib_info["glibc"] = match.group(1)
return lib_info
def _get_sparc_arch_ver():
# http://developers.sun.com/solaris/developer/support/driver/64bit-faqs.html#QA12.12
# http://docs.sun.com/app/docs/doc/816-5175/isalist-5
o = os.popen("isalist")
instruct_sets = o.read().split()
retval = o.close()
if retval:
raise InternalError("error determining SPARC architecture version")
first = instruct_sets[0]
if first.startswith("sparcv9"):
return '9'
elif first.startswith("sparcv8"):
return '8'
elif first.startswith("sparcv7"):
return '7'
elif first == "sparc":
return '8'
else:
import warnings
warnings.warn("could not determine SPARC architecture version "
"from first `isalist` output: %r" % first)
return None
def _get_hpux_parisc_arch_ver():
assert sys.platform.startswith("hp-ux")
# Get the model name from `model` and parse out the model name, e.g:
# 9000/800/L2000-44 -> L2000-44
# 9000/800/A180c -> A180c
o = os.popen("model")
model = o.read().strip()
retval = o.close()
if retval:
raise InternalError("error determining HP-UX PA-RISC model name")
model = model.split('/')[-1]
# Lookup the model name in sched.models model database.
sched_models_paths = [
"/usr/sam/lib/mo/sched.models",
"/opt/langtools/lib/sched.models"
]
for sched_models_path in sched_models_paths:
fin = open(sched_models_path, 'r')
try:
for line in fin:
if line.lstrip().startswith("/*"): continue
db_model, db_arch, db_paname = line.split()
if db_model.lower() == model.lower():
# e.g. "1.1e" -> "1.1"
arch_ver_pat = re.compile(r"^(\d\.\d)")
return arch_ver_pat.search(db_arch).group(1)
finally:
fin.close()
raise InternalError("could not find '%s' model name in HP-UX "
"PA-RISC model database, '%s'"
% (model, sched_models_path))
def _create_temp_dir():
"""Create a temporary directory and return the path to it."""
if hasattr(tempfile, "mkdtemp"): # use the newer mkdtemp is available
path = tempfile.mkdtemp()
else:
path = tempfile.mktemp()
os.makedirs(path)
return path
def _rmtree_on_error(rmFunction, filePath, excInfo):
if excInfo[0] == OSError:
# presuming because file is read-only
octal_0o777 = 511 # this literal is only supported on Python2.6 or above
os.chmod(filePath, octal_0o777)
rmFunction(filePath)
def _rmtree(dirname):
import shutil
shutil.rmtree(dirname, 0, _rmtree_on_error)
#---- mainline
# Recipe: banner (1.0) in C:\trentm\tm\recipes\cookbook
def _banner(text, ch='=', length=78):
"""Return a banner line centering the given text.
"text" is the text to show in the banner. None can be given to have
no text.
"ch" (optional, default '=') is the banner line character (can
also be a short string to repeat).
"length" (optional, default 78) is the length of banner to make.
Examples:
>>> banner("Peggy Sue")
'================================= Peggy Sue =================================='
>>> banner("Peggy Sue", ch='-', length=50)
'------------------- Peggy Sue --------------------'
>>> banner("Pretty pretty pretty pretty Peggy Sue", length=40)
'Pretty pretty pretty pretty Peggy Sue'
"""
if text is None:
return ch * length
elif len(text) + 2 + len(ch)*2 > length:
# Not enough space for even one line char (plus space) around text.
return text
else:
remain = length - (len(text) + 2)
prefix_len = remain // 2
suffix_len = remain - prefix_len
if len(ch) == 1:
prefix = ch * prefix_len
suffix = ch * suffix_len
else:
prefix = ch * (prefix_len//len(ch)) + ch[:prefix_len%len(ch)]
suffix = ch * (suffix_len//len(ch)) + ch[:suffix_len%len(ch)]
return prefix + ' ' + text + ' ' + suffix
# Recipe: pretty_logging (0.1) in C:\trentm\tm\recipes\cookbook
class _PerLevelFormatter(logging.Formatter):
"""Allow multiple format string -- depending on the log level.
A "fmtFromLevel" optional arg is added to the constructor. It can be
a dictionary mapping a log record level to a format string. The
usual "fmt" argument acts as the default.
"""
def __init__(self, fmt=None, datefmt=None, fmtFromLevel=None):
logging.Formatter.__init__(self, fmt, datefmt)
if fmtFromLevel is None:
self.fmtFromLevel = {}
else:
self.fmtFromLevel = fmtFromLevel
def format(self, record):
record.levelname = record.levelname.lower()
if record.levelno in self.fmtFromLevel:
#XXX This is a non-threadsafe HACK. Really the base Formatter
# class should provide a hook accessor for the _fmt
# attribute. *Could* add a lock guard here (overkill?).
_saved_fmt = self._fmt
self._fmt = self.fmtFromLevel[record.levelno]
try:
return logging.Formatter.format(self, record)
finally:
self._fmt = _saved_fmt
else:
return logging.Formatter.format(self, record)
def _setup_logging():
hdlr = logging.StreamHandler()
defaultFmt = "%(name)s: %(levelname)s: %(message)s"
infoFmt = "%(name)s: %(message)s"
fmtr = _PerLevelFormatter(fmt=defaultFmt,
fmtFromLevel={logging.INFO: infoFmt})
hdlr.setFormatter(fmtr)
logging.root.addHandler(hdlr)
def main(argv=None):
import optparse
if argv is None:
argv = sys.argv
_setup_logging()
usage = "usage: %prog [NAME-RULES...]"
version = "%prog "+__version__
desc = """\
Determine and display platform information. 'platinfo' is really
designed to be used as a Python module. See the module docstring for
more information."""
parser = optparse.OptionParser(prog="platinfo", usage=usage,
version=version,
description=desc)
parser.add_option("-v", "--verbose", dest="log_level",
action="store_const", const=logging.DEBUG,
help="more verbose output")
parser.add_option("-q", "--quiet", dest="log_level",
action="store_const", const=logging.WARNING,
help="quieter output")
parser.add_option("-n", "--name", action="store_const",
dest="format", const="name",
help="output string name")
parser.add_option("-f", "--full-name", action="store_const",
dest="format", const="fullname",
help="a more detailed string name")
parser.add_option("-d", "--dict", action="store_const",
dest="format", const="dict",
help="output Python dict representation")
parser.add_option("-x", "--xml", action="store_const",
dest="format", const="xml",
help="output XML representation")
parser.add_option("-y", "--yaml", action="store_const",
dest="format", const="yaml",
help="output YAML representation")
parser.add_option("-a", "--all", action="store_const",
dest="format", const="all",
help="output all representations")
parser.set_defaults(log_level=logging.INFO)
opts, rules = parser.parse_args()
log.setLevel(opts.log_level)
pi = PlatInfo()
WIDTH=75
if opts.format is None:
if rules:
print(pi.name(*rules))
else:
print("%s (%s)" % (pi.name(), pi.fullname()))
if opts.format == "name":
print(pi.name(*rules))
if opts.format == "fullname":
print(pi.fullname())
elif opts.format == "dict":
if sys.version_info[:2] >= (2,4):
pprint(pi.as_dict(), width=WIDTH)
else:
from pprint import PrettyPrinter
pp = PrettyPrinter(width=WIDTH)
pp.pprint(pi.as_dict())
elif opts.format == "xml":
print(pi.as_xml())
elif opts.format == "yaml":
print(pi.as_yaml())
elif opts.format == "all":
print(_banner("platform info", length=WIDTH))
print(pi.name(*rules))
print(_banner("as_dict", '-', length=WIDTH))
if sys.version_info[:2] >= (2,4):
pprint(pi.as_dict(), width=WIDTH)
else:
from pprint import PrettyPrinter
pp = PrettyPrinter(width=WIDTH)
pp.pprint(pi.as_dict())
print(_banner("as_xml", '-', length=WIDTH))
print(pi.as_xml())
print(_banner("as_yaml", '-', length=WIDTH))
print(pi.as_yaml())
print(_banner(None, length=WIDTH))
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
|
from __future__ import with_statement
from sqlalchemy import (
testing, exc as sa_exc, event, String, Column, Table, select, func)
from sqlalchemy.testing import (
fixtures, engines, eq_, assert_raises, assert_raises_message,
assert_warnings, mock, expect_warnings)
from sqlalchemy.orm import (
exc as orm_exc, Session, mapper, sessionmaker, create_session,
relationship, attributes)
from sqlalchemy.testing.util import gc_collect
from test.orm._fixtures import FixtureTest
class SessionTransactionTest(FixtureTest):
run_inserts = None
__backend__ = True
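# Note (per SQLAlchemy's test-fixture conventions, roughly): run_inserts = None
# means the fixture rows are not pre-inserted for these tests, and
# __backend__ = True marks the suite to run against the configured backend
# databases rather than only the default in-memory configuration.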
def test_no_close_transaction_on_flush(self):
User, users = self.classes.User, self.tables.users
c = testing.db.connect()
try:
mapper(User, users)
s = create_session(bind=c)
s.begin()
tran = s.transaction
s.add(User(name='first'))
s.flush()
c.execute("select * from users")
u = User(name='two')
s.add(u)
s.flush()
u = User(name='third')
s.add(u)
s.flush()
assert s.transaction is tran
tran.close()
finally:
c.close()
@engines.close_open_connections
def test_subtransaction_on_external(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
conn = testing.db.connect()
trans = conn.begin()
sess = create_session(bind=conn, autocommit=False, autoflush=True)
sess.begin(subtransactions=True)
u = User(name='ed')
sess.add(u)
sess.flush()
sess.commit() # commit does nothing
trans.rollback() # rolls back
assert len(sess.query(User).all()) == 0
sess.close()
@testing.requires.savepoints
@engines.close_open_connections
def test_external_nested_transaction(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
try:
conn = testing.db.connect()
trans = conn.begin()
sess = create_session(bind=conn, autocommit=False,
autoflush=True)
u1 = User(name='u1')
sess.add(u1)
sess.flush()
sess.begin_nested()
u2 = User(name='u2')
sess.add(u2)
sess.flush()
sess.rollback()
trans.commit()
assert len(sess.query(User).all()) == 1
except:
conn.close()
raise
@testing.requires.savepoints
def test_nested_accounting_new_items_removed(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
session = create_session(bind=testing.db)
session.begin()
session.begin_nested()
u1 = User(name='u1')
session.add(u1)
session.commit()
assert u1 in session
session.rollback()
assert u1 not in session
@testing.requires.savepoints
def test_nested_accounting_deleted_items_restored(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
session = create_session(bind=testing.db)
session.begin()
u1 = User(name='u1')
session.add(u1)
session.commit()
session.begin()
u1 = session.query(User).first()
session.begin_nested()
session.delete(u1)
session.commit()
assert u1 not in session
session.rollback()
assert u1 in session
@testing.requires.savepoints
def test_heavy_nesting(self):
users = self.tables.users
session = create_session(bind=testing.db)
session.begin()
session.connection().execute(users.insert().values(
name='user1'))
session.begin(subtransactions=True)
session.begin_nested()
session.connection().execute(users.insert().values(
name='user2'))
assert session.connection().execute(
'select count(1) from users').scalar() == 2
session.rollback()
assert session.connection().execute(
'select count(1) from users').scalar() == 1
session.connection().execute(users.insert().values(
name='user3'))
session.commit()
assert session.connection().execute(
'select count(1) from users').scalar() == 2
@testing.requires.savepoints
def test_dirty_state_transferred_deep_nesting(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
s = Session(testing.db)
u1 = User(name='u1')
s.add(u1)
s.commit()
nt1 = s.begin_nested()
nt2 = s.begin_nested()
u1.name = 'u2'
assert attributes.instance_state(u1) not in nt2._dirty
assert attributes.instance_state(u1) not in nt1._dirty
s.flush()
assert attributes.instance_state(u1) in nt2._dirty
assert attributes.instance_state(u1) not in nt1._dirty
s.commit()
assert attributes.instance_state(u1) in nt2._dirty
assert attributes.instance_state(u1) in nt1._dirty
s.rollback()
assert attributes.instance_state(u1).expired
eq_(u1.name, 'u1')
@testing.requires.independent_connections
def test_transactions_isolated(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
s1 = create_session(bind=testing.db, autocommit=False)
s2 = create_session(bind=testing.db, autocommit=False)
u1 = User(name='u1')
s1.add(u1)
s1.flush()
assert s2.query(User).all() == []
@testing.requires.two_phase_transactions
def test_twophase(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
# TODO: mock up a failure condition here
# to ensure a rollback succeeds
mapper(User, users)
mapper(Address, addresses)
engine2 = engines.testing_engine()
sess = create_session(autocommit=True, autoflush=False,
twophase=True)
sess.bind_mapper(User, testing.db)
sess.bind_mapper(Address, engine2)
sess.begin()
u1 = User(name='u1')
a1 = Address(email_address='u1@e')
sess.add_all((u1, a1))
sess.commit()
sess.close()
engine2.dispose()
assert users.count().scalar() == 1
assert addresses.count().scalar() == 1
@testing.requires.independent_connections
def test_invalidate(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess = Session()
u = User(name='u1')
sess.add(u)
sess.flush()
c1 = sess.connection(User)
sess.invalidate()
assert c1.invalidated
eq_(sess.query(User).all(), [])
c2 = sess.connection(User)
assert not c2.invalidated
def test_subtransaction_on_noautocommit(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess = create_session(autocommit=False, autoflush=True)
sess.begin(subtransactions=True)
u = User(name='u1')
sess.add(u)
sess.flush()
sess.commit() # commit does nothing
sess.rollback() # rolls back
assert len(sess.query(User).all()) == 0
sess.close()
@testing.requires.savepoints
def test_nested_transaction(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess = create_session()
sess.begin()
u = User(name='u1')
sess.add(u)
sess.flush()
sess.begin_nested() # nested transaction
u2 = User(name='u2')
sess.add(u2)
sess.flush()
sess.rollback()
sess.commit()
assert len(sess.query(User).all()) == 1
sess.close()
@testing.requires.savepoints
def test_nested_autotrans(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess = create_session(autocommit=False)
u = User(name='u1')
sess.add(u)
sess.flush()
sess.begin_nested() # nested transaction
u2 = User(name='u2')
sess.add(u2)
sess.flush()
sess.rollback()
sess.commit()
assert len(sess.query(User).all()) == 1
sess.close()
@testing.requires.savepoints
def test_nested_transaction_connection_add(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = create_session(autocommit=True)
sess.begin()
sess.begin_nested()
u1 = User(name='u1')
sess.add(u1)
sess.flush()
sess.rollback()
u2 = User(name='u2')
sess.add(u2)
sess.commit()
eq_(set(sess.query(User).all()), set([u2]))
sess.begin()
sess.begin_nested()
u3 = User(name='u3')
sess.add(u3)
sess.commit() # commit the nested transaction
sess.rollback()
eq_(set(sess.query(User).all()), set([u2]))
sess.close()
@testing.requires.savepoints
def test_mixed_transaction_control(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = create_session(autocommit=True)
sess.begin()
sess.begin_nested()
transaction = sess.begin(subtransactions=True)
sess.add(User(name='u1'))
transaction.commit()
sess.commit()
sess.commit()
sess.close()
eq_(len(sess.query(User).all()), 1)
t1 = sess.begin()
t2 = sess.begin_nested()
sess.add(User(name='u2'))
t2.commit()
assert sess.transaction is t1
sess.close()
@testing.requires.savepoints
def test_mixed_transaction_close(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = create_session(autocommit=False)
sess.begin_nested()
sess.add(User(name='u1'))
sess.flush()
sess.close()
sess.add(User(name='u2'))
sess.commit()
sess.close()
eq_(len(sess.query(User).all()), 1)
def test_continue_flushing_on_commit(self):
"""test that post-flush actions get flushed also if
we're in commit()"""
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session()
to_flush = [User(name='ed'), User(name='jack'), User(name='wendy')]
@event.listens_for(sess, "after_flush_postexec")
def add_another_user(session, ctx):
if to_flush:
session.add(to_flush.pop(0))
x = [1]
@event.listens_for(sess, "after_commit") # noqa
def add_another_user(session):
x[0] += 1
sess.add(to_flush.pop())
sess.commit()
eq_(x, [2])
eq_(
sess.scalar(select([func.count(users.c.id)])), 3
)
def test_continue_flushing_guard(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session()
@event.listens_for(sess, "after_flush_postexec")
def add_another_user(session, ctx):
session.add(User(name='x'))
sess.add(User(name='x'))
assert_raises_message(
orm_exc.FlushError,
"Over 100 subsequent flushes have occurred",
sess.commit
)
def test_error_on_using_inactive_session_commands(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = create_session(autocommit=True)
sess.begin()
sess.begin(subtransactions=True)
sess.add(User(name='u1'))
sess.flush()
sess.rollback()
assert_raises_message(sa_exc.InvalidRequestError,
"This Session's transaction has been "
r"rolled back by a nested rollback\(\) "
"call. To begin a new transaction, "
r"issue Session.rollback\(\) first.",
sess.begin, subtransactions=True)
sess.close()
def test_no_sql_during_commit(self):
sess = create_session(bind=testing.db, autocommit=False)
@event.listens_for(sess, "after_commit")
def go(session):
session.execute("select 1")
assert_raises_message(
sa_exc.InvalidRequestError,
"This session is in 'committed' state; no further "
"SQL can be emitted within this transaction.",
sess.commit)
def test_no_sql_during_prepare(self):
sess = create_session(bind=testing.db, autocommit=False, twophase=True)
sess.prepare()
assert_raises_message(
sa_exc.InvalidRequestError,
"This session is in 'prepared' state; no further "
"SQL can be emitted within this transaction.",
sess.execute, "select 1")
def test_no_prepare_wo_twophase(self):
sess = create_session(bind=testing.db, autocommit=False)
assert_raises_message(sa_exc.InvalidRequestError,
"'twophase' mode not enabled, or not root "
"transaction; can't prepare.",
sess.prepare)
def test_closed_status_check(self):
sess = create_session()
trans = sess.begin()
trans.rollback()
assert_raises_message(
sa_exc.ResourceClosedError, "This transaction is closed",
trans.rollback)
assert_raises_message(
sa_exc.ResourceClosedError, "This transaction is closed",
trans.commit)
def test_deactive_status_check(self):
sess = create_session()
trans = sess.begin()
trans2 = sess.begin(subtransactions=True)
trans2.rollback()
assert_raises_message(
sa_exc.InvalidRequestError,
"This Session's transaction has been rolled back by a nested "
"rollback\(\) call. To begin a new transaction, issue "
"Session.rollback\(\) first.",
trans.commit
)
def test_deactive_status_check_w_exception(self):
sess = create_session()
trans = sess.begin()
trans2 = sess.begin(subtransactions=True)
try:
raise Exception("test")
except:
trans2.rollback(_capture_exception=True)
assert_raises_message(
sa_exc.InvalidRequestError,
"This Session's transaction has been rolled back due to a "
"previous exception during flush. To begin a new transaction "
"with this Session, first issue Session.rollback\(\). "
"Original exception was: test",
trans.commit
)
def _inactive_flushed_session_fixture(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session()
u1 = User(id=1, name='u1')
sess.add(u1)
sess.commit()
sess.add(User(id=1, name='u2'))
assert_raises(
orm_exc.FlushError, sess.flush
)
return sess, u1
def test_execution_options_begin_transaction(self):
bind = mock.Mock()
sess = Session(bind=bind)
c1 = sess.connection(execution_options={'isolation_level': 'FOO'})
eq_(
bind.mock_calls,
[
mock.call.contextual_connect(),
mock.call.contextual_connect().
execution_options(isolation_level='FOO'),
mock.call.contextual_connect().execution_options().begin()
]
)
eq_(c1, bind.contextual_connect().execution_options())
def test_execution_options_ignored_mid_transaction(self):
bind = mock.Mock()
conn = mock.Mock(engine=bind)
bind.contextual_connect = mock.Mock(return_value=conn)
sess = Session(bind=bind)
sess.execute("select 1")
with expect_warnings(
"Connection is already established for the "
"given bind; execution_options ignored"):
sess.connection(execution_options={'isolation_level': 'FOO'})
def test_warning_on_using_inactive_session_new(self):
User = self.classes.User
sess, u1 = self._inactive_flushed_session_fixture()
u2 = User(name='u2')
sess.add(u2)
def go():
sess.rollback()
assert_warnings(go,
["Session's state has been changed on a "
"non-active transaction - this state "
"will be discarded."],
)
assert u2 not in sess
assert u1 in sess
def test_warning_on_using_inactive_session_dirty(self):
sess, u1 = self._inactive_flushed_session_fixture()
u1.name = 'newname'
def go():
sess.rollback()
assert_warnings(go,
["Session's state has been changed on a "
"non-active transaction - this state "
"will be discarded."],
)
assert u1 in sess
assert u1 not in sess.dirty
def test_warning_on_using_inactive_session_delete(self):
sess, u1 = self._inactive_flushed_session_fixture()
sess.delete(u1)
def go():
sess.rollback()
assert_warnings(go,
["Session's state has been changed on a "
"non-active transaction - this state "
"will be discarded."],
)
assert u1 in sess
assert u1 not in sess.deleted
def test_warning_on_using_inactive_session_rollback_evt(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session()
u1 = User(id=1, name='u1')
sess.add(u1)
sess.commit()
u3 = User(name='u3')
@event.listens_for(sess, "after_rollback")
def evt(s):
sess.add(u3)
sess.add(User(id=1, name='u2'))
def go():
assert_raises(
orm_exc.FlushError, sess.flush
)
assert_warnings(go,
["Session's state has been changed on a "
"non-active transaction - this state "
"will be discarded."],
)
assert u3 not in sess
def test_preserve_flush_error(self):
User = self.classes.User
sess, u1 = self._inactive_flushed_session_fixture()
for i in range(5):
assert_raises_message(sa_exc.InvalidRequestError,
"^This Session's transaction has been "
r"rolled back due to a previous exception "
"during flush. To "
"begin a new transaction with this "
"Session, first issue "
r"Session.rollback\(\). Original exception "
"was:",
sess.commit)
sess.rollback()
sess.add(User(id=5, name='some name'))
sess.commit()
def test_no_autocommit_with_explicit_commit(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
session = create_session(autocommit=False)
session.add(User(name='ed'))
session.transaction.commit()
assert session.transaction is not None, \
'autocommit=False should start a new transaction'
class _LocalFixture(FixtureTest):
run_setup_mappers = 'once'
run_inserts = None
session = sessionmaker()
@classmethod
def setup_mappers(cls):
User, Address = cls.classes.User, cls.classes.Address
users, addresses = cls.tables.users, cls.tables.addresses
mapper(
User, users, properties={
'addresses': relationship(
Address, backref='user', cascade="all, delete-orphan",
order_by=addresses.c.id),
})
mapper(Address, addresses)
class FixtureDataTest(_LocalFixture):
run_inserts = 'each'
__backend__ = True
def test_attrs_on_rollback(self):
User = self.classes.User
sess = self.session()
u1 = sess.query(User).get(7)
u1.name = 'ed'
sess.rollback()
eq_(u1.name, 'jack')
def test_commit_persistent(self):
User = self.classes.User
sess = self.session()
u1 = sess.query(User).get(7)
u1.name = 'ed'
sess.flush()
sess.commit()
eq_(u1.name, 'ed')
def test_concurrent_commit_persistent(self):
User = self.classes.User
s1 = self.session()
u1 = s1.query(User).get(7)
u1.name = 'ed'
s1.commit()
s2 = self.session()
u2 = s2.query(User).get(7)
assert u2.name == 'ed'
u2.name = 'will'
s2.commit()
assert u1.name == 'will'
class CleanSavepointTest(FixtureTest):
"""test the behavior for [ticket:2452] - rollback on begin_nested()
only expires objects tracked as being modified in that transaction.
"""
run_inserts = None
__backend__ = True
def _run_test(self, update_fn):
User, users = self.classes.User, self.tables.users
mapper(User, users)
s = Session(bind=testing.db)
u1 = User(name='u1')
u2 = User(name='u2')
s.add_all([u1, u2])
s.commit()
u1.name
u2.name
s.begin_nested()
update_fn(s, u2)
eq_(u2.name, 'u2modified')
s.rollback()
eq_(u1.__dict__['name'], 'u1')
assert 'name' not in u2.__dict__
eq_(u2.name, 'u2')
@testing.requires.savepoints
def test_rollback_ignores_clean_on_savepoint(self):
def update_fn(s, u2):
u2.name = 'u2modified'
self._run_test(update_fn)
@testing.requires.savepoints
def test_rollback_ignores_clean_on_savepoint_agg_upd_eval(self):
User = self.classes.User
def update_fn(s, u2):
s.query(User).filter_by(name='u2').update(
dict(name='u2modified'), synchronize_session='evaluate')
self._run_test(update_fn)
@testing.requires.savepoints
def test_rollback_ignores_clean_on_savepoint_agg_upd_fetch(self):
User = self.classes.User
def update_fn(s, u2):
s.query(User).filter_by(name='u2').update(
dict(name='u2modified'),
synchronize_session='fetch')
self._run_test(update_fn)
class ContextManagerTest(FixtureTest):
run_inserts = None
__backend__ = True
@testing.requires.savepoints
@engines.close_open_connections
def test_contextmanager_nested_rollback(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session()
def go():
with sess.begin_nested():
sess.add(User()) # name can't be null
sess.flush()
# and not InvalidRequestError
assert_raises(
sa_exc.DBAPIError,
go
)
with sess.begin_nested():
sess.add(User(name='u1'))
eq_(sess.query(User).count(), 1)
def test_contextmanager_commit(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session(autocommit=True)
with sess.begin():
sess.add(User(name='u1'))
sess.rollback()
eq_(sess.query(User).count(), 1)
def test_contextmanager_rollback(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session(autocommit=True)
def go():
with sess.begin():
sess.add(User()) # name can't be null
assert_raises(
sa_exc.DBAPIError,
go
)
eq_(sess.query(User).count(), 0)
with sess.begin():
sess.add(User(name='u1'))
eq_(sess.query(User).count(), 1)
class AutoExpireTest(_LocalFixture):
__backend__ = True
def test_expunge_pending_on_rollback(self):
User = self.classes.User
sess = self.session()
u2 = User(name='newuser')
sess.add(u2)
assert u2 in sess
sess.rollback()
assert u2 not in sess
def test_trans_pending_cleared_on_commit(self):
User = self.classes.User
sess = self.session()
u2 = User(name='newuser')
sess.add(u2)
assert u2 in sess
sess.commit()
assert u2 in sess
u3 = User(name='anotheruser')
sess.add(u3)
sess.rollback()
assert u3 not in sess
assert u2 in sess
def test_update_deleted_on_rollback(self):
User = self.classes.User
s = self.session()
u1 = User(name='ed')
s.add(u1)
s.commit()
# this actually tests that the delete() operation,
# when cascaded to the "addresses" collection, does not
# trigger a flush (via lazyload) before the cascade is complete.
s.delete(u1)
assert u1 in s.deleted
s.rollback()
assert u1 in s
assert u1 not in s.deleted
@testing.requires.predictable_gc
def test_gced_delete_on_rollback(self):
User, users = self.classes.User, self.tables.users
s = self.session()
u1 = User(name='ed')
s.add(u1)
s.commit()
s.delete(u1)
u1_state = attributes.instance_state(u1)
assert u1_state in s.identity_map.all_states()
assert u1_state in s._deleted
s.flush()
assert u1_state not in s.identity_map.all_states()
assert u1_state not in s._deleted
del u1
gc_collect()
assert u1_state.obj() is None
s.rollback()
# new in 1.1, not in identity map if the object was
# gc'ed and we restore snapshot; we've changed update_impl
# to just skip this object
assert u1_state not in s.identity_map.all_states()
# in any version, the state is replaced by the query
# because the identity map would switch it
u1 = s.query(User).filter_by(name='ed').one()
assert u1_state not in s.identity_map.all_states()
assert s.scalar(users.count()) == 1
s.delete(u1)
s.flush()
assert s.scalar(users.count()) == 0
s.commit()
def test_trans_deleted_cleared_on_rollback(self):
User = self.classes.User
s = self.session()
u1 = User(name='ed')
s.add(u1)
s.commit()
s.delete(u1)
s.commit()
assert u1 not in s
s.rollback()
assert u1 not in s
def test_update_deleted_on_rollback_cascade(self):
User, Address = self.classes.User, self.classes.Address
s = self.session()
u1 = User(name='ed', addresses=[Address(email_address='foo')])
s.add(u1)
s.commit()
s.delete(u1)
assert u1 in s.deleted
assert u1.addresses[0] in s.deleted
s.rollback()
assert u1 in s
assert u1 not in s.deleted
assert u1.addresses[0] not in s.deleted
def test_update_deleted_on_rollback_orphan(self):
User, Address = self.classes.User, self.classes.Address
s = self.session()
u1 = User(name='ed', addresses=[Address(email_address='foo')])
s.add(u1)
s.commit()
a1 = u1.addresses[0]
u1.addresses.remove(a1)
s.flush()
eq_(s.query(Address).filter(Address.email_address == 'foo').all(), [])
s.rollback()
assert a1 not in s.deleted
assert u1.addresses == [a1]
def test_commit_pending(self):
User = self.classes.User
sess = self.session()
u1 = User(name='newuser')
sess.add(u1)
sess.flush()
sess.commit()
eq_(u1.name, 'newuser')
def test_concurrent_commit_pending(self):
User = self.classes.User
s1 = self.session()
u1 = User(name='edward')
s1.add(u1)
s1.commit()
s2 = self.session()
u2 = s2.query(User).filter(User.name == 'edward').one()
u2.name = 'will'
s2.commit()
assert u1.name == 'will'
class TwoPhaseTest(_LocalFixture):
__backend__ = True
@testing.requires.two_phase_transactions
def test_rollback_on_prepare(self):
User = self.classes.User
s = self.session(twophase=True)
u = User(name='ed')
s.add(u)
s.prepare()
s.rollback()
assert u not in s
class RollbackRecoverTest(_LocalFixture):
__backend__ = True
def test_pk_violation(self):
User, Address = self.classes.User, self.classes.Address
s = self.session()
a1 = Address(email_address='foo')
u1 = User(id=1, name='ed', addresses=[a1])
s.add(u1)
s.commit()
a2 = Address(email_address='bar')
u2 = User(id=1, name='jack', addresses=[a2])
u1.name = 'edward'
a1.email_address = 'foober'
s.add(u2)
assert_raises(orm_exc.FlushError, s.commit)
assert_raises(sa_exc.InvalidRequestError, s.commit)
s.rollback()
assert u2 not in s
assert a2 not in s
assert u1 in s
assert a1 in s
assert u1.name == 'ed'
assert a1.email_address == 'foo'
u1.name = 'edward'
a1.email_address = 'foober'
s.commit()
eq_(
s.query(User).all(),
[User(id=1, name='edward',
addresses=[Address(email_address='foober')])]
)
@testing.requires.savepoints
def test_pk_violation_with_savepoint(self):
User, Address = self.classes.User, self.classes.Address
s = self.session()
a1 = Address(email_address='foo')
u1 = User(id=1, name='ed', addresses=[a1])
s.add(u1)
s.commit()
a2 = Address(email_address='bar')
u2 = User(id=1, name='jack', addresses=[a2])
u1.name = 'edward'
a1.email_address = 'foober'
s.begin_nested()
s.add(u2)
assert_raises(orm_exc.FlushError, s.commit)
assert_raises(sa_exc.InvalidRequestError, s.commit)
s.rollback()
assert u2 not in s
assert a2 not in s
assert u1 in s
assert a1 in s
s.commit()
eq_(
s.query(User).all(),
[
User(
id=1, name='edward',
addresses=[Address(email_address='foober')])])
class SavepointTest(_LocalFixture):
__backend__ = True
@testing.requires.savepoints
def test_savepoint_rollback(self):
User = self.classes.User
s = self.session()
u1 = User(name='ed')
u2 = User(name='jack')
s.add_all([u1, u2])
s.begin_nested()
u3 = User(name='wendy')
u4 = User(name='foo')
u1.name = 'edward'
u2.name = 'jackward'
s.add_all([u3, u4])
eq_(
s.query(User.name).order_by(User.id).all(),
[('edward',), ('jackward',), ('wendy',), ('foo',)])
s.rollback()
assert u1.name == 'ed'
assert u2.name == 'jack'
eq_(
s.query(User.name).order_by(User.id).all(),
[('ed',), ('jack',)])
s.commit()
assert u1.name == 'ed'
assert u2.name == 'jack'
eq_(s.query(User.name).order_by(User.id).all(), [('ed',), ('jack',)])
@testing.requires.savepoints
def test_savepoint_delete(self):
User = self.classes.User
s = self.session()
u1 = User(name='ed')
s.add(u1)
s.commit()
eq_(s.query(User).filter_by(name='ed').count(), 1)
s.begin_nested()
s.delete(u1)
s.commit()
eq_(s.query(User).filter_by(name='ed').count(), 0)
s.commit()
@testing.requires.savepoints
def test_savepoint_commit(self):
User = self.classes.User
s = self.session()
u1 = User(name='ed')
u2 = User(name='jack')
s.add_all([u1, u2])
s.begin_nested()
u3 = User(name='wendy')
u4 = User(name='foo')
u1.name = 'edward'
u2.name = 'jackward'
s.add_all([u3, u4])
eq_(
s.query(User.name).order_by(User.id).all(),
[('edward',), ('jackward',), ('wendy',), ('foo',)])
s.commit()
def go():
assert u1.name == 'edward'
assert u2.name == 'jackward'
eq_(
s.query(User.name).order_by(User.id).all(),
[('edward',), ('jackward',), ('wendy',), ('foo',)])
self.assert_sql_count(testing.db, go, 1)
s.commit()
eq_(
s.query(User.name).order_by(User.id).all(),
[('edward',), ('jackward',), ('wendy',), ('foo',)])
@testing.requires.savepoints
def test_savepoint_rollback_collections(self):
User, Address = self.classes.User, self.classes.Address
s = self.session()
u1 = User(name='ed', addresses=[Address(email_address='foo')])
s.add(u1)
s.commit()
u1.name = 'edward'
u1.addresses.append(Address(email_address='bar'))
s.begin_nested()
u2 = User(name='jack', addresses=[Address(email_address='bat')])
s.add(u2)
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name='edward',
addresses=[
Address(email_address='foo'),
Address(email_address='bar')]),
User(name='jack', addresses=[Address(email_address='bat')])
])
s.rollback()
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name='edward',
addresses=[
Address(email_address='foo'),
Address(email_address='bar')]),
])
s.commit()
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name='edward',
addresses=[
Address(email_address='foo'),
Address(email_address='bar')]),
]
)
@testing.requires.savepoints
def test_savepoint_commit_collections(self):
User, Address = self.classes.User, self.classes.Address
s = self.session()
u1 = User(name='ed', addresses=[Address(email_address='foo')])
s.add(u1)
s.commit()
u1.name = 'edward'
u1.addresses.append(Address(email_address='bar'))
s.begin_nested()
u2 = User(name='jack', addresses=[Address(email_address='bat')])
s.add(u2)
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name='edward',
addresses=[
Address(email_address='foo'),
Address(email_address='bar')]),
User(name='jack', addresses=[Address(email_address='bat')])
]
)
s.commit()
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name='edward',
addresses=[
Address(email_address='foo'),
Address(email_address='bar')]),
User(name='jack', addresses=[Address(email_address='bat')])
]
)
s.commit()
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name='edward',
addresses=[
Address(email_address='foo'),
Address(email_address='bar')]),
User(name='jack', addresses=[Address(email_address='bat')])
]
)
@testing.requires.savepoints
def test_expunge_pending_on_rollback(self):
User = self.classes.User
sess = self.session()
sess.begin_nested()
u2 = User(name='newuser')
sess.add(u2)
assert u2 in sess
sess.rollback()
assert u2 not in sess
@testing.requires.savepoints
def test_update_deleted_on_rollback(self):
User = self.classes.User
s = self.session()
u1 = User(name='ed')
s.add(u1)
s.commit()
s.begin_nested()
s.delete(u1)
assert u1 in s.deleted
s.rollback()
assert u1 in s
assert u1 not in s.deleted
class AccountingFlagsTest(_LocalFixture):
__backend__ = True
def test_no_expire_on_commit(self):
User, users = self.classes.User, self.tables.users
sess = sessionmaker(expire_on_commit=False)()
u1 = User(name='ed')
sess.add(u1)
sess.commit()
testing.db.execute(
users.update(users.c.name == 'ed').values(name='edward'))
assert u1.name == 'ed'
sess.expire_all()
assert u1.name == 'edward'
def test_rollback_no_accounting(self):
User, users = self.classes.User, self.tables.users
sess = sessionmaker(_enable_transaction_accounting=False)()
u1 = User(name='ed')
sess.add(u1)
sess.commit()
u1.name = 'edwardo'
sess.rollback()
testing.db.execute(
users.update(users.c.name == 'ed').values(name='edward'))
assert u1.name == 'edwardo'
sess.expire_all()
assert u1.name == 'edward'
def test_commit_no_accounting(self):
User, users = self.classes.User, self.tables.users
sess = sessionmaker(_enable_transaction_accounting=False)()
u1 = User(name='ed')
sess.add(u1)
sess.commit()
u1.name = 'edwardo'
sess.rollback()
testing.db.execute(
users.update(users.c.name == 'ed').values(name='edward'))
assert u1.name == 'edwardo'
sess.commit()
assert testing.db.execute(select([users.c.name])).fetchall() == \
[('edwardo',)]
assert u1.name == 'edwardo'
sess.delete(u1)
sess.commit()
def test_preflush_no_accounting(self):
User, users = self.classes.User, self.tables.users
sess = Session(
_enable_transaction_accounting=False, autocommit=True,
autoflush=False)
u1 = User(name='ed')
sess.add(u1)
sess.flush()
sess.begin()
u1.name = 'edwardo'
u2 = User(name="some other user")
sess.add(u2)
sess.rollback()
sess.begin()
assert testing.db.execute(select([users.c.name])).fetchall() == \
[('ed',)]
class AutoCommitTest(_LocalFixture):
__backend__ = True
def test_begin_nested_requires_trans(self):
sess = create_session(autocommit=True)
assert_raises(sa_exc.InvalidRequestError, sess.begin_nested)
def test_begin_preflush(self):
User = self.classes.User
sess = create_session(autocommit=True)
u1 = User(name='ed')
sess.add(u1)
sess.begin()
u2 = User(name='some other user')
sess.add(u2)
sess.rollback()
assert u2 not in sess
assert u1 in sess
assert sess.query(User).filter_by(name='ed').one() is u1
def test_accounting_commit_fails_add(self):
User = self.classes.User
sess = create_session(autocommit=True)
fail = False
def fail_fn(*arg, **kw):
if fail:
raise Exception("commit fails")
event.listen(sess, "after_flush_postexec", fail_fn)
u1 = User(name='ed')
sess.add(u1)
fail = True
assert_raises(
Exception,
sess.flush
)
fail = False
assert u1 not in sess
u1new = User(id=2, name='fred')
sess.add(u1new)
sess.add(u1)
sess.flush()
assert u1 in sess
eq_(
sess.query(User.name).order_by(User.name).all(),
[('ed', ), ('fred',)]
)
def test_accounting_commit_fails_delete(self):
User = self.classes.User
sess = create_session(autocommit=True)
fail = False
def fail_fn(*arg, **kw):
if fail:
raise Exception("commit fails")
event.listen(sess, "after_flush_postexec", fail_fn)
u1 = User(name='ed')
sess.add(u1)
sess.flush()
sess.delete(u1)
fail = True
assert_raises(
Exception,
sess.flush
)
fail = False
assert u1 in sess
assert u1 not in sess.deleted
sess.delete(u1)
sess.flush()
assert u1 not in sess
eq_(
sess.query(User.name).order_by(User.name).all(),
[]
)
@testing.requires.updateable_autoincrement_pks
def test_accounting_no_select_needed(self):
"""test that flush accounting works on non-expired instances
when autocommit=True/expire_on_commit=True."""
User = self.classes.User
sess = create_session(autocommit=True, expire_on_commit=True)
u1 = User(id=1, name='ed')
sess.add(u1)
sess.flush()
u1.id = 3
u1.name = 'fred'
self.assert_sql_count(testing.db, sess.flush, 1)
assert 'id' not in u1.__dict__
eq_(u1.id, 3)
class NaturalPKRollbackTest(fixtures.MappedTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table('users', metadata, Column('name', String(50), primary_key=True))
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
def test_rollback_recover(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
session = sessionmaker()()
u1, u2, u3 = User(name='u1'), User(name='u2'), User(name='u3')
session.add_all([u1, u2, u3])
session.commit()
session.delete(u2)
u4 = User(name='u2')
session.add(u4)
session.flush()
u5 = User(name='u3')
session.add(u5)
assert_raises(orm_exc.FlushError, session.flush)
assert u5 not in session
assert u2 not in session.deleted
session.rollback()
def test_key_replaced_by_update(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
u1 = User(name='u1')
u2 = User(name='u2')
s = Session()
s.add_all([u1, u2])
s.commit()
s.delete(u1)
s.flush()
u2.name = 'u1'
s.flush()
assert u1 not in s
s.rollback()
assert u1 in s
assert u2 in s
assert s.identity_map[(User, ('u1',))] is u1
assert s.identity_map[(User, ('u2',))] is u2
def test_multiple_key_replaced_by_update(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
u1 = User(name='u1')
u2 = User(name='u2')
u3 = User(name='u3')
s = Session()
s.add_all([u1, u2, u3])
s.commit()
s.delete(u1)
s.delete(u2)
s.flush()
u3.name = 'u1'
s.flush()
u3.name = 'u2'
s.flush()
s.rollback()
assert u1 in s
assert u2 in s
assert u3 in s
assert s.identity_map[(User, ('u1',))] is u1
assert s.identity_map[(User, ('u2',))] is u2
assert s.identity_map[(User, ('u3',))] is u3
def test_key_replaced_by_oob_insert(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
u1 = User(name='u1')
s = Session()
s.add(u1)
s.commit()
s.delete(u1)
s.flush()
s.execute(users.insert().values(name='u1'))
u2 = s.query(User).get('u1')
assert u1 not in s
s.rollback()
assert u1 in s
assert u2 not in s
assert s.identity_map[(User, ('u1',))] is u1
|
|
'''
Unit tests for oc scale
'''
import os
import six
import sys
import unittest
import mock
# Removing invalid variable names for tests so that I can
# keep them brief
# pylint: disable=invalid-name,no-name-in-module
# Disable import-error b/c our libraries aren't loaded in jenkins
# pylint: disable=import-error
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
from oc_scale import OCScale, locate_oc_binary # noqa: E402
class OCScaleTest(unittest.TestCase):
'''
Test class for OCScale
'''
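# The OCScale tests below mock Utils.create_tmpfile_copy (so no kubeconfig is
# copied) and OCScale.openshift_cmd (so the real `oc` binary is never invoked);
# each test feeds canned command results through side_effect lists. The
# locate_oc_binary tests at the end patch the filesystem/PATH lookups instead.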
@mock.patch('oc_scale.Utils.create_tmpfile_copy')
@mock.patch('oc_scale.OCScale.openshift_cmd')
def test_state_list(self, mock_openshift_cmd, mock_tmpfile_copy):
''' Testing a list '''
params = {'name': 'router',
'namespace': 'default',
'replicas': 2,
'state': 'list',
'kind': 'dc',
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False}
dc = '''{"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "router",
"namespace": "default",
"selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
"uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
"resourceVersion": "6558",
"generation": 8,
"creationTimestamp": "2017-01-23T20:58:07Z",
"labels": {
"router": "router"
}
},
"spec": {
"replicas": 2,
}
}'''
mock_openshift_cmd.side_effect = [
{"cmd": '/usr/bin/oc get dc router -n default',
'results': dc,
'returncode': 0}]
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
results = OCScale.run_ansible(params, False)
self.assertFalse(results['changed'])
self.assertEqual(results['result'][0], 2)
@mock.patch('oc_scale.Utils.create_tmpfile_copy')
@mock.patch('oc_scale.OCScale.openshift_cmd')
def test_state_present(self, mock_openshift_cmd, mock_tmpfile_copy):
''' Testing a state present '''
params = {'name': 'router',
'namespace': 'default',
'replicas': 2,
'state': 'present',
'kind': 'dc',
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False}
dc = '''{"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "router",
"namespace": "default",
"selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
"uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
"resourceVersion": "6558",
"generation": 8,
"creationTimestamp": "2017-01-23T20:58:07Z",
"labels": {
"router": "router"
}
},
"spec": {
"replicas": 2,
}
}'''
mock_openshift_cmd.side_effect = [
{"cmd": '/usr/bin/oc get dc router -n default',
'results': dc,
'returncode': 0}]
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
results = OCScale.run_ansible(params, False)
self.assertFalse(results['changed'])
self.assertEqual(results['state'], 'present')
self.assertEqual(results['result'][0], 2)
@mock.patch('oc_scale.Utils.create_tmpfile_copy')
@mock.patch('oc_scale.OCScale.openshift_cmd')
def test_scale_up(self, mock_openshift_cmd, mock_tmpfile_copy):
''' Testing a scale up '''
params = {'name': 'router',
'namespace': 'default',
'replicas': 3,
'state': 'present',
'kind': 'dc',
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False}
dc = '''{"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "router",
"namespace": "default",
"selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
"uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
"resourceVersion": "6558",
"generation": 8,
"creationTimestamp": "2017-01-23T20:58:07Z",
"labels": {
"router": "router"
}
},
"spec": {
"replicas": 2,
}
}'''
dc_updated = '''{"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "router",
"namespace": "default",
"selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
"uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
"resourceVersion": "6559",
"generation": 9,
"creationTimestamp": "2017-01-24T20:58:07Z",
"labels": {
"router": "router"
}
},
"spec": {
"replicas": 3,
}
}'''
mock_openshift_cmd.side_effect = [
{"cmd": '/usr/bin/oc get dc router -n default',
'results': dc,
'returncode': 0},
{"cmd": '/usr/bin/oc get dc router -n default',
'results': dc,
'returncode': 0},
{"cmd": '/usr/bin/oc replace',
'results': dc,
'returncode': 0},
{"cmd": '/usr/bin/oc get dc router -n default',
'results': dc_updated,
'returncode': 0}]
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
results = OCScale.run_ansible(params, False)
self.assertTrue(results['changed'])
self.assertEqual(results['state'], 'present')
self.assertEqual(results['result'][0], 3)
@mock.patch('oc_scale.Utils.create_tmpfile_copy')
@mock.patch('oc_scale.OCScale.openshift_cmd')
def test_scale_down(self, mock_openshift_cmd, mock_tmpfile_copy):
''' Testing a scale down '''
params = {'name': 'router',
'namespace': 'default',
'replicas': 1,
'state': 'present',
'kind': 'dc',
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False}
dc = '''{"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "router",
"namespace": "default",
"selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
"uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
"resourceVersion": "6558",
"generation": 8,
"creationTimestamp": "2017-01-23T20:58:07Z",
"labels": {
"router": "router"
}
},
"spec": {
"replicas": 2,
}
}'''
dc_updated = '''{"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "router",
"namespace": "default",
"selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
"uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
"resourceVersion": "6560",
"generation": 9,
"creationTimestamp": "2017-01-24T20:58:07Z",
"labels": {
"router": "router"
}
},
"spec": {
"replicas": 1,
}
}'''
mock_openshift_cmd.side_effect = [
{"cmd": '/usr/bin/oc get dc router -n default',
'results': dc,
'returncode': 0},
{"cmd": '/usr/bin/oc get dc router -n default',
'results': dc,
'returncode': 0},
{"cmd": '/usr/bin/oc replace',
'results': dc,
'returncode': 0},
{"cmd": '/usr/bin/oc get dc router -n default',
'results': dc_updated,
'returncode': 0}]
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
results = OCScale.run_ansible(params, False)
self.assertTrue(results['changed'])
self.assertEqual(results['state'], 'present')
self.assertEqual(results['result'][0], 1)
@mock.patch('oc_scale.Utils.create_tmpfile_copy')
@mock.patch('oc_scale.OCScale.openshift_cmd')
def test_scale_failed(self, mock_openshift_cmd, mock_tmpfile_copy):
''' Testing a scale failure '''
params = {'name': 'router',
'namespace': 'default',
'replicas': 1,
'state': 'present',
'kind': 'dc',
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False}
dc = '''{"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "router",
"namespace": "default",
"selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
"uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
"resourceVersion": "6558",
"generation": 8,
"creationTimestamp": "2017-01-23T20:58:07Z",
"labels": {
"router": "router"
}
},
"spec": {
"replicas": 2,
}
}'''
error_message = "foo"
mock_openshift_cmd.side_effect = [
{"cmd": '/usr/bin/oc get dc router -n default',
'results': dc,
'returncode': 0},
{"cmd": '/usr/bin/oc get dc router -n default',
'results': dc,
'returncode': 0},
{"cmd": '/usr/bin/oc replace',
'results': error_message,
'returncode': 1}]
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
results = OCScale.run_ansible(params, False)
self.assertTrue(results['failed'])
@mock.patch('oc_scale.Utils.create_tmpfile_copy')
@mock.patch('oc_scale.OCScale.openshift_cmd')
def test_state_unknown(self, mock_openshift_cmd, mock_tmpfile_copy):
''' Testing an unknown state '''
params = {'name': 'router',
'namespace': 'default',
'replicas': 2,
'state': 'unknown-state',
'kind': 'dc',
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False}
dc = '''{"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "router",
"namespace": "default",
"selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
"uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
"resourceVersion": "6558",
"generation": 8,
"creationTimestamp": "2017-01-23T20:58:07Z",
"labels": {
"router": "router"
}
},
"spec": {
"replicas": 2,
}
}'''
mock_openshift_cmd.side_effect = [
{"cmd": '/usr/bin/oc get dc router -n default',
'results': dc,
'returncode': 0}]
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
results = OCScale.run_ansible(params, False)
self.assertFalse('changed' in results)
self.assertEqual(results['failed'], True)
@mock.patch('oc_scale.Utils.create_tmpfile_copy')
@mock.patch('oc_scale.OCScale.openshift_cmd')
def test_scale(self, mock_openshift_cmd, mock_tmpfile_copy):
''' Testing scale '''
params = {'name': 'router',
'namespace': 'default',
'replicas': 3,
'state': 'list',
'kind': 'dc',
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False}
dc = '''{"kind": "DeploymentConfig",
"apiVersion": "v1",
"metadata": {
"name": "router",
"namespace": "default",
"selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
"uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
"resourceVersion": "6558",
"generation": 8,
"creationTimestamp": "2017-01-23T20:58:07Z",
"labels": {
"router": "router"
}
},
"spec": {
"replicas": 3,
}
}'''
mock_openshift_cmd.side_effect = [
{"cmd": '/usr/bin/oc get dc router -n default',
'results': dc,
'returncode': 0},
{"cmd": '/usr/bin/oc create -f /tmp/router -n default',
'results': '',
'returncode': 0}
]
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
results = OCScale.run_ansible(params, False)
self.assertFalse(results['changed'])
self.assertEqual(results['result'][0], 3)
@mock.patch('oc_scale.Utils.create_tmpfile_copy')
@mock.patch('oc_scale.OCScale.openshift_cmd')
def test_scale_rc(self, mock_openshift_cmd, mock_tmpfile_copy):
''' Testing scale for replication controllers '''
params = {'name': 'router',
'namespace': 'default',
'replicas': 3,
'state': 'list',
'kind': 'rc',
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False}
rc = '''{"kind": "ReplicationController",
"apiVersion": "v1",
"metadata": {
"name": "router",
"namespace": "default",
"selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
"uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
"resourceVersion": "6558",
"generation": 8,
"creationTimestamp": "2017-01-23T20:58:07Z",
"labels": {
"router": "router"
}
},
"spec": {
"replicas": 3,
}
}'''
mock_openshift_cmd.side_effect = [
{"cmd": '/usr/bin/oc get rc router -n default',
'results': rc,
'returncode': 0},
{"cmd": '/usr/bin/oc create -f /tmp/router -n default',
'results': '',
'returncode': 0}
]
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
results = OCScale.run_ansible(params, False)
self.assertFalse(results['changed'])
self.assertEqual(results['result'][0], 3)
@mock.patch('oc_scale.Utils.create_tmpfile_copy')
@mock.patch('oc_scale.OCScale.openshift_cmd')
def test_no_dc_scale(self, mock_openshift_cmd, mock_tmpfile_copy):
''' Testing scale for a nonexistent dc '''
params = {'name': 'not_there',
'namespace': 'default',
'replicas': 3,
'state': 'present',
'kind': 'dc',
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False}
mock_openshift_cmd.side_effect = [
{"cmd": '/usr/bin/oc -n default get dc not_there -o json',
'results': [{}],
'returncode': 1,
'stderr': "Error from server: deploymentconfigs \"not_there\" not found\n",
'stdout': ""},
]
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
results = OCScale.run_ansible(params, False)
self.assertTrue(results['failed'])
self.assertEqual(results['msg']['returncode'], 1)
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
''' Testing binary lookup fallback '''
mock_env_get.side_effect = lambda _v, _d: ''
mock_path_exists.side_effect = lambda _: False
self.assertEqual(locate_oc_binary(), 'oc')
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in path '''
oc_bin = '/usr/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in /usr/local/bin '''
oc_bin = '/usr/local/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in ~/bin '''
oc_bin = os.path.expanduser('~/bin/oc')
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup fallback in py3 '''
mock_env_get.side_effect = lambda _v, _d: ''
mock_shutil_which.side_effect = lambda _f, path=None: None
self.assertEqual(locate_oc_binary(), 'oc')
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in path in py3 '''
oc_bin = '/usr/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in /usr/local/bin in py3 '''
oc_bin = '/usr/local/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in ~/bin in py3 '''
oc_bin = os.path.expanduser('~/bin/oc')
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
|
|
#! /usr/bin/env python
# encoding: utf-8
# DC 2008
# Thomas Nagy 2016-2018 (ita)
import os, re, traceback
from waflib import Utils, Logs, Errors
from waflib.Tools import fc, fc_config, fc_scan, ar, ccroot
from waflib.Configure import conf
from waflib.TaskGen import after_method, feature
@conf
def find_ifort(conf):
fc = conf.find_program('ifort', var='FC')
conf.get_ifort_version(fc)
conf.env.FC_NAME = 'IFORT'
@conf
def ifort_modifier_win32(self):
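# On win32, ifort follows MSVC-style conventions: .lib/.dll/.exe file patterns,
# /LIBPATH, /I and /D flags, and link-style /out: targets, as set below.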
v = self.env
v.IFORT_WIN32 = True
v.FCSTLIB_MARKER = ''
v.FCSHLIB_MARKER = ''
v.FCLIB_ST = v.FCSTLIB_ST = '%s.lib'
v.FCLIBPATH_ST = v.STLIBPATH_ST = '/LIBPATH:%s'
v.FCINCPATH_ST = '/I%s'
v.FCDEFINES_ST = '/D%s'
v.fcprogram_PATTERN = v.fcprogram_test_PATTERN = '%s.exe'
v.fcshlib_PATTERN = '%s.dll'
v.fcstlib_PATTERN = v.implib_PATTERN = '%s.lib'
v.FCLNK_TGT_F = '/out:'
v.FC_TGT_F = ['/c', '/o', '']
v.FCFLAGS_fcshlib = ''
v.LINKFLAGS_fcshlib = '/DLL'
v.AR_TGT_F = '/out:'
v.IMPLIB_ST = '/IMPLIB:%s'
v.append_value('LINKFLAGS', '/subsystem:console')
if v.IFORT_MANIFEST:
v.append_value('LINKFLAGS', ['/MANIFEST'])
@conf
def ifort_modifier_darwin(conf):
fc_config.fortran_modifier_darwin(conf)
@conf
def ifort_modifier_platform(conf):
dest_os = conf.env.DEST_OS or Utils.unversioned_sys_platform()
ifort_modifier_func = getattr(conf, 'ifort_modifier_' + dest_os, None)
if ifort_modifier_func:
ifort_modifier_func()
@conf
def get_ifort_version(conf, fc):
"""
Detects the compiler version and sets ``conf.env.FC_VERSION``
"""
version_re = re.compile(r"\bIntel\b.*\bVersion\s*(?P<major>\d*)\.(?P<minor>\d*)",re.I).search
if Utils.is_win32:
cmd = fc
else:
cmd = fc + ['-logo']
out, err = fc_config.getoutput(conf, cmd, stdin=False)
match = version_re(out) or version_re(err)
if not match:
conf.fatal('cannot determine ifort version.')
k = match.groupdict()
conf.env.FC_VERSION = (k['major'], k['minor'])
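# FC_VERSION ends up as a tuple of version strings taken from the regex groups,
# e.g. ('16', '0') for an ifort 16.0 banner (illustrative values).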
def configure(conf):
"""
Detects the Intel Fortran compilers
"""
if Utils.is_win32:
compiler, version, path, includes, libdirs, arch = conf.detect_ifort()
v = conf.env
v.DEST_CPU = arch
v.PATH = path
v.INCLUDES = includes
v.LIBPATH = libdirs
v.MSVC_COMPILER = compiler
try:
v.MSVC_VERSION = float(version)
except ValueError:
v.MSVC_VERSION = float(version[:-3])
conf.find_ifort_win32()
conf.ifort_modifier_win32()
else:
conf.find_ifort()
conf.find_program('xiar', var='AR')
conf.find_ar()
conf.fc_flags()
conf.fc_add_flags()
conf.ifort_modifier_platform()
all_ifort_platforms = [ ('intel64', 'amd64'), ('em64t', 'amd64'), ('ia32', 'x86'), ('Itanium', 'ia64')]
"""List of icl platforms"""
@conf
def gather_ifort_versions(conf, versions):
"""
List compiler versions by looking up registry keys
"""
version_pattern = re.compile(r'^...?.?\....?.?')
try:
all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Compilers\\Fortran')
except OSError:
try:
all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Intel\\Compilers\\Fortran')
except OSError:
return
index = 0
while 1:
try:
version = Utils.winreg.EnumKey(all_versions, index)
except OSError:
break
index += 1
if not version_pattern.match(version):
continue
targets = {}
for target,arch in all_ifort_platforms:
if target=='intel64':
targetDir='EM64T_NATIVE'
else:
targetDir=target
try:
Utils.winreg.OpenKey(all_versions,version+'\\'+targetDir)
icl_version=Utils.winreg.OpenKey(all_versions,version)
path,type=Utils.winreg.QueryValueEx(icl_version,'ProductDir')
except OSError:
pass
else:
batch_file=os.path.join(path,'bin','ifortvars.bat')
if os.path.isfile(batch_file):
targets[target] = target_compiler(conf, 'intel', arch, version, target, batch_file)
for target,arch in all_ifort_platforms:
try:
icl_version = Utils.winreg.OpenKey(all_versions, version+'\\'+target)
path,type = Utils.winreg.QueryValueEx(icl_version,'ProductDir')
except OSError:
continue
else:
batch_file=os.path.join(path,'bin','ifortvars.bat')
if os.path.isfile(batch_file):
targets[target] = target_compiler(conf, 'intel', arch, version, target, batch_file)
major = version[0:2]
versions['intel ' + major] = targets
@conf
def setup_ifort(conf, versiondict):
"""
Checks installed compilers and targets and returns the first combination from the user's
options, env, or the global supported lists that checks out.
:param versiondict: dict(platform -> dict(architecture -> configuration))
:type versiondict: dict(string -> dict(string -> target_compiler)
:return: the compiler, revision, path, include dirs, library paths and target architecture
:rtype: tuple of strings
"""
platforms = Utils.to_list(conf.env.MSVC_TARGETS) or [i for i,j in all_ifort_platforms]
desired_versions = conf.env.MSVC_VERSIONS or list(reversed(list(versiondict.keys())))
for version in desired_versions:
try:
targets = versiondict[version]
except KeyError:
continue
for arch in platforms:
try:
cfg = targets[arch]
except KeyError:
continue
cfg.evaluate()
if cfg.is_valid:
compiler,revision = version.rsplit(' ', 1)
return compiler,revision,cfg.bindirs,cfg.incdirs,cfg.libdirs,cfg.cpu
conf.fatal('ifort: Impossible to find a valid architecture for building %r - %r' % (desired_versions, list(versiondict.keys())))
@conf
def get_ifort_version_win32(conf, compiler, version, target, vcvars):
# FIXME hack
try:
conf.msvc_cnt += 1
except AttributeError:
conf.msvc_cnt = 1
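# The generated batch file below calls the ifortvars script for the requested
# target and echoes the resulting PATH/INCLUDE/LIB so they can be parsed back
# out of the command output.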
batfile = conf.bldnode.make_node('waf-print-msvc-%d.bat' % conf.msvc_cnt)
batfile.write("""@echo off
set INCLUDE=
set LIB=
call "%s" %s
echo PATH=%%PATH%%
echo INCLUDE=%%INCLUDE%%
echo LIB=%%LIB%%;%%LIBPATH%%
""" % (vcvars,target))
sout = conf.cmd_and_log(['cmd.exe', '/E:on', '/V:on', '/C', batfile.abspath()])
batfile.delete()
lines = sout.splitlines()
if not lines[0]:
lines.pop(0)
MSVC_PATH = MSVC_INCDIR = MSVC_LIBDIR = None
for line in lines:
if line.startswith('PATH='):
path = line[5:]
MSVC_PATH = path.split(';')
elif line.startswith('INCLUDE='):
MSVC_INCDIR = [i for i in line[8:].split(';') if i]
elif line.startswith('LIB='):
MSVC_LIBDIR = [i for i in line[4:].split(';') if i]
if None in (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR):
conf.fatal('ifort: Could not find a valid architecture for building (get_ifort_version_win32)')
# Check if the compiler is usable at all.
# The detection may return 64-bit versions even on 32-bit systems, and these would fail to run.
env = dict(os.environ)
env.update(PATH = path)
compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler)
fc = conf.find_program(compiler_name, path_list=MSVC_PATH)
# Delete CL if it exists, because it can contain options that change the compiler's behaviour rather catastrophically.
if 'CL' in env:
del(env['CL'])
try:
conf.cmd_and_log(fc + ['/help'], env=env)
except UnicodeError:
st = traceback.format_exc()
if conf.logger:
conf.logger.error(st)
conf.fatal('ifort: Unicode error - check the code page?')
except Exception as e:
Logs.debug('ifort: get_ifort_version: %r %r %r -> failure %s', compiler, version, target, str(e))
conf.fatal('ifort: cannot run the compiler in get_ifort_version (run with -v to display errors)')
else:
Logs.debug('ifort: get_ifort_version: %r %r %r -> OK', compiler, version, target)
finally:
conf.env[compiler_name] = ''
return (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR)
class target_compiler(object):
"""
Wraps a compiler configuration; call evaluate() to determine
whether the configuration is usable.
"""
def __init__(self, ctx, compiler, cpu, version, bat_target, bat, callback=None):
"""
:param ctx: configuration context to use to eventually get the version environment
:param compiler: compiler name
:param cpu: target cpu
:param version: compiler version number
:param bat_target: ?
:param bat: path to the batch file to run
		:param callback: optional function that takes the realized environment variable tuple and maps it (e.g. to combine other constant paths)
"""
self.conf = ctx
self.name = None
self.is_valid = False
self.is_done = False
self.compiler = compiler
self.cpu = cpu
self.version = version
self.bat_target = bat_target
self.bat = bat
self.callback = callback
def evaluate(self):
if self.is_done:
return
self.is_done = True
try:
vs = self.conf.get_ifort_version_win32(self.compiler, self.version, self.bat_target, self.bat)
except Errors.ConfigurationError:
self.is_valid = False
return
if self.callback:
vs = self.callback(self, vs)
self.is_valid = True
(self.bindirs, self.incdirs, self.libdirs) = vs
def __str__(self):
return str((self.bindirs, self.incdirs, self.libdirs))
def __repr__(self):
return repr((self.bindirs, self.incdirs, self.libdirs))
@conf
def detect_ifort(self):
return self.setup_ifort(self.get_ifort_versions(False))
@conf
def get_ifort_versions(self, eval_and_save=True):
"""
:return: platforms to compiler configurations
:rtype: dict
"""
dct = {}
self.gather_ifort_versions(dct)
return dct
def _get_prog_names(self, compiler):
if compiler=='intel':
compiler_name = 'ifort'
linker_name = 'XILINK'
lib_name = 'XILIB'
else:
# assumes CL.exe
compiler_name = 'CL'
linker_name = 'LINK'
lib_name = 'LIB'
return compiler_name, linker_name, lib_name
@conf
def find_ifort_win32(conf):
	# the autodetection is supposed to be performed before entering this method
v = conf.env
path = v.PATH
compiler = v.MSVC_COMPILER
version = v.MSVC_VERSION
compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler)
v.IFORT_MANIFEST = (compiler == 'intel' and version >= 11)
# compiler
fc = conf.find_program(compiler_name, var='FC', path_list=path)
# before setting anything, check if the compiler is really intel fortran
env = dict(conf.environ)
if path:
env.update(PATH = ';'.join(path))
if not conf.cmd_and_log(fc + ['/nologo', '/help'], env=env):
		conf.fatal('Intel Fortran compiler could not be identified')
v.FC_NAME = 'IFORT'
if not v.LINK_FC:
conf.find_program(linker_name, var='LINK_FC', path_list=path, mandatory=True)
if not v.AR:
conf.find_program(lib_name, path_list=path, var='AR', mandatory=True)
v.ARFLAGS = ['/nologo']
# manifest tool. Not required for VS 2003 and below. Must have for VS 2005 and later
if v.IFORT_MANIFEST:
conf.find_program('MT', path_list=path, var='MT')
v.MTFLAGS = ['/nologo']
try:
conf.load('winres')
except Errors.WafError:
Logs.warn('Resource compiler not found. Compiling resource file is disabled')
#######################################################################################################
##### conf above, build below
@after_method('apply_link')
@feature('fc')
def apply_flags_ifort(self):
"""
Adds additional flags implied by msvc, such as subsystems and pdb files::
def build(bld):
bld.stlib(source='main.c', target='bar', subsystem='gruik')
"""
if not self.env.IFORT_WIN32 or not getattr(self, 'link_task', None):
return
is_static = isinstance(self.link_task, ccroot.stlink_task)
subsystem = getattr(self, 'subsystem', '')
if subsystem:
subsystem = '/subsystem:%s' % subsystem
flags = is_static and 'ARFLAGS' or 'LINKFLAGS'
self.env.append_value(flags, subsystem)
if not is_static:
for f in self.env.LINKFLAGS:
d = f.lower()
if d[1:] == 'debug':
pdbnode = self.link_task.outputs[0].change_ext('.pdb')
self.link_task.outputs.append(pdbnode)
if getattr(self, 'install_task', None):
self.pdb_install_task = self.add_install_files(install_to=self.install_task.install_to, install_from=pdbnode)
break
@feature('fcprogram', 'fcshlib', 'fcprogram_test')
@after_method('apply_link')
def apply_manifest_ifort(self):
"""
Enables manifest embedding in Fortran DLLs when using ifort on Windows
See: http://msdn2.microsoft.com/en-us/library/ms235542(VS.80).aspx
"""
if self.env.IFORT_WIN32 and getattr(self, 'link_task', None):
# it seems ifort.exe cannot be called for linking
self.link_task.env.FC = self.env.LINK_FC
if self.env.IFORT_WIN32 and self.env.IFORT_MANIFEST and getattr(self, 'link_task', None):
out_node = self.link_task.outputs[0]
man_node = out_node.parent.find_or_declare(out_node.name + '.manifest')
self.link_task.outputs.append(man_node)
self.env.DO_MANIFEST = True
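# Usage sketch (added for illustration; not part of the tool): a project's
# wscript typically loads this tool during configuration, letting
# detect_ifort()/setup_ifort() pick the first usable compiler/architecture
# combination. MSVC_VERSIONS and MSVC_TARGETS narrow the search; the target
# name 'intel64' below is only an example.
#
#     def configure(conf):
#         conf.env.MSVC_TARGETS = ['intel64']
#         conf.load('ifort')
#
#     def build(bld):
#         bld(features='fc fcprogram', source='hello.f90', target='hello')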
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ConstantOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.util import compat
# TODO(josh11b): add tests with lists/tuples, Shape.
class ConstantTest(test.TestCase):
def _testCpu(self, x):
np_ans = np.array(x)
with context.device("/device:CPU:0"):
tf_ans = ops.convert_to_tensor(x).numpy()
if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testGpu(self, x):
device = test_util.gpu_device_name()
if device:
np_ans = np.array(x)
with context.device(device):
tf_ans = ops.convert_to_tensor(x).numpy()
if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
self.assertAllClose(np_ans, tf_ans)
else:
self.assertAllEqual(np_ans, tf_ans)
def _testAll(self, x):
self._testCpu(x)
self._testGpu(x)
def testFloat(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float32))
self._testAll(np.empty((2, 0, 5)).astype(np.float32))
orig = [-1.0, 2.0, 0.0]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
# Mix floats and ints
orig = [-1.5, 2, 0]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
orig = [-5, 2.5, 0]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
# Mix floats and ints that don't fit in int32
orig = [1, 2**42, 0.5]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
def testDouble(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float64))
self._testAll(
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float64))
self._testAll(np.empty((2, 0, 5)).astype(np.float64))
orig = [-5, 2.5, 0]
tf_ans = constant_op.constant(orig, dtypes_lib.float64)
self.assertEqual(dtypes_lib.float64, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
# This integer is not exactly representable as a double, gets rounded.
tf_ans = constant_op.constant(2**54 + 1, dtypes_lib.float64)
self.assertEqual(2**54, tf_ans.numpy())
# This integer is larger than all non-infinite numbers representable
# by a double, raises an exception.
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"out-of-range integer"):
constant_op.constant(10**310, dtypes_lib.float64)
def testInt32(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32))
self._testAll(
(100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int32))
self._testAll(np.empty((2, 0, 5)).astype(np.int32))
self._testAll([-1, 2])
def testInt64(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64))
self._testAll(
(100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int64))
self._testAll(np.empty((2, 0, 5)).astype(np.int64))
# Should detect out of range for int32 and use int64 instead.
orig = [2, 2**48, -2**48]
tf_ans = constant_op.constant(orig)
self.assertEqual(dtypes_lib.int64, tf_ans.dtype)
self.assertAllClose(np.array(orig), tf_ans.numpy())
# Out of range for an int64
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"out-of-range integer"):
constant_op.constant([2**72])
def testComplex64(self):
self._testAll(
np.complex(1, 2) *
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(
np.complex(1, 2) *
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(np.empty((2, 0, 5)).astype(np.complex64))
def testComplex128(self):
self._testAll(
np.complex(1, 2) * np.arange(-15, 15).reshape([2, 3, 5
]).astype(np.complex128))
self._testAll(
np.complex(1, 2) * np.random.normal(size=30).reshape(
[2, 3, 5]).astype(np.complex128))
self._testAll(np.empty((2, 0, 5)).astype(np.complex128))
def testString(self):
val = [compat.as_bytes(str(x)) for x in np.arange(-15, 15)]
self._testCpu(np.array(val).reshape([2, 3, 5]))
self._testCpu(np.empty((2, 0, 5)).astype(np.str_))
def testStringWithNulls(self):
val = ops.convert_to_tensor(b"\0\0\0\0").numpy()
self.assertEqual(len(val), 4)
self.assertEqual(val, b"\0\0\0\0")
val = ops.convert_to_tensor(b"xx\0xx").numpy()
self.assertEqual(len(val), 5)
self.assertAllEqual(val, b"xx\0xx")
nested = [[b"\0\0\0\0", b"xx\0xx"], [b"\0_\0_\0_\0", b"\0"]]
val = ops.convert_to_tensor(nested).numpy()
# NOTE(mrry): Do not use assertAllEqual, because it converts nested to a
# numpy array, which loses the null terminators.
self.assertEqual(val.tolist(), nested)
def testExplicitShapeNumPy(self):
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
shape=[2, 3, 5])
self.assertEqual(c.get_shape(), [2, 3, 5])
def testImplicitShapeNumPy(self):
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self.assertEqual(c.get_shape(), [2, 3, 5])
def testExplicitShapeList(self):
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
self.assertEqual(c.get_shape(), [7])
def testExplicitShapeFill(self):
c = constant_op.constant(12, shape=[7])
self.assertEqual(c.get_shape(), [7])
self.assertAllEqual([12, 12, 12, 12, 12, 12, 12], c.numpy())
def testExplicitShapeReshape(self):
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
shape=[5, 2, 3])
self.assertEqual(c.get_shape(), [5, 2, 3])
def testImplicitShapeList(self):
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7])
self.assertEqual(c.get_shape(), [7])
def testExplicitShapeNumber(self):
c = constant_op.constant(1, shape=[1])
self.assertEqual(c.get_shape(), [1])
def testImplicitShapeNumber(self):
c = constant_op.constant(1)
self.assertEqual(c.get_shape(), [])
def testShapeTooBig(self):
with self.assertRaises(TypeError):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
def testShapeTooSmall(self):
with self.assertRaises(TypeError):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
def testShapeWrong(self):
with self.assertRaisesRegexp(TypeError, None):
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
def testSparseValuesRaiseErrors(self):
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"non-rectangular Python sequence"):
constant_op.constant([[1, 2], [3]], dtype=dtypes_lib.int32)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, None):
constant_op.constant([[1, 2], [3]])
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, None):
constant_op.constant([[1, 2], [3], [4, 5]])
class AsTensorTest(test.TestCase):
def testAsTensorForTensorInput(self):
t = constant_op.constant(10.0)
x = ops.convert_to_tensor(t)
self.assertIs(t, x)
def testAsTensorForNonTensorInput(self):
x = ops.convert_to_tensor(10.0)
self.assertTrue(isinstance(x, ops.EagerTensor))
class ZerosTest(test.TestCase):
def _Zeros(self, shape):
ret = array_ops.zeros(shape)
self.assertEqual(shape, ret.get_shape())
return ret.numpy()
def testConst(self):
self.assertTrue(
np.array_equal(self._Zeros([2, 3]), np.array([[0] * 3] * 2)))
def testScalar(self):
self.assertEqual(0, self._Zeros([]))
self.assertEqual(0, self._Zeros(()))
scalar = array_ops.zeros(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(0, scalar.numpy())
def testDynamicSizes(self):
np_ans = np.array([[0] * 3] * 2)
# Creates a tensor of 2 x 3.
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of zeros of the same dimensions as "d".
z = array_ops.zeros(array_ops.shape(d))
out = z.numpy()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
def testDtype(self):
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = array_ops.zeros([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.zeros([2, 3]))
z = array_ops.zeros(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.zeros([2, 3]))
# Test explicit type control
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
dtypes_lib.bool,
# TODO(josh11b): Support string type here.
# dtypes_lib.string
]:
z = array_ops.zeros([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z_value = z.numpy()
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
z = array_ops.zeros(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z_value = z.numpy()
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
class ZerosLikeTest(test.TestCase):
def _compareZeros(self, dtype, use_gpu):
# Creates a tensor of non-zero values with shape 2 x 3.
# NOTE(kearnes): The default numpy dtype associated with tf.string is
    # np.object (and can't be changed without breaking a lot of things), which
# causes a TypeError in constant_op.constant below. Here we catch the
# special case of tf.string and set the numpy dtype appropriately.
if dtype == dtypes_lib.string:
numpy_dtype = np.string_
else:
numpy_dtype = dtype.as_numpy_dtype
d = constant_op.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = array_ops.zeros_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
# Test that the shape is correct
self.assertEqual([2, 3], z_var.get_shape())
# Test that the value is correct
z_value = z_var.numpy()
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
def testZerosLikeCPU(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
# TODO(josh11b): Support string type here.
# dtypes_lib.string
]:
self._compareZeros(dtype, use_gpu=False)
def testZerosLikeGPU(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.bool, dtypes_lib.int64,
# TODO(josh11b): Support string type here.
# dtypes_lib.string
]:
self._compareZeros(dtype, use_gpu=True)
def testZerosLikeDtype(self):
    # Make sure zeros_like works even for dtypes that cannot be cast to each other.
shape = (3, 5)
dtypes = np.float32, np.complex64
for in_type in dtypes:
x = np.arange(15).astype(in_type).reshape(*shape)
for out_type in dtypes:
y = array_ops.zeros_like(x, dtype=out_type).numpy()
self.assertEqual(y.dtype, out_type)
self.assertEqual(y.shape, shape)
self.assertAllEqual(y, np.zeros(shape, dtype=out_type))
class OnesTest(test.TestCase):
def _Ones(self, shape):
ret = array_ops.ones(shape)
self.assertEqual(shape, ret.get_shape())
return ret.numpy()
def testConst(self):
self.assertTrue(np.array_equal(self._Ones([2, 3]), np.array([[1] * 3] * 2)))
def testScalar(self):
self.assertEqual(1, self._Ones([]))
self.assertEqual(1, self._Ones(()))
scalar = array_ops.ones(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(1, scalar.numpy())
def testDynamicSizes(self):
np_ans = np.array([[1] * 3] * 2)
# Creates a tensor of 2 x 3.
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of ones of the same dimensions as "d".
z = array_ops.ones(array_ops.shape(d))
out = z.numpy()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
def testDtype(self):
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = array_ops.ones([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.ones([2, 3]))
z = array_ops.ones(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.ones([2, 3]))
# Test explicit type control
for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
dtypes_lib.bool):
z = array_ops.ones([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.ones([2, 3]))
z = array_ops.ones(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.numpy(), np.ones([2, 3]))
class OnesLikeTest(test.TestCase):
def testOnesLike(self):
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64
]:
numpy_dtype = dtype.as_numpy_dtype
# Creates a tensor of non-zero values with shape 2 x 3.
d = constant_op.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = array_ops.ones_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
z_value = z_var.numpy()
# Test that the value is correct
self.assertTrue(np.array_equal(z_value, np.array([[1] * 3] * 2)))
self.assertEqual([2, 3], z_var.get_shape())
class FillTest(test.TestCase):
def _compare(self, dims, val, np_ans, use_gpu):
ctx = context.get_default_context()
device = "GPU:0" if (use_gpu and ctx.num_gpus()) else "CPU:0"
with ops.device(device):
tf_ans = array_ops.fill(dims, val, name="fill")
out = tf_ans.numpy()
self.assertAllClose(np_ans, out)
def _compareAll(self, dims, val, np_ans):
self._compare(dims, val, np_ans, False)
self._compare(dims, val, np_ans, True)
def testFillFloat(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillDouble(self):
np_ans = np.array([[3.1415] * 3] * 2).astype(np.float64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt32(self):
np_ans = np.array([[42] * 3] * 2).astype(np.int32)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillInt64(self):
np_ans = np.array([[-42] * 3] * 2).astype(np.int64)
self._compareAll([2, 3], np_ans[0][0], np_ans)
def testFillComplex64(self):
np_ans = np.array([[0.15] * 3] * 2).astype(np.complex64)
self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)
def testFillComplex128(self):
np_ans = np.array([[0.15] * 3] * 2).astype(np.complex128)
self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)
def testFillString(self):
np_ans = np.array([[b"yolo"] * 3] * 2)
tf_ans = array_ops.fill([2, 3], np_ans[0][0], name="fill").numpy()
self.assertAllEqual(np_ans, tf_ans)
def testFillNegative(self):
for shape in (-1,), (2, -1), (-1, 2), (-2), (-3):
with self.assertRaises(errors_impl.InvalidArgumentError):
array_ops.fill(shape, 7)
def testShapeFunctionEdgeCases(self):
# Non-vector dimensions.
with self.assertRaises(errors_impl.InvalidArgumentError):
array_ops.fill([[0, 1], [2, 3]], 1.0)
# Non-scalar value.
with self.assertRaises(errors_impl.InvalidArgumentError):
array_ops.fill([3, 2], [1.0, 2.0])
if __name__ == "__main__":
test.main()
|
|
# -*- coding: iso-8859-1 -*-
"""Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (func_*, co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargspec(), getargvalues() - get info about function arguments
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
"""
# This module is in the public domain. No warranties.
__author__ = 'Ka-Ping Yee <[email protected]>'
__date__ = '1 Jan 2001'
import sys
import os
import types
import string
import re
import dis
import imp
import tokenize
import linecache
from operator import attrgetter
from collections import namedtuple
# These constants are from Include/code.h.
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 0x1, 0x2, 0x4, 0x8
CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40
# See Include/object.h
TPFLAGS_IS_ABSTRACT = 1 << 20
# ----------------------------------------------------------- type-checking
def ismodule(object):
"""Return true if the object is a module.
Module objects provide these attributes:
__doc__ documentation string
__file__ filename (missing for built-in modules)"""
return isinstance(object, types.ModuleType)
def isclass(object):
"""Return true if the object is a class.
Class objects provide these attributes:
__doc__ documentation string
__module__ name of module in which this class was defined"""
return isinstance(object, types.ClassType) or hasattr(object, '__bases__')
def ismethod(object):
"""Return true if the object is an instance method.
Instance method objects provide these attributes:
__doc__ documentation string
__name__ name with which this method was defined
im_class class object in which this method belongs
im_func function object containing implementation of method
im_self instance to which this method is bound, or None"""
return isinstance(object, types.MethodType)
def ismethoddescriptor(object):
"""Return true if the object is a method descriptor.
But not if ismethod() or isclass() or isfunction() are true.
This is new in Python 2.2, and, for example, is true of int.__add__.
An object passing this test has a __get__ attribute but not a __set__
attribute, but beyond that the set of attributes varies. __name__ is
usually sensible, and __doc__ often is.
Methods implemented via descriptors that also pass one of the other
tests return false from the ismethoddescriptor() test, simply because
the other tests promise more -- you can, e.g., count on having the
im_func attribute (etc) when an object passes ismethod()."""
return (hasattr(object, "__get__")
and not hasattr(object, "__set__") # else it's a data descriptor
and not ismethod(object) # mutual exclusion
and not isfunction(object)
and not isclass(object))
def isdatadescriptor(object):
"""Return true if the object is a data descriptor.
Data descriptors have both a __get__ and a __set__ attribute. Examples are
properties (defined in Python) and getsets and members (defined in C).
Typically, data descriptors will also have __name__ and __doc__ attributes
(properties, getsets, and members have both of these attributes), but this
is not guaranteed."""
return (hasattr(object, "__set__") and hasattr(object, "__get__"))
if hasattr(types, 'MemberDescriptorType'):
# CPython and equivalent
def ismemberdescriptor(object):
"""Return true if the object is a member descriptor.
Member descriptors are specialized descriptors defined in extension
modules."""
return isinstance(object, types.MemberDescriptorType)
else:
# Other implementations
def ismemberdescriptor(object):
"""Return true if the object is a member descriptor.
Member descriptors are specialized descriptors defined in extension
modules."""
return False
if hasattr(types, 'GetSetDescriptorType'):
# CPython and equivalent
def isgetsetdescriptor(object):
"""Return true if the object is a getset descriptor.
getset descriptors are specialized descriptors defined in extension
modules."""
return isinstance(object, types.GetSetDescriptorType)
else:
# Other implementations
def isgetsetdescriptor(object):
"""Return true if the object is a getset descriptor.
getset descriptors are specialized descriptors defined in extension
modules."""
return False
def isfunction(object):
"""Return true if the object is a user-defined function.
Function objects provide these attributes:
__doc__ documentation string
__name__ name with which this function was defined
func_code code object containing compiled function bytecode
func_defaults tuple of any default values for arguments
func_doc (same as __doc__)
func_globals global namespace in which this function was defined
func_name (same as __name__)"""
return isinstance(object, types.FunctionType)
def isgeneratorfunction(object):
"""Return true if the object is a user-defined generator function.
    Generator function objects provide the same attributes as functions.
See isfunction.__doc__ for attributes listing."""
if (isfunction(object) or ismethod(object)) and \
object.func_code.co_flags & CO_GENERATOR:
return True
def isgenerator(object):
"""Return true if the object is a generator.
Generator objects provide these attributes:
        __iter__        defined to support iteration over container
close raises a new GeneratorExit exception inside the
generator to terminate the iteration
gi_code code object
gi_frame frame object or possibly None once the generator has
been exhausted
gi_running set to 1 when generator is executing, 0 otherwise
next return the next item from the container
send resumes the generator and "sends" a value that becomes
the result of the current yield-expression
throw used to raise an exception inside the generator"""
return isinstance(object, types.GeneratorType)
def istraceback(object):
"""Return true if the object is a traceback.
Traceback objects provide these attributes:
tb_frame frame object at this level
tb_lasti index of last attempted instruction in bytecode
tb_lineno current line number in Python source code
tb_next next inner traceback object (called by this level)"""
return isinstance(object, types.TracebackType)
def isframe(object):
"""Return true if the object is a frame object.
Frame objects provide these attributes:
f_back next outer frame object (this frame's caller)
f_builtins built-in namespace seen by this frame
f_code code object being executed in this frame
f_exc_traceback traceback if raised in this frame, or None
f_exc_type exception type if raised in this frame, or None
f_exc_value exception value if raised in this frame, or None
f_globals global namespace seen by this frame
f_lasti index of last attempted instruction in bytecode
f_lineno current line number in Python source code
f_locals local namespace seen by this frame
f_restricted 0 or 1 if frame is in restricted execution mode
f_trace tracing function for this frame, or None"""
return isinstance(object, types.FrameType)
def iscode(object):
"""Return true if the object is a code object.
Code objects provide these attributes:
co_argcount number of arguments (not including * or ** args)
co_code string of raw compiled bytecode
co_consts tuple of constants used in the bytecode
co_filename name of file in which this code object was created
co_firstlineno number of first line in Python source code
co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
co_lnotab encoded mapping of line numbers to bytecode indices
co_name name with which this code object was defined
co_names tuple of names of local variables
co_nlocals number of local variables
co_stacksize virtual machine stack space required
co_varnames tuple of names of arguments and local variables"""
return isinstance(object, types.CodeType)
def isbuiltin(object):
"""Return true if the object is a built-in function or method.
Built-in functions and methods provide these attributes:
__doc__ documentation string
__name__ original name of this function or method
__self__ instance to which a method is bound, or None"""
return isinstance(object, types.BuiltinFunctionType)
def isroutine(object):
"""Return true if the object is any kind of function or method."""
return (isbuiltin(object)
or isfunction(object)
or ismethod(object)
or ismethoddescriptor(object))
def isgenerator(object):
"""Return true if the object is a generator object."""
return isinstance(object, types.GeneratorType)
def isabstract(object):
"""Return true if the object is an abstract base class (ABC)."""
return isinstance(object, type) and object.__flags__ & TPFLAGS_IS_ABSTRACT
def getmembers(object, predicate=None):
"""Return all members of an object as (name, value) pairs sorted by name.
Optionally, only return members that satisfy a given predicate."""
results = []
for key in dir(object):
value = getattr(object, key)
if not predicate or predicate(value):
results.append((key, value))
results.sort()
return results
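# Illustrative sketch (not part of the original module): getmembers() combines
# naturally with the is*() predicates above, e.g.
#     getmembers(os.path, isfunction)  -> sorted (name, function) pairs
#     getmembers(SomeClass, ismethod)  -> the class's methods, sorted by name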
Attribute = namedtuple('Attribute', 'name kind defining_class object')
def classify_class_attrs(cls):
"""Return list of attribute-descriptor tuples.
For each name in dir(cls), the return list contains a 4-tuple
with these elements:
0. The name (a string).
1. The kind of attribute this is, one of these strings:
'class method' created via classmethod()
'static method' created via staticmethod()
'property' created via property()
'method' any other flavor of method
'data' not a method
2. The class which defined this attribute (a class).
3. The object as obtained directly from the defining class's
__dict__, not via getattr. This is especially important for
data attributes: C.data is just a data object, but
C.__dict__['data'] may be a data descriptor with additional
info, like a __doc__ string.
"""
mro = getmro(cls)
names = dir(cls)
result = []
for name in names:
# Get the object associated with the name.
# Getting an obj from the __dict__ sometimes reveals more than
# using getattr. Static and class methods are dramatic examples.
if name in cls.__dict__:
obj = cls.__dict__[name]
else:
obj = getattr(cls, name)
# Figure out where it was defined.
homecls = getattr(obj, "__objclass__", None)
if homecls is None:
# search the dicts.
for base in mro:
if name in base.__dict__:
homecls = base
break
# Get the object again, in order to get it from the defining
# __dict__ instead of via getattr (if possible).
if homecls is not None and name in homecls.__dict__:
obj = homecls.__dict__[name]
# Also get the object via getattr.
obj_via_getattr = getattr(cls, name)
# Classify the object.
if isinstance(obj, staticmethod):
kind = "static method"
elif isinstance(obj, classmethod):
kind = "class method"
elif isinstance(obj, property):
kind = "property"
elif (ismethod(obj_via_getattr) or
ismethoddescriptor(obj_via_getattr)):
kind = "method"
else:
kind = "data"
result.append(Attribute(name, kind, homecls, obj))
return result
# ----------------------------------------------------------- class helpers
def _searchbases(cls, accum):
# Simulate the "classic class" search order.
if cls in accum:
return
accum.append(cls)
for base in cls.__bases__:
_searchbases(base, accum)
def getmro(cls):
"Return tuple of base classes (including cls) in method resolution order."
if hasattr(cls, "__mro__"):
return cls.__mro__
else:
result = []
_searchbases(cls, result)
return tuple(result)
# -------------------------------------------------- source code extraction
def indentsize(line):
"""Return the indent size, in spaces, at the start of a line of text."""
expline = string.expandtabs(line)
return len(expline) - len(string.lstrip(expline))
def getdoc(object):
"""Get the documentation string for an object.
All tabs are expanded to spaces. To clean up docstrings that are
    indented to line up with blocks of code, any whitespace that can be
uniformly removed from the second line onwards is removed."""
try:
doc = object.__doc__
except AttributeError:
return None
if not isinstance(doc, types.StringTypes):
return None
return cleandoc(doc)
def cleandoc(doc):
"""Clean up indentation from docstrings.
Any whitespace that can be uniformly removed from the second line
onwards is removed."""
try:
lines = string.split(string.expandtabs(doc), '\n')
except UnicodeError:
return None
else:
# Find minimum indentation of any non-blank lines after first line.
margin = sys.maxint
for line in lines[1:]:
content = len(string.lstrip(line))
if content:
indent = len(line) - content
margin = min(margin, indent)
# Remove indentation.
if lines:
lines[0] = lines[0].lstrip()
if margin < sys.maxint:
for i in range(1, len(lines)): lines[i] = lines[i][margin:]
# Remove any trailing or leading blank lines.
while lines and not lines[-1]:
lines.pop()
while lines and not lines[0]:
lines.pop(0)
return string.join(lines, '\n')
def getfile(object):
"""Work out which source or compiled file an object was defined in."""
if ismodule(object):
if hasattr(object, '__file__'):
return object.__file__
raise TypeError('arg is a built-in module')
if isclass(object):
object = sys.modules.get(object.__module__)
if hasattr(object, '__file__'):
return object.__file__
raise TypeError('arg is a built-in class')
if ismethod(object):
object = object.im_func
if isfunction(object):
object = object.func_code
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
return object.co_filename
raise TypeError('arg is not a module, class, method, '
'function, traceback, frame, or code object')
ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type')
def getmoduleinfo(path):
"""Get the module name, suffix, mode, and module type for a given file."""
filename = os.path.basename(path)
suffixes = map(lambda info:
(-len(info[0]), info[0], info[1], info[2]),
imp.get_suffixes())
suffixes.sort() # try longest suffixes first, in case they overlap
for neglen, suffix, mode, mtype in suffixes:
if filename[neglen:] == suffix:
return ModuleInfo(filename[:neglen], suffix, mode, mtype)
def getmodulename(path):
"""Return the module name for a given file, or None."""
info = getmoduleinfo(path)
if info: return info[0]
def getsourcefile(object):
"""Return the Python source file an object was defined in, if it exists."""
filename = getfile(object)
if string.lower(filename[-4:]) in ('.pyc', '.pyo'):
filename = filename[:-4] + '.py'
for suffix, mode, kind in imp.get_suffixes():
if 'b' in mode and string.lower(filename[-len(suffix):]) == suffix:
# Looks like a binary file. We want to only return a text file.
return None
if os.path.exists(filename):
return filename
# only return a non-existent filename if the module has a PEP 302 loader
if hasattr(getmodule(object, filename), '__loader__'):
return filename
def getabsfile(object, _filename=None):
"""Return an absolute path to the source or compiled file for an object.
The idea is for each object to have a unique origin, so this routine
normalizes the result as much as possible."""
if _filename is None:
_filename = getsourcefile(object) or getfile(object)
return os.path.normcase(os.path.abspath(_filename))
modulesbyfile = {}
_filesbymodname = {}
def getmodule(object, _filename=None):
"""Return the module an object was defined in, or None if not found."""
if ismodule(object):
return object
if hasattr(object, '__module__'):
return sys.modules.get(object.__module__)
# Try the filename to modulename cache
if _filename is not None and _filename in modulesbyfile:
return sys.modules.get(modulesbyfile[_filename])
# Try the cache again with the absolute file name
try:
file = getabsfile(object, _filename)
except TypeError:
return None
if file in modulesbyfile:
return sys.modules.get(modulesbyfile[file])
# Update the filename to module name cache and check yet again
# Copy sys.modules in order to cope with changes while iterating
for modname, module in sys.modules.items():
if ismodule(module) and hasattr(module, '__file__'):
f = module.__file__
if f == _filesbymodname.get(modname, None):
# Have already mapped this module, so skip it
continue
_filesbymodname[modname] = f
f = getabsfile(module)
# Always map to the name the module knows itself by
modulesbyfile[f] = modulesbyfile[
os.path.realpath(f)] = module.__name__
if file in modulesbyfile:
return sys.modules.get(modulesbyfile[file])
# Check the main module
main = sys.modules['__main__']
if not hasattr(object, '__name__'):
return None
if hasattr(main, object.__name__):
mainobject = getattr(main, object.__name__)
if mainobject is object:
return main
# Check builtins
builtin = sys.modules['__builtin__']
if hasattr(builtin, object.__name__):
builtinobject = getattr(builtin, object.__name__)
if builtinobject is object:
return builtin
def findsource(object):
"""Return the entire source file and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of all the lines
in the file and the line number indexes a line in that list. An IOError
is raised if the source code cannot be retrieved."""
file = getsourcefile(object) or getfile(object)
module = getmodule(object, file)
if module:
lines = linecache.getlines(file, module.__dict__)
else:
lines = linecache.getlines(file)
if not lines:
raise IOError('could not get source code')
if ismodule(object):
return lines, 0
if isclass(object):
name = object.__name__
pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
# make some effort to find the best matching class definition:
# use the one with the least indentation, which is the one
# that's most probably not inside a function definition.
candidates = []
for i in range(len(lines)):
match = pat.match(lines[i])
if match:
# if it's at toplevel, it's already the best one
if lines[i][0] == 'c':
return lines, i
# else add whitespace to candidate list
candidates.append((match.group(1), i))
if candidates:
# this will sort by whitespace, and by line number,
# less whitespace first
candidates.sort()
return lines, candidates[0][1]
else:
raise IOError('could not find class definition')
if ismethod(object):
object = object.im_func
if isfunction(object):
object = object.func_code
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
if not hasattr(object, 'co_firstlineno'):
raise IOError('could not find function definition')
lnum = object.co_firstlineno - 1
pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
while lnum > 0:
if pat.match(lines[lnum]): break
lnum = lnum - 1
return lines, lnum
raise IOError('could not find code object')
def getcomments(object):
"""Get lines of comments immediately preceding an object's source code.
Returns None when source can't be found.
"""
try:
lines, lnum = findsource(object)
except (IOError, TypeError):
return None
if ismodule(object):
# Look for a comment block at the top of the file.
start = 0
if lines and lines[0][:2] == '#!': start = 1
while start < len(lines) and string.strip(lines[start]) in ('', '#'):
start = start + 1
if start < len(lines) and lines[start][:1] == '#':
comments = []
end = start
while end < len(lines) and lines[end][:1] == '#':
comments.append(string.expandtabs(lines[end]))
end = end + 1
return string.join(comments, '')
# Look for a preceding block of comments at the same indentation.
elif lnum > 0:
indent = indentsize(lines[lnum])
end = lnum - 1
if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \
indentsize(lines[end]) == indent:
comments = [string.lstrip(string.expandtabs(lines[end]))]
if end > 0:
end = end - 1
comment = string.lstrip(string.expandtabs(lines[end]))
while comment[:1] == '#' and indentsize(lines[end]) == indent:
comments[:0] = [comment]
end = end - 1
if end < 0: break
comment = string.lstrip(string.expandtabs(lines[end]))
while comments and string.strip(comments[0]) == '#':
comments[:1] = []
while comments and string.strip(comments[-1]) == '#':
comments[-1:] = []
return string.join(comments, '')
class EndOfBlock(Exception): pass
class BlockFinder:
"""Provide a tokeneater() method to detect the end of a code block."""
def __init__(self):
self.indent = 0
self.islambda = False
self.started = False
self.passline = False
self.last = 1
def tokeneater(self, type, token, srow_scol, erow_ecol, line):
srow, scol = srow_scol
erow, ecol = erow_ecol
if not self.started:
# look for the first "def", "class" or "lambda"
if token in ("def", "class", "lambda"):
if token == "lambda":
self.islambda = True
self.started = True
self.passline = True # skip to the end of the line
elif type == tokenize.NEWLINE:
self.passline = False # stop skipping when a NEWLINE is seen
self.last = srow
if self.islambda: # lambdas always end at the first NEWLINE
raise EndOfBlock
elif self.passline:
pass
elif type == tokenize.INDENT:
self.indent = self.indent + 1
self.passline = True
elif type == tokenize.DEDENT:
self.indent = self.indent - 1
            # the end of matching indent/dedent pairs ends a block
# (note that this only works for "def"/"class" blocks,
# not e.g. for "if: else:" or "try: finally:" blocks)
if self.indent <= 0:
raise EndOfBlock
elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level ends the previous
# block as well, except the pseudo-tokens COMMENT and NL.
raise EndOfBlock
def getblock(lines):
"""Extract the block of code at the top of the given list of lines."""
blockfinder = BlockFinder()
try:
tokenize.tokenize(iter(lines).next, blockfinder.tokeneater)
except (EndOfBlock, IndentationError):
pass
return lines[:blockfinder.last]
def getsourcelines(object):
"""Return a list of source lines and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of the lines
corresponding to the object and the line number indicates where in the
original source file the first line of code was found. An IOError is
raised if the source code cannot be retrieved."""
lines, lnum = findsource(object)
if ismodule(object): return lines, 0
else: return getblock(lines[lnum:]), lnum + 1
def getsource(object):
"""Return the text of the source code for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a single string. An
IOError is raised if the source code cannot be retrieved."""
lines, lnum = getsourcelines(object)
return string.join(lines, '')
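# Illustrative sketch (not part of the original module): applied to objects in
# this file, getsource(cleandoc) returns the text of cleandoc() above as one
# string, and getsourcelines(cleandoc) returns the same block as
# (list_of_lines, first_line_number).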
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
"""Recursive helper function for getclasstree()."""
results = []
classes.sort(key=attrgetter('__module__', '__name__'))
for c in classes:
results.append((c, c.__bases__))
if c in children:
results.append(walktree(children[c], children, c))
return results
def getclasstree(classes, unique=0):
"""Arrange the given list of classes into a hierarchy of nested lists.
Where a nested list appears, it contains classes derived from the class
whose entry immediately precedes the list. Each entry is a 2-tuple
containing a class and a tuple of its base classes. If the 'unique'
argument is true, exactly one entry appears in the returned structure
for each class in the given list. Otherwise, classes using multiple
inheritance and their descendants will appear multiple times."""
children = {}
roots = []
for c in classes:
if c.__bases__:
for parent in c.__bases__:
if not parent in children:
children[parent] = []
children[parent].append(c)
if unique and parent in classes: break
elif c not in roots:
roots.append(c)
for parent in children:
if parent not in classes:
roots.append(parent)
return walktree(roots, children, None)
# ------------------------------------------------ argument list extraction
Arguments = namedtuple('Arguments', 'args varargs keywords')
def getargs(co):
"""Get information about the arguments accepted by a code object.
Three things are returned: (args, varargs, varkw), where 'args' is
a list of argument names (possibly containing nested lists), and
'varargs' and 'varkw' are the names of the * and ** arguments or None."""
if not iscode(co):
raise TypeError('arg is not a code object')
nargs = co.co_argcount
names = co.co_varnames
args = list(names[:nargs])
step = 0
# The following acrobatics are for anonymous (tuple) arguments.
for i in range(nargs):
if args[i][:1] in ('', '.'):
stack, remain, count = [], [], []
while step < len(co.co_code):
op = ord(co.co_code[step])
step = step + 1
if op >= dis.HAVE_ARGUMENT:
opname = dis.opname[op]
value = ord(co.co_code[step]) + ord(co.co_code[step+1])*256
step = step + 2
if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'):
remain.append(value)
count.append(value)
elif opname == 'STORE_FAST':
stack.append(names[value])
# Special case for sublists of length 1: def foo((bar))
# doesn't generate the UNPACK_TUPLE bytecode, so if
# `remain` is empty here, we have such a sublist.
if not remain:
stack[0] = [stack[0]]
break
else:
remain[-1] = remain[-1] - 1
while remain[-1] == 0:
remain.pop()
size = count.pop()
stack[-size:] = [stack[-size:]]
if not remain: break
remain[-1] = remain[-1] - 1
if not remain: break
args[i] = stack[0]
varargs = None
if co.co_flags & CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
return Arguments(args, varargs, varkw)
ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')
def getargspec(func):
"""Get the names and default values of a function's arguments.
A tuple of four things is returned: (args, varargs, varkw, defaults).
'args' is a list of the argument names (it may contain nested lists).
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'defaults' is an n-tuple of the default values of the last n arguments.
"""
if ismethod(func):
func = func.im_func
if not isfunction(func):
raise TypeError('arg is not a Python function')
args, varargs, varkw = getargs(func.func_code)
return ArgSpec(args, varargs, varkw, func.func_defaults)
ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')
def getargvalues(frame):
"""Get information about arguments passed into a particular frame.
A tuple of four things is returned: (args, varargs, varkw, locals).
'args' is a list of the argument names (it may contain nested lists).
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'locals' is the locals dictionary of the given frame."""
args, varargs, varkw = getargs(frame.f_code)
return ArgInfo(args, varargs, varkw, frame.f_locals)
def joinseq(seq):
if len(seq) == 1:
return '(' + seq[0] + ',)'
else:
return '(' + string.join(seq, ', ') + ')'
def strseq(object, convert, join=joinseq):
"""Recursively walk a sequence, stringifying each element."""
if type(object) in (list, tuple):
return join(map(lambda o, c=convert, j=join: strseq(o, c, j), object))
else:
return convert(object)
def formatargspec(args, varargs=None, varkw=None, defaults=None,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
join=joinseq):
"""Format an argument spec from the 4 values returned by getargspec.
The first four arguments are (args, varargs, varkw, defaults). The
other four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings. The ninth
argument is an optional function to format the sequence of arguments."""
specs = []
if defaults:
firstdefault = len(args) - len(defaults)
for i in range(len(args)):
spec = strseq(args[i], formatarg, join)
if defaults and i >= firstdefault:
spec = spec + formatvalue(defaults[i - firstdefault])
specs.append(spec)
if varargs is not None:
specs.append(formatvarargs(varargs))
if varkw is not None:
specs.append(formatvarkw(varkw))
return '(' + string.join(specs, ', ') + ')'
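# Illustrative example (added for documentation; not part of the original
# module): round-tripping a simple Python 2 signature through getargspec()
# and formatargspec(). The helper name _argspec_example is arbitrary.
def _argspec_example():
    def f(a, b=1, *rest, **opts):
        pass
    spec = getargspec(f)
    # spec == ArgSpec(args=['a', 'b'], varargs='rest', keywords='opts', defaults=(1,))
    return formatargspec(*spec)  # -> '(a, b=1, *rest, **opts)'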
def formatargvalues(args, varargs, varkw, locals,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
join=joinseq):
"""Format an argument spec from the 4 values returned by getargvalues.
The first four arguments are (args, varargs, varkw, locals). The
next four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings. The ninth
argument is an optional function to format the sequence of arguments."""
def convert(name, locals=locals,
formatarg=formatarg, formatvalue=formatvalue):
return formatarg(name) + formatvalue(locals[name])
specs = []
for i in range(len(args)):
specs.append(strseq(args[i], convert, join))
if varargs:
specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
if varkw:
specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
return '(' + string.join(specs, ', ') + ')'
# -------------------------------------------------- stack frame extraction
Traceback = namedtuple('Traceback', 'filename lineno function code_context index')
def getframeinfo(frame, context=1):
"""Get information about a frame or traceback object.
A tuple of five things is returned: the filename, the line number of
the current line, the function name, a list of lines of context from
the source code, and the index of the current line within that list.
The optional second argument specifies the number of lines of context
to return, which are centered around the current line."""
if istraceback(frame):
lineno = frame.tb_lineno
frame = frame.tb_frame
else:
lineno = frame.f_lineno
if not isframe(frame):
raise TypeError('arg is not a frame or traceback object')
filename = getsourcefile(frame) or getfile(frame)
if context > 0:
start = lineno - 1 - context//2
try:
lines, lnum = findsource(frame)
except IOError:
lines = index = None
else:
start = max(start, 1)
start = max(0, min(start, len(lines) - context))
lines = lines[start:start+context]
index = lineno - 1 - start
else:
lines = index = None
return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
"""Get the line number from a frame object, allowing for optimization."""
# FrameType.f_lineno is now a descriptor that grovels co_lnotab
return frame.f_lineno
def getouterframes(frame, context=1):
"""Get a list of records for a frame and all higher (calling) frames.
Each record contains a frame object, filename, line number, function
name, a list of lines of context, and index within the context."""
framelist = []
while frame:
framelist.append((frame,) + getframeinfo(frame, context))
frame = frame.f_back
return framelist
def getinnerframes(tb, context=1):
"""Get a list of records for a traceback's frame and all lower frames.
Each record contains a frame object, filename, line number, function
name, a list of lines of context, and index within the context."""
framelist = []
while tb:
framelist.append((tb.tb_frame,) + getframeinfo(tb, context))
tb = tb.tb_next
return framelist
currentframe = sys._getframe
def stack(context=1):
"""Return a list of records for the stack above the caller's frame."""
return getouterframes(sys._getframe(1), context)
def trace(context=1):
"""Return a list of records for the stack below the current exception."""
return getinnerframes(sys.exc_info()[2], context)
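# Illustrative example (added for documentation; not part of the original
# module): locating the caller of a function with stack(). Each record is
# (frame, filename, lineno, function, code_context, index).
def _caller_info():
    rec = stack()[1]
    return rec[1], rec[2], rec[3]   # caller's filename, line number, function name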
|
|
#!/usr/bin/env python2.7
import sys

from tac import *
class CodeGenState(object):
def __init__(self):
self.output = []
self.param_count = 0
self.pos = 0
self.pos_stack = []
self.strings = []
        # This will store the offsets for all of the frames
self.decls = [{}]
self.param_offset = [0]
self.arg_offset = [0]
self.label_count = 0
def new_frame(self):
self.decls.append({})
self.param_offset.append(0)
self.arg_offset.append(0)
def delete_frame(self):
self.decls.pop()
self.param_offset.pop()
self.arg_offset.pop()
def add_decl(self, decl):
self.decls[-1][decl.name] = -self.param_offset[-1] - 4
self.param_offset[-1] += self.sizeof(decl.typename)
def add_arg(self, arg):
self.decls[-1][arg.identifier] = self.arg_offset[-1] + 8
self.arg_offset[-1] += self.sizeof(arg.type)
def sizeof(self, type):
return 4
def outl(self, line, *args, **kwargs):
self.out("\t" + line, *args, **kwargs)
def out(self, line, *args, **kwargs):
self.output.append(str(line).format(*args, **kwargs))
def load(self, register, var):
if isinstance(var, (ast.Literal, str)):
if isinstance(var, ast.String):
self.outl("mov {}, strconst_{}", register, var.count)
self.strings.append(var)
else:
self.outl("mov {}, {}", register, var)
elif isinstance(var, ast.Identifier):
self.outl("mov {}, [ebp + {}]",register, self.get_offset(var))
def store(self, loc, val):
offset0 = self.get_offset(loc)
if isinstance(val, ast.Literal):
self.outl("mov dword [ebp + {}], {}", offset, val)
elif isinstance(val, ast.Identifier):
self.load("eax", val)
self.outl("mov dword [ebp + {}], eax", offset0)
elif isinstance(val, str):
self.outl("mov dword [ebp + {}], {}", offset0, val)
def get_offset(self, identifier):
return self.decls[-1][identifier]
def push(self, register):
self.pos += 4
self.outl("push {}", register)
def pop(self, register):
self.pos -= 4
self.outl("pop {}", register)
def add_stack(self, amount):
self.pos += amount
self.outl("sub esp, {}", amount)
def sub_stack(self, amount):
self.pos -= amount
self.outl("add esp, {}", amount)
def set_base_pointer(self):
self.pos_stack.append(self.pos)
self.outl("push ebp")
self.outl("mov ebp, esp")
self.pos = 0
def unset_base_pointer(self):
self.outl("mov esp, ebp")
self.pop("ebp")
self.pos = self.pos_stack.pop()
def make_label(self):
t = self.label_count
self.label_count += 1
return t
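# Frame layout implied by the bookkeeping above (a sketch inferred from
# add_decl()/add_arg(); sizeof() currently treats every type as 4 bytes):
#
#     [ebp + 8], [ebp + 12], ...   incoming arguments
#     [ebp + 4]                    return address
#     [ebp + 0]                    saved ebp (set_base_pointer)
#     [ebp - 4], [ebp - 8], ...    local declarations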
def gen_StartFunc(x, state):
state.new_frame()
state.out("{}:", x.identifier)
state.set_base_pointer()
def gen_Decl(x, state):
state.add_decl(x)
def gen_EndDecls(x, state):
state.add_stack(state.param_offset[-1])
def gen_FuncCall(x, state):
state.outl("call {}", x.identifier)
state.store(x.retval, "eax")
state.sub_stack(state.param_count)
state.param_count = 0
def gen_Param(x, state):
state.load("eax", x.value)
state.push("eax")
state.param_count += 4
def gen_Argument(x, state):
state.add_arg(x)
def gen_Op(x, state):
"""
<op> <result> <lhs> <rhs>
"""
cmp_list = ["<", ">", "<=", ">=", "=="]
if x.op in cmp_list:
return gen_CMP(x, state)
instructions = {
"+" : "add eax,",
"-" : "sub eax,",
"*" : "mul" ,
"<" : "cmp eax,",
"/" : "div"
}
instr = instructions[x.op]
state.load("eax", x.lhs)
state.load("ebx", x.rhs)
state.outl("{} ebx", instr)
state.outl("mov [ebp + {}], eax", state.get_offset(x.assign))
def gen_CMP(x, state):
state.load("ecx", "1")
state.load("eax", x.lhs)
state.load("ebx", x.rhs)
state.outl("cmp eax, ebx")
label = state.make_label()
jump_table = { "<" : "jl",
">" : "jg",
"<=" : "jle",
">=" : "jge",
"==" : "je",
"!=" : "jne"}
state.outl("{} .cmp{}", jump_table[x.op], label)
state.load("ecx", "0")
state.out(".cmp{}:", label)
state.store(x.assign, "ecx")
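# Sketch of the code emitted by gen_CMP for "t0 = a < b": ecx starts at 1 and
# is cleared unless the conditional jump is taken, then the flag is stored:
#
#     mov ecx, 1
#     mov eax, [ebp + off(a)]
#     mov ebx, [ebp + off(b)]
#     cmp eax, ebx
#     jl .cmpN
#     mov ecx, 0
# .cmpN:
#     mov dword [ebp + off(t0)], ecx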
def gen_Assign(x, state):
offset = state.get_offset(x.identifier)
state.load("eax", x.var)
state.store(x.identifier, "eax")
def gen_EndFunc(x, state):
state.out(".end:")
state.unset_base_pointer()
state.outl("ret")
state.delete_frame()
def gen_Return(x, state):
if x.value:
state.load("eax", x.value)
state.outl("jmp .end")
def gen_JZ(x, state):
state.load("eax", x.var)
state.outl("cmp eax, 0")
state.outl("jz .L{}", x.label.value)
def gen_JP(x, state):
state.outl("jmp .L{}", x.label.value)
def gen_JNZ(x, state):
state.load("eax", x.var)
state.outl("cmp eax, 0")
state.outl("jnz .L{}", x.label.value)
def output_print(state):
state.outl("mov eax, 4")
state.outl("mov ebx, 1")
state.outl("mov ecx, print")
state.outl("mov edx, print_len")
state.outl("int 80h")
def gen_asm(tac):
state = CodeGenState()
state.out("[BITS 32]")
state.out("section .bss")
state.out("str0: resb 0x20")
setup_stack = False
if setup_stack:
state.out("_stack_start:")
state.outl("resb 0xffff")
state.out("_stack_end:")
state.out("section .text")
state.outl("global _start")
state.out("_start:")
if setup_stack:
state.outl("mov esp, _stack_start")
state.outl("mov ebp, esp")
state.outl("call main")
state.push("eax")
state.outl("call exit")
for x in tac:
state.out(";------------------------------------| {}", x)
if isinstance(x, StartFunc):
gen_StartFunc(x, state)
elif isinstance(x, EndFunc):
gen_EndFunc(x, state)
elif isinstance(x, Param):
gen_Param(x, state)
elif isinstance(x, Argument):
gen_Argument(x, state)
elif isinstance(x, Op):
gen_Op(x, state)
elif isinstance(x, Return):
gen_Return(x, state)
elif isinstance(x, JP):
gen_JP(x, state)
elif isinstance(x, JNZ):
gen_JNZ(x, state)
elif isinstance(x, JZ):
gen_JZ(x, state)
elif isinstance(x, Assign):
gen_Assign(x, state)
elif isinstance(x, FuncCall):
gen_FuncCall(x, state)
elif isinstance(x, Decl):
gen_Decl(x, state)
elif isinstance(x, EndDecls):
gen_EndDecls(x, state)
elif isinstance(x, Label):
state.out(x)
else:
raise Exception(x.__class__.__name__)
state.outl('%include "stdlib/stdlib.asm"')
state.out("section .data")
for x in state.strings:
state.out("strconst_{}:", x.count)
state.out("db {}, 0", str(x))
return "\n".join(state.output)
def main():
source = """\
function fib(int a) -> int
{
if(a < 2)
{
return 1
}
return fib(a - 1) + fib(a - 2)
}
function factorial(int a) -> int
{
if(a < 2)
{
return 1
}
return a * factorial(a - 1)
}
function return_string() -> string
{
return "Hello, world"
}
function main() -> int
{
prints(return_string())
return 0
}
"""
print(source)
print("-" * 80)
t = make_tac(source)
for x in t:
print(x)
print("-" * 80)
out = gen_asm(t)
print_asm = True
if print_asm:
print("-" * 80)
for lineno, line in enumerate(out.split("\n")):
sys.stdout.write("{:02d}:{}\n".format(lineno + 1, line))
open("out.s", "w").write(out)
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
from src.abstract.lexer.lexer import AbstractLexer
from src.abstract.lexer.token import AbstractToken
import unittest
class TestLexer(unittest.TestCase):
'''
: unit tests for lexer class.
'''
lexer = AbstractLexer()
def test_lexer_string_blank(self):
attempt = self.lexer.lex('')
answer = []
self.assertEqual(attempt, answer)
#
# strings
#
def test_lexer_string_short_A(self):
attempt = str(self.lexer.lex('A'))
answer = str([{'y': 0, 'x': 0, 'type': 'STRING', 'value': 'A'}])
self.assertEqual(attempt, answer)
def test_lexer_string_short_a(self):
attempt = str(self.lexer.lex('a'))
answer = str([{'y': 0, 'x': 0, 'type': 'STRING', 'value': 'a'}])
self.assertEqual(attempt, answer)
def test_lexer_string_short_Z(self):
attempt = str(self.lexer.lex('Z'))
answer = str([{'y': 0, 'x': 0, 'type': 'STRING', 'value': 'Z'}])
self.assertEqual(attempt, answer)
def test_lexer_string_short_z(self):
attempt = str(self.lexer.lex('z'))
answer = str([{'y': 0, 'x': 0, 'type': 'STRING', 'value': 'z'}])
self.assertEqual(attempt, answer)
def test_lexer_string_long_A(self):
attempt = str(self.lexer.lex('AA'))
answer = str([{'y': 0, 'x': 0, 'type': 'STRING', 'value': 'A'},
{'y': 0, 'x': 1, 'type': 'STRING', 'value': 'A'}])
self.assertEqual(attempt, answer)
def test_lexer_string_long_a(self):
attempt = str(self.lexer.lex('aa'))
answer = str([{'y': 0, 'x': 0, 'type': 'STRING', 'value': 'a'},
{'y': 0, 'x': 1, 'type': 'STRING', 'value': 'a'}])
self.assertEqual(attempt, answer)
def test_lexer_string_long_Z(self):
attempt = str(self.lexer.lex('ZZ'))
answer = str([{'y': 0, 'x': 0, 'type': 'STRING', 'value': 'Z'},
{'y': 0, 'x': 1, 'type': 'STRING', 'value': 'Z'}])
self.assertEqual(attempt, answer)
def test_lexer_string_long_z(self):
attempt = str(self.lexer.lex('zz'))
answer = str([{'y': 0, 'x': 0, 'type': 'STRING', 'value': 'z'},
{'y': 0, 'x': 1, 'type': 'STRING', 'value': 'z'}])
self.assertEqual(attempt, answer)
#
# numbers
#
def test_lexer_number_one(self):
attempt = str(self.lexer.lex('1'))
answer = str([{'y': 0, 'x': 0, 'type': 'NUMBER', 'value': '1'}])
self.assertEqual(attempt, answer)
def test_lexer_number_two(self):
attempt = str(self.lexer.lex('2'))
answer = str([{'y': 0, 'x': 0, 'type': 'NUMBER', 'value': '2'}])
self.assertEqual(attempt, answer)
def test_lexer_number_three(self):
attempt = str(self.lexer.lex('3'))
answer = str([{'y': 0, 'x': 0, 'type': 'NUMBER', 'value': '3'}])
self.assertEqual(attempt, answer)
def test_lexer_number_four(self):
attempt = str(self.lexer.lex('4'))
answer = str([{'y': 0, 'x': 0, 'type': 'NUMBER', 'value': '4'}])
self.assertEqual(attempt, answer)
def test_lexer_number_five(self):
attempt = str(self.lexer.lex('5'))
answer = str([{'y': 0, 'x': 0, 'type': 'NUMBER', 'value': '5'}])
self.assertEqual(attempt, answer)
def test_lexer_number_six(self):
attempt = str(self.lexer.lex('6'))
answer = str([{'y': 0, 'x': 0, 'type': 'NUMBER', 'value': '6'}])
self.assertEqual(attempt, answer)
def test_lexer_number_seven(self):
attempt = str(self.lexer.lex('7'))
answer = str([{'y': 0, 'x': 0, 'type': 'NUMBER', 'value': '7'}])
self.assertEqual(attempt, answer)
def test_lexer_number_eight(self):
attempt = str(self.lexer.lex('8'))
answer = str([{'y': 0, 'x': 0, 'type': 'NUMBER', 'value': '8'}])
self.assertEqual(attempt, answer)
def test_lexer_number_nine(self):
attempt = str(self.lexer.lex('9'))
answer = str([{'y': 0, 'x': 0, 'type': 'NUMBER', 'value': '9'}])
self.assertEqual(attempt, answer)
def test_lexer_number_zero(self):
attempt = str(self.lexer.lex('0'))
answer = str([{'y': 0, 'x': 0, 'type': 'NUMBER', 'value': '0'}])
self.assertEqual(attempt, answer)
#
# symbols
#
def test_lexer_symbol_random_01(self):
attempt = str(self.lexer.lex('@'))
answer = str([{'y': 0, 'x': 0, 'type': 'SYMBOL', 'value': '@'}])
self.assertEqual(attempt, answer)
def test_lexer_symbol_random_02(self):
attempt = str(self.lexer.lex('+'))
answer = str([{'y': 0, 'x': 0, 'type': 'SYMBOL', 'value': '+'}])
self.assertEqual(attempt, answer)
def test_lexer_symbol_random_03(self):
attempt = str(self.lexer.lex('!'))
answer = str([{'y': 0, 'x': 0, 'type': 'SYMBOL', 'value': '!'}])
self.assertEqual(attempt, answer)
def test_lexer_symbol_random_04(self):
attempt = str(self.lexer.lex('#'))
answer = str([{'y': 0, 'x': 0, 'type': 'SYMBOL', 'value': '#'}])
self.assertEqual(attempt, answer)
def test_lexer_symbol_random_05(self):
attempt = str(self.lexer.lex('{'))
answer = str([{'y': 0, 'x': 0, 'type': 'SYMBOL', 'value': '{'}])
self.assertEqual(attempt, answer)
def test_lexer_symbol_random_06(self):
attempt = str(self.lexer.lex('}'))
answer = str([{'y': 0, 'x': 0, 'type': 'SYMBOL', 'value': '}'}])
self.assertEqual(attempt, answer)
def test_lexer_symbol_random_07(self):
attempt = str(self.lexer.lex('-'))
answer = str([{'y': 0, 'x': 0, 'type': 'SYMBOL', 'value': '-'}])
self.assertEqual(attempt, answer)
def test_lexer_symbol_random_08(self):
attempt = str(self.lexer.lex('_'))
answer = str([{'y': 0, 'x': 0, 'type': 'SYMBOL', 'value': '_'}])
self.assertEqual(attempt, answer)
def test_lexer_symbol_random_09(self):
attempt = str(self.lexer.lex('^'))
answer = str([{'y': 0, 'x': 0, 'type': 'SYMBOL', 'value': '^'}])
self.assertEqual(attempt, answer)
def test_lexer_symbol_random_10(self):
attempt = str(self.lexer.lex('%'))
answer = str([{'y': 0, 'x': 0, 'type': 'SYMBOL', 'value': '%'}])
self.assertEqual(attempt, answer)
def test_lexer_symbol_random_11(self):
attempt = str(self.lexer.lex('.'))
answer = str([{'y': 0, 'x': 0, 'type': 'SYMBOL', 'value': '.'}])
self.assertEqual(attempt, answer)
#
# space
#
def test_lexer_space_single(self):
attempt = str(self.lexer.lex(' '))
answer = str([{'y': 0, 'x': 0, 'type': 'SPACE', 'value': ' '}])
self.assertEqual(attempt, answer)
def test_lexer_space_multiple(self):
attempt = str(self.lexer.lex(' '))
answer = str([{'y': 0, 'x': 0, 'type': 'SPACE', 'value': ' '},
{'y': 0, 'x': 1, 'type': 'SPACE', 'value': ' '},
{'y': 0, 'x': 2, 'type': 'SPACE', 'value': ' '}])
self.assertEqual(attempt, answer)
#
# mixed
#
def test_lexer_mixed_index_type_same(self):
attempt = str(self.lexer.lex(['0',1,23,456,7890]))
answer = str([[{'y': 0, 'x': 0, 'type': 'NUMBER', 'value': '0'}],
[{'y': 1, 'x': 0, 'type': 'NUMBER', 'value': '1'}],
[{'y': 2, 'x': 0, 'type': 'NUMBER', 'value': '2'},
{'y': 2, 'x': 1, 'type': 'NUMBER', 'value': '3'}],
[{'y': 3, 'x': 0, 'type': 'NUMBER', 'value': '4'},
{'y': 3, 'x': 1, 'type': 'NUMBER', 'value': '5'},
{'y': 3, 'x': 2, 'type': 'NUMBER', 'value': '6'}],
[{'y': 4, 'x': 0, 'type': 'NUMBER', 'value': '7'},
{'y': 4, 'x': 1, 'type': 'NUMBER', 'value': '8'},
{'y': 4, 'x': 2, 'type': 'NUMBER', 'value': '9'},
{'y': 4, 'x': 3, 'type': 'NUMBER', 'value': '0'}]])
self.assertEqual(attempt, answer)
def test_lexer_mixed_index_type_different(self):
attempt = str(self.lexer.lex(['0','A','bc','$%^',7890]))
answer = str([[{'y': 0, 'x': 0, 'type': 'NUMBER', 'value': '0'}],
[{'y': 1, 'x': 0, 'type': 'STRING', 'value': 'A'}],
[{'y': 2, 'x': 0, 'type': 'STRING', 'value': 'b'},
{'y': 2, 'x': 1, 'type': 'STRING', 'value': 'c'}],
[{'y': 3, 'x': 0, 'type': 'SYMBOL', 'value': '$'},
{'y': 3, 'x': 1, 'type': 'SYMBOL', 'value': '%'},
{'y': 3, 'x': 2, 'type': 'SYMBOL', 'value': '^'}],
[{'y': 4, 'x': 0, 'type': 'NUMBER', 'value': '7'},
{'y': 4, 'x': 1, 'type': 'NUMBER', 'value': '8'},
{'y': 4, 'x': 2, 'type': 'NUMBER', 'value': '9'},
{'y': 4, 'x': 3, 'type': 'NUMBER', 'value': '0'}]])
self.assertEqual(attempt, answer)
def test_lexer_mixed_index_type_empty(self):
attempt = str(self.lexer.lex(['0','A','bc','$%^','','']))
answer = str([[{'y': 0, 'x': 0, 'type': 'NUMBER', 'value': '0'}],
[{'y': 1, 'x': 0, 'type': 'STRING', 'value': 'A'}],
[{'y': 2, 'x': 0, 'type': 'STRING', 'value': 'b'},
{'y': 2, 'x': 1, 'type': 'STRING', 'value': 'c'}],
[{'y': 3, 'x': 0, 'type': 'SYMBOL', 'value': '$'},
{'y': 3, 'x': 1, 'type': 'SYMBOL', 'value': '%'},
{'y': 3, 'x': 2, 'type': 'SYMBOL', 'value': '^'}],
[],
[]])
self.assertEqual(attempt, answer)
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class flags(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/ipv6-reachability/prefixes/prefixes/subTLVs/subTLVs/flags. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines sub-TLV 4.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "flags"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"ipv6-reachability",
"prefixes",
"prefixes",
"subTLVs",
"subTLVs",
"flags",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv6_reachability/prefixes/prefixes/subTLVs/subTLVs/flags/state (container)
YANG Description: State parameters of sub-TLV 4.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv6_reachability/prefixes/prefixes/subTLVs/subTLVs/flags/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of sub-TLV 4.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
from . import state
class flags(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/ipv6-reachability/prefixes/prefixes/subTLVs/subTLVs/flags. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines sub-TLV 4.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "flags"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"ipv6-reachability",
"prefixes",
"prefixes",
"subTLVs",
"subTLVs",
"flags",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv6_reachability/prefixes/prefixes/subTLVs/subTLVs/flags/state (container)
YANG Description: State parameters of sub-TLV 4.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv6_reachability/prefixes/prefixes/subTLVs/subTLVs/flags/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of sub-TLV 4.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
|
|
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import sys
import textwrap
from collections import Counter
from glob import glob
from itertools import chain, product
from typing import Any, Dict, Iterable, List
import jsonschema
import yaml
from tabulate import tabulate
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader # type: ignore[no-redef]
if __name__ != "__main__":
raise Exception(
"This file is intended to be executed as an executable program. You cannot use it as a module."
)
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir))
DOCS_DIR = os.path.join(ROOT_DIR, 'docs')
PROVIDER_DATA_SCHEMA_PATH = os.path.join(ROOT_DIR, "airflow", "provider.yaml.schema.json")
CORE_INTEGRATIONS = ["SQL", "Local"]
errors = []
def _filepath_to_module(filepath: str):
filepath = os.path.relpath(os.path.abspath(filepath), ROOT_DIR)
if filepath.endswith(".py"):
filepath = filepath[: -(len(".py"))]
return filepath.replace("/", ".")
def _load_schema() -> Dict[str, Any]:
with open(PROVIDER_DATA_SCHEMA_PATH) as schema_file:
content = json.load(schema_file)
return content
def _load_package_data(package_paths: Iterable[str]):
schema = _load_schema()
result = {}
for provider_yaml_path in package_paths:
with open(provider_yaml_path) as yaml_file:
provider = yaml.load(yaml_file, SafeLoader)
rel_path = os.path.relpath(provider_yaml_path, ROOT_DIR)
try:
jsonschema.validate(provider, schema=schema)
except jsonschema.ValidationError as ex:
raise Exception(f"Unable to parse: {rel_path}.") from ex
result[rel_path] = provider
return result
def get_all_integration_names(yaml_files):
all_integrations = [
i['integration-name'] for f in yaml_files.values() if 'integrations' in f for i in f["integrations"]
]
all_integrations += CORE_INTEGRATIONS
return all_integrations
def check_integration_duplicates(yaml_files: Dict[str, Dict]):
"""Integration names must be globally unique."""
print("Checking integration duplicates")
all_integrations = get_all_integration_names(yaml_files)
duplicates = [(k, v) for (k, v) in Counter(all_integrations).items() if v > 1]
if duplicates:
print(
"Duplicate integration names found. Integration names must be globally unique. "
"Please delete duplicates."
)
print(tabulate(duplicates, headers=["Integration name", "Number of occurrences"]))
sys.exit(1)
def assert_sets_equal(set1, set2):
try:
difference1 = set1.difference(set2)
except TypeError as e:
raise AssertionError(f'invalid type when attempting set difference: {e}')
except AttributeError as e:
raise AssertionError(f'first argument does not support set difference: {e}')
try:
difference2 = set2.difference(set1)
except TypeError as e:
raise AssertionError(f'invalid type when attempting set difference: {e}')
except AttributeError as e:
raise AssertionError(f'second argument does not support set difference: {e}')
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in sorted(difference1):
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in sorted(difference2):
lines.append(repr(item))
standard_msg = '\n'.join(lines)
raise AssertionError(standard_msg)
def check_if_objects_belongs_to_package(
object_names: List[str], provider_package: str, yaml_file_path: str, resource_type: str
):
for object_name in object_names:
if not object_name.startswith(provider_package):
errors.append(
f"The `{object_name}` object in {resource_type} list in {yaml_file_path} does not start"
f" with the expected {provider_package}."
)
def parse_module_data(provider_data, resource_type, yaml_file_path):
package_dir = ROOT_DIR + "/" + os.path.dirname(yaml_file_path)
provider_package = os.path.dirname(yaml_file_path).replace(os.sep, ".")
py_files = chain(
glob(f"{package_dir}/**/{resource_type}/*.py"), glob(f"{package_dir}/{resource_type}/*.py")
)
expected_modules = {_filepath_to_module(f) for f in py_files if not f.endswith("/__init__.py")}
resource_data = provider_data.get(resource_type, [])
return expected_modules, provider_package, resource_data
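# A small illustration (hypothetical provider path) of how the helpers above
# line up: a file such as airflow/providers/foo/hooks/foo_hook.py is turned by
# _filepath_to_module() into "airflow.providers.foo.hooks.foo_hook", and that
# string is expected to appear under the matching "hooks"/"python-modules"
# key of the provider.yaml that sits next to the package.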
def check_completeness_of_list_of_hooks_sensors_hooks(yaml_files: Dict[str, Dict]):
print("Checking completeness of list of {sensors, hooks, operators}")
for (yaml_file_path, provider_data), resource_type in product(
yaml_files.items(), ["sensors", "operators", "hooks"]
):
expected_modules, provider_package, resource_data = parse_module_data(
provider_data, resource_type, yaml_file_path
)
current_modules = {str(i) for r in resource_data for i in r.get('python-modules', [])}
check_if_objects_belongs_to_package(current_modules, provider_package, yaml_file_path, resource_type)
try:
assert_sets_equal(set(expected_modules), set(current_modules))
except AssertionError as ex:
nested_error = textwrap.indent(str(ex), ' ')
errors.append(
f"Incorrect content of key '{resource_type}/python-modules' "
f"in file: {yaml_file_path}\n{nested_error}"
)
def check_duplicates_in_integrations_names_of_hooks_sensors_operators(yaml_files: Dict[str, Dict]):
print("Checking for duplicates in list of {sensors, hooks, operators}")
for (yaml_file_path, provider_data), resource_type in product(
yaml_files.items(), ["sensors", "operators", "hooks"]
):
resource_data = provider_data.get(resource_type, [])
current_integrations = [r.get("integration-name", "") for r in resource_data]
if len(current_integrations) != len(set(current_integrations)):
for integration in current_integrations:
if current_integrations.count(integration) > 1:
errors.append(
f"Duplicated content of '{resource_type}/integration-name/{integration}' "
f"in file: {yaml_file_path}"
)
def check_completeness_of_list_of_transfers(yaml_files: Dict[str, Dict]):
print("Checking completeness of list of transfers")
resource_type = 'transfers'
for yaml_file_path, provider_data in yaml_files.items():
expected_modules, provider_package, resource_data = parse_module_data(
provider_data, resource_type, yaml_file_path
)
current_modules = {r.get('python-module') for r in resource_data}
check_if_objects_belongs_to_package(current_modules, provider_package, yaml_file_path, resource_type)
try:
assert_sets_equal(set(expected_modules), set(current_modules))
except AssertionError as ex:
nested_error = textwrap.indent(str(ex), ' ')
errors.append(
f"Incorrect content of key '{resource_type}/python-module' "
f"in file: {yaml_file_path}\n{nested_error}"
)
def check_hook_classes(yaml_files: Dict[str, Dict]):
print("Checking connection classes belong to package")
resource_type = 'hook-class-names'
for yaml_file_path, provider_data in yaml_files.items():
provider_package = os.path.dirname(yaml_file_path).replace(os.sep, ".")
hook_class_names = provider_data.get(resource_type)
if hook_class_names:
check_if_objects_belongs_to_package(
hook_class_names, provider_package, yaml_file_path, resource_type
)
def check_duplicates_in_list_of_transfers(yaml_files: Dict[str, Dict]):
print("Checking for duplicates in list of transfers")
resource_type = "transfers"
for yaml_file_path, provider_data in yaml_files.items():
resource_data = provider_data.get(resource_type, [])
source_target_integrations = [
(r.get("source-integration-name", ""), r.get("target-integration-name", ""))
for r in resource_data
]
if len(source_target_integrations) != len(set(source_target_integrations)):
for integration_couple in source_target_integrations:
if source_target_integrations.count(integration_couple) > 1:
errors.append(
f"Duplicated content of \n"
f" '{resource_type}/source-integration-name/{integration_couple[0]}' "
f" '{resource_type}/target-integration-name/{integration_couple[1]}' "
f"in file: {yaml_file_path}"
)
def check_invalid_integration(yaml_files: Dict[str, Dict]):
print("Detect unregistered integrations")
all_integration_names = set(get_all_integration_names(yaml_files))
for (yaml_file_path, provider_data), resource_type in product(
yaml_files.items(), ["sensors", "operators", "hooks"]
):
resource_data = provider_data.get(resource_type, [])
current_names = {r['integration-name'] for r in resource_data}
invalid_names = current_names - all_integration_names
if invalid_names:
errors.append(
f"Incorrect content of key '{resource_type}/integration-name' in file: {yaml_file_path}. "
f"Invalid values: {invalid_names}"
)
for (yaml_file_path, provider_data), key in product(
yaml_files.items(), ['source-integration-name', 'target-integration-name']
):
resource_data = provider_data.get('transfers', [])
current_names = {r[key] for r in resource_data}
invalid_names = current_names - all_integration_names
if invalid_names:
errors.append(
f"Incorrect content of key 'transfers/{key}' in file: {yaml_file_path}. "
f"Invalid values: {invalid_names}"
)
def check_doc_files(yaml_files: Dict[str, Dict]):
print("Checking doc files")
current_doc_urls = []
current_logo_urls = []
for provider in yaml_files.values():
if 'integrations' in provider:
current_doc_urls.extend(
guide
for guides in provider['integrations']
if 'how-to-guide' in guides
for guide in guides['how-to-guide']
)
current_logo_urls.extend(
integration['logo'] for integration in provider['integrations'] if 'logo' in integration
)
if 'transfers' in provider:
current_doc_urls.extend(
op['how-to-guide'] for op in provider['transfers'] if 'how-to-guide' in op
)
expected_doc_urls = {
"/docs/" + os.path.relpath(f, start=DOCS_DIR)
for f in glob(f"{DOCS_DIR}/apache-airflow-providers-*/operators/**/*.rst", recursive=True)
if not f.endswith("/index.rst") and '/_partials' not in f
}
expected_doc_urls |= {
"/docs/" + os.path.relpath(f, start=DOCS_DIR)
for f in glob(f"{DOCS_DIR}/apache-airflow-providers-*/operators.rst", recursive=True)
}
expected_logo_urls = {
"/" + os.path.relpath(f, start=DOCS_DIR)
for f in glob(f"{DOCS_DIR}/integration-logos/**/*", recursive=True)
if os.path.isfile(f)
}
try:
assert_sets_equal(set(expected_doc_urls), set(current_doc_urls))
assert_sets_equal(set(expected_logo_urls), set(current_logo_urls))
except AssertionError as ex:
print(ex)
sys.exit(1)
def check_unique_provider_name(yaml_files: Dict[str, Dict]):
provider_names = [d['name'] for d in yaml_files.values()]
duplicates = {x for x in provider_names if provider_names.count(x) > 1}
if duplicates:
errors.append(f"Provider name must be unique. Duplicates: {duplicates}")
if __name__ == '__main__':
all_provider_files = sorted(glob(f"{ROOT_DIR}/airflow/providers/**/provider.yaml", recursive=True))
if len(sys.argv) > 1:
paths = sorted(sys.argv[1:])
else:
paths = all_provider_files
all_parsed_yaml_files: Dict[str, Dict] = _load_package_data(paths)
all_files_loaded = len(all_provider_files) == len(paths)
check_integration_duplicates(all_parsed_yaml_files)
check_completeness_of_list_of_hooks_sensors_hooks(all_parsed_yaml_files)
check_duplicates_in_integrations_names_of_hooks_sensors_operators(all_parsed_yaml_files)
check_completeness_of_list_of_transfers(all_parsed_yaml_files)
check_duplicates_in_list_of_transfers(all_parsed_yaml_files)
check_hook_classes(all_parsed_yaml_files)
check_unique_provider_name(all_parsed_yaml_files)
if all_files_loaded:
# Only check those if all provider files are loaded
check_doc_files(all_parsed_yaml_files)
check_invalid_integration(all_parsed_yaml_files)
if errors:
print(f"Found {len(errors)} errors")
for error in errors:
print(error)
print()
sys.exit(1)
|
|
# -*- coding: utf-8 -*-
"""
thriftpy.thrift
~~~~~~~~~~~~~~~~~~
Thrift simplified.
"""
from __future__ import absolute_import
import functools
from ._compat import init_func_generator, with_metaclass, calling_func_generator
def args2kwargs(thrift_spec, *args):
arg_names = [item[1][1] for item in sorted(thrift_spec.items())]
return dict(zip(arg_names, args))
class TType(object):
STOP = 0
VOID = 1
BOOL = 2
BYTE = 3
I08 = 3
DOUBLE = 4
I16 = 6
I32 = 8
I64 = 10
STRING = 11
UTF7 = 11
BINARY = 11 # This here just for parsing. For all purposes, it's a string
STRUCT = 12
MAP = 13
SET = 14
LIST = 15
UTF8 = 16
UTF16 = 17
_VALUES_TO_NAMES = {
STOP: 'STOP',
VOID: 'VOID',
BOOL: 'BOOL',
BYTE: 'BYTE',
I08: 'BYTE',
DOUBLE: 'DOUBLE',
I16: 'I16',
I32: 'I32',
I64: 'I64',
STRING: 'STRING',
UTF7: 'STRING',
BINARY: 'STRING',
STRUCT: 'STRUCT',
MAP: 'MAP',
SET: 'SET',
LIST: 'LIST',
UTF8: 'UTF8',
UTF16: 'UTF16'
}
class TMessageType(object):
CALL = 1
REPLY = 2
EXCEPTION = 3
ONEWAY = 4
class TPayloadMeta(type):
def __new__(cls, name, bases, attrs):
if "default_spec" in attrs:
attrs["__init__"] = init_func_generator(attrs.pop("default_spec"))
return super(TPayloadMeta, cls).__new__(cls, name, bases, attrs)
def gen_init(cls, thrift_spec=None, default_spec=None):
if thrift_spec is not None:
cls.thrift_spec = thrift_spec
if default_spec is not None:
cls.__init__ = init_func_generator(default_spec)
return cls
class TPayload(with_metaclass(TPayloadMeta, object)):
def read(self, iprot):
iprot.read_struct(self)
def write(self, oprot):
oprot.write_struct(self)
def __repr__(self):
l = ['%s=%r' % (key, value) for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(l))
def __str__(self):
return repr(self)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
self.__dict__ == other.__dict__
def __hash__(self):
return super(TPayload, self).__hash__()
def __ne__(self, other):
return not self.__eq__(other)
class TClient(object):
def __init__(self, service, iprot, oprot=None):
self._service = service
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
self._functions = {}
def __getattr__(self, _api):
if _api in self._functions:
return self._functions[_api]
if _api in self._service.thrift_services:
fn = functools.partial(self._req, _api)
args = getattr(self._service, _api + '_args')
doc = self._service.thrift_services_doc.get(_api)
ff = calling_func_generator(fn, args, doc)
self._functions[_api] = ff
return ff
raise AttributeError("{} instance has no attribute '{}'".format(
self.__class__.__name__, _api))
def __dir__(self):
return self._service.thrift_services
def _req(self, _api, *args, **kwargs):
_kw = args2kwargs(getattr(self._service, _api + "_args").thrift_spec,
*args)
kwargs.update(_kw)
result_cls = getattr(self._service, _api + "_result")
self._send(_api, **kwargs)
# wait result only if non-oneway
if not getattr(result_cls, "oneway"):
return self._recv(_api)
def _send(self, _api, **kwargs):
self._oprot.write_message_begin(_api, TMessageType.CALL, self._seqid)
args = getattr(self._service, _api + "_args")()
for k, v in kwargs.items():
setattr(args, k, v)
args.write(self._oprot)
self._oprot.write_message_end()
self._oprot.trans.flush()
def _recv(self, _api):
fname, mtype, rseqid = self._iprot.read_message_begin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.read_message_end()
raise x
result = getattr(self._service, _api + "_result")()
result.read(self._iprot)
self._iprot.read_message_end()
if hasattr(result, "success") and result.success is not None:
return result.success
# void api without throws
if len(result.thrift_spec) == 0:
return
# check throws
for k, v in result.__dict__.items():
if k != "success" and v:
raise v
# no throws & not void api
if hasattr(result, "success"):
raise TApplicationException(TApplicationException.MISSING_RESULT)
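# Rough usage sketch (service and method names are hypothetical): given a
# thrift service with a "ping" method, TClient resolves client.ping() through
# __getattr__, which builds a wrapper around _req("ping"). _req packs
# positional args into kwargs via args2kwargs, writes a CALL message in _send,
# and - unless the result struct is marked oneway - waits in _recv for the
# REPLY, returning result.success or raising the decoded exception.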
class TProcessor(object):
"""Base class for procsessor, which works on two streams."""
def __init__(self, service, handler):
self._service = service
self._handler = handler
def process_in(self, iprot):
api, type, seqid = iprot.read_message_begin()
if api not in self._service.thrift_services:
iprot.skip(TType.STRUCT)
iprot.read_message_end()
return api, seqid, TApplicationException(TApplicationException.UNKNOWN_METHOD), None # noqa
args = getattr(self._service, api + "_args")()
args.read(iprot)
iprot.read_message_end()
result = getattr(self._service, api + "_result")()
# convert kwargs to args
api_args = [args.thrift_spec[k][1] for k in sorted(args.thrift_spec)]
def call():
f = getattr(self._handler, api)
return f(*(args.__dict__[k] for k in api_args))
return api, seqid, result, call
def send_exception(self, oprot, api, exc, seqid):
oprot.write_message_begin(api, TMessageType.EXCEPTION, seqid)
exc.write(oprot)
oprot.write_message_end()
oprot.trans.flush()
def send_result(self, oprot, api, result, seqid):
oprot.write_message_begin(api, TMessageType.REPLY, seqid)
result.write(oprot)
oprot.write_message_end()
oprot.trans.flush()
def handle_exception(self, e, result):
for k in sorted(result.thrift_spec):
if result.thrift_spec[k][1] == "success":
continue
_, exc_name, exc_cls, _ = result.thrift_spec[k]
if isinstance(e, exc_cls):
setattr(result, exc_name, e)
break
else:
raise
def process(self, iprot, oprot):
api, seqid, result, call = self.process_in(iprot)
if isinstance(result, TApplicationException):
return self.send_exception(oprot, api, result, seqid)
try:
result.success = call()
except Exception as e:
# raise if api don't have throws
self.handle_exception(e, result)
if not result.oneway:
self.send_result(oprot, api, result, seqid)
class TMultiplexedProcessor(TProcessor):
SEPARATOR = ":"
def __init__(self):
self.processors = {}
def register_processor(self, service_name, processor):
if service_name in self.processors:
raise TApplicationException(
type=TApplicationException.INTERNAL_ERROR,
message='processor for `{0}` already registered'
.format(service_name))
self.processors[service_name] = processor
def process_in(self, iprot):
api, type, seqid = iprot.read_message_begin()
if type not in (TMessageType.CALL, TMessageType.ONEWAY):
raise TException("TMultiplex protocol only supports CALL & ONEWAY")
if TMultiplexedProcessor.SEPARATOR not in api:
raise TException("Service name not found in message. "
"You should use TMultiplexedProtocol in client.")
service_name, api = api.split(TMultiplexedProcessor.SEPARATOR)
if service_name not in self.processors:
iprot.skip(TType.STRUCT)
iprot.read_message_end()
e = TApplicationException(TApplicationException.UNKNOWN_METHOD)
return api, seqid, e, None
proc = self.processors[service_name]
args = getattr(proc._service, api + "_args")()
args.read(iprot)
iprot.read_message_end()
result = getattr(proc._service, api + "_result")()
# convert kwargs to args
api_args = [args.thrift_spec[k][1] for k in sorted(args.thrift_spec)]
def call():
f = getattr(proc._handler, api)
return f(*(args.__dict__[k] for k in api_args))
return api, seqid, result, call
class TProcessorFactory(object):
def __init__(self, processor_class, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.processor_class = processor_class
def get_processor(self):
return self.processor_class(*self.args, **self.kwargs)
class TException(TPayload, Exception):
"""Base class for all thrift exceptions."""
class TApplicationException(TException):
"""Application level thrift exceptions."""
thrift_spec = {
1: (TType.STRING, 'message', False),
2: (TType.I32, 'type', False),
}
UNKNOWN = 0
UNKNOWN_METHOD = 1
INVALID_MESSAGE_TYPE = 2
WRONG_METHOD_NAME = 3
BAD_SEQUENCE_ID = 4
MISSING_RESULT = 5
INTERNAL_ERROR = 6
PROTOCOL_ERROR = 7
def __init__(self, type=UNKNOWN, message=None):
super(TApplicationException, self).__init__()
self.type = type
self.message = message
def __str__(self):
if self.message:
return self.message
if self.type == self.UNKNOWN_METHOD:
return 'Unknown method'
elif self.type == self.INVALID_MESSAGE_TYPE:
return 'Invalid message type'
elif self.type == self.WRONG_METHOD_NAME:
return 'Wrong method name'
elif self.type == self.BAD_SEQUENCE_ID:
return 'Bad sequence ID'
elif self.type == self.MISSING_RESULT:
return 'Missing result'
else:
return 'Default (unknown) TApplicationException'
|
|
import unittest
import webtest
from rdflib import BNode, ConjunctiveGraph, Graph, Literal, URIRef
from rdflib.namespace import RDF, RDFS, FOAF, XSD
from flask_rdf.wsgi import returns_rdf, output, Decorator
def make_graph():
graph = Graph('IOMemory', BNode())
person = URIRef('http://example.com/#person')
graph.add((person, RDF.type, FOAF.Person))
graph.add((person, FOAF.age, Literal(15, datatype=XSD.integer)))
return graph
graph = make_graph()
def make_ctx_graph():
context = URIRef('http://example.com/#root')
graph = ConjunctiveGraph('IOMemory', context)
person = URIRef('http://example.com/#person')
graph.add((person, RDF.type, FOAF.Person))
graph.add((person, FOAF.age, Literal(15, datatype=XSD.integer)))
return graph
ctx_graph = make_ctx_graph()
def make_unicode_graph():
mygraph = make_graph()
mygraph.add((BNode(), FOAF.name, Literal('\u2603')))
return mygraph
unicode_graph = make_unicode_graph()
@returns_rdf
def application(environ, start_response):
path = environ.get('PATH_INFO', '/')
if path == '/test':
return graph
if path == '/ctx':
return ctx_graph
if path == '/unicode':
return unicode_graph
if path == '/text':
return ['This is a test string'.encode('utf-8')]
if path == '/sneaky':
write = start_response('200 OK', [])
write('Sneaky'.encode('utf-8'))
return ['Step2'.encode('utf-8')]
if path == '/202':
start_response('202 Custom', [('CustomHeader','yes')])
return graph
if path == '/smart':
start_response('200 OK', [('Vary','Accept')])
return graph
if path == '/profile':
start_response('200 OK', [('Vary','Cookie')])
return graph
if path == '/varyall':
start_response('200 OK', [('Vary','*')])
return graph
app = webtest.TestApp(application)
class TestCases(unittest.TestCase):
def test_output_simple(self):
turtle = graph.serialize(format='turtle')
headers = {'Accept': 'text/n3;q=0.5, text/turtle;q=0.9'}
response = {'content-type': '', 'status': '200 OK', 'data': []}
def set_http_code(arg):
response['status'] = arg
def set_content_type(arg):
response['content-type'] = arg
response['data'] = output(graph, headers['Accept'], set_http_code, set_content_type)
self.assertEqual(turtle, response['data'][0])
self.assertEqual('text/turtle; charset=utf-8', response['content-type'])
self.assertEqual(200, int(response['status'].split()[0]))
def test_format_simple(self):
turtle = graph.serialize(format='turtle')
headers = {'Accept': 'text/n3;q=0.5, text/turtle;q=0.9'}
response = app.get('/test', headers=headers)
self.assertEqual(turtle, response.body)
self.assertEqual('text/turtle; charset=utf-8', response.headers['content-type'])
self.assertEqual('Accept', response.headers['vary'])
self.assertEqual(200, response.status_int)
def test_format_unacceptable(self):
turtle = graph.serialize(format='turtle')
headers = {'Accept': 'text/html;q=0.9'}
response = app.get('/test', headers=headers, status=406)
self.assertEqual(406, response.status_int)
def test_format_quads_context(self):
g = ctx_graph
self.assertTrue(g.context_aware)
quads = g.serialize(format='nquads')
headers = {'Accept': 'application/n-quads;q=0.9'}
response = app.get('/ctx', headers=headers)
self.assertEqual(quads, response.body)
self.assertEqual('application/n-quads', response.headers['content-type'])
self.assertEqual('Accept', response.headers['vary'])
self.assertEqual(200, response.status_int)
def test_format_quads_lowprio(self):
""" Test that quads are not used even if possible """
g = ctx_graph
quads = g.serialize(format='turtle')
headers = {'Accept': 'text/turtle;q=0.9, application/n-quads;q=0.4'}
response = app.get('/ctx', headers=headers)
self.assertEqual(quads, response.body)
self.assertEqual('text/turtle; charset=utf-8', response.headers['content-type'])
self.assertEqual('Accept', response.headers['vary'])
self.assertEqual(200, response.status_int)
def test_format_quads_highprio(self):
""" Test that quads are used with alternative """
g = ctx_graph
quads = g.serialize(format='nquads')
headers = {'Accept': 'text/turtle;q=0.4, application/n-quads;q=0.9'}
response = app.get('/ctx', headers=headers)
self.assertEqual(quads, response.body)
self.assertEqual('application/n-quads', response.headers['content-type'])
self.assertEqual('Accept', response.headers['vary'])
self.assertEqual(200, response.status_int)
def test_format_quads_unavailable(self):
""" Test that quads are not used with contextless store """
g = graph
quads = g.serialize(format='turtle')
headers = {'Accept': 'text/turtle;q=0.4, application/n-quads;q=0.9'}
response = app.get('/test', headers=headers)
self.assertEqual(quads, response.body)
self.assertEqual('text/turtle; charset=utf-8', response.headers['content-type'])
self.assertEqual('Accept', response.headers['vary'])
self.assertEqual(200, response.status_int)
def test_empty_format_headers(self):
xml = graph.serialize(format='xml')
headers = {'Accept': ''}
response = app.get('/test', headers=headers)
self.assertEqual('application/rdf+xml', response.headers['content-type'])
self.assertEqual('Accept', response.headers['vary'])
def test_text(self):
test_str = 'This is a test string'
headers = {'Accept': 'text/n3;q=0.5, text/turtle;q=0.9'}
response = app.get('/text', headers=headers)
self.assertEqual(test_str.encode('utf-8'), response.body)
def test_sneaky(self):
""" Test WSGI apps that use start_response().write() """
test_str = 'SneakyStep2'
headers = {'Accept': 'text/plain;q=0.5'}
response = app.get('/sneaky', headers=headers)
self.assertEqual(test_str.encode('utf-8'), response.body)
def test_unicode(self):
mygraph = unicode_graph
turtle = mygraph.serialize(format='turtle')
headers = {'Accept': 'text/turtle'}
response = app.get('/unicode', headers=headers)
self.assertEqual(turtle, response.body)
self.assertEqual('text/turtle; charset=utf-8', response.headers['content-type'])
self.assertEqual('Accept', response.headers['vary'])
self.assertEqual(200, response.status_int)
self.assertTrue('\u2603' in response.body.decode('utf-8'))
def test_custom_response(self):
turtle = graph.serialize(format='turtle')
headers = {'Accept': 'text/turtle'}
response = app.get('/202', headers=headers)
self.assertEqual(turtle, response.body)
self.assertEqual('text/turtle; charset=utf-8', response.headers['content-type'])
self.assertEqual('Accept', response.headers['vary'])
self.assertEqual('yes', response.headers['CustomHeader'])
self.assertEqual(202, response.status_int)
def test_smart_vary(self):
turtle = graph.serialize(format='turtle')
headers = {'Accept': 'text/turtle'}
response = app.get('/smart', headers=headers)
self.assertEqual(turtle, response.body)
self.assertEqual('text/turtle; charset=utf-8', response.headers['content-type'])
self.assertEqual('Accept', response.headers['vary'])
self.assertEqual(200, response.status_int)
def test_custom_vary(self):
turtle = graph.serialize(format='turtle')
headers = {'Accept': 'text/turtle'}
response = app.get('/profile', headers=headers)
self.assertEqual(turtle, response.body)
self.assertEqual('text/turtle; charset=utf-8', response.headers['content-type'])
self.assertEqual('Cookie, Accept', response.headers['vary'])
self.assertEqual(200, response.status_int)
def test_custom_varyall(self):
turtle = graph.serialize(format='turtle')
headers = {'Accept': 'text/turtle'}
response = app.get('/varyall', headers=headers)
self.assertEqual(turtle, response.body)
self.assertEqual('text/turtle; charset=utf-8', response.headers['content-type'])
self.assertEqual('*', response.headers['vary'])
self.assertEqual(200, response.status_int)
def test_decorators(self):
turtle = graph.serialize(format='turtle')
xml = graph.serialize(format='xml')
view = graph
accepts = 'text/n3;q=0.5, text/turtle;q=0.9'
decorator = Decorator()
response = decorator.output(view, accepts, lambda *args: None, lambda *args: None)
self.assertEqual(turtle, response[0])
# use the decorator
decoratee = lambda *args: view
decorated = decorator.decorate(decoratee)
response = decorated({}, lambda *args: None)
self.assertEqual(xml, response[0])
decorated = decorator(decoratee)
response = decorated({}, lambda *args: None)
self.assertEqual(xml, response[0])
|
|
# Authors: Aaron Qiu <[email protected]>,
# Antonio Sutera <[email protected]>,
# Arnaud Joly <[email protected]>,
# Gilles Louppe <[email protected]>,
# Vincent Francois <[email protected]>
#
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import numpy as np
from sklearn.decomposition import PCA
from utils import scale
###########################################
######### SIMPLIFIED METHOD ###############
###########################################
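# Rough outline of the simplified pipeline implemented below (as read from the
# code itself, not from any external reference): f1/f2 low-pass the traces by
# summing time-shifted copies, g takes the temporal difference, h zeroes
# values below a threshold, and w re-weights each frame by its total activity.
# The filtered signal is then fed to PCA in make_simple_inference, whose
# negated precision matrix is used as the connectivity score.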
def f1(X):
return X + np.roll(X, -1, axis=0) + np.roll(X, 1, axis=0)
def f2(X):
return X + np.roll(X, 1, axis=0) + 0.8 * np.roll(X, 2, axis=0) + 0.4 * np.roll(X, 3, axis=0)
def g(X):
return np.diff(X, axis=0)
def h(X, threshold = 0.11):
threshold1 = X < threshold * 1
threshold2 = X >= threshold * 1
X_new = np.zeros_like(X)
X_new[threshold1] = 0
X_new[threshold2] = X[threshold2]
return X_new
def w(X):
X_new = X
Sum_X_new = np.sum(X_new, axis=1)
Sum4 = Sum_X_new
normalization = np.max(Sum4)
for i in range(X_new.shape[0]):
r = Sum4[i] / normalization
if Sum4[i] != 0:
X_new[i, :] = ((X_new[i, :] + 1) ** (1 + (1. / Sum4[i])))
else:
X_new[i, :] = 1
return X_new
def simple_filter(X, LP='f1', threshold=0.11, weights=True):
if LP == 'f1':
X = f1(X)
elif LP == 'f2':
X = f2(X)
else:
raise ValueError("Unknown filter, got %s." % LP)
X = g(X)
X = h(X, threshold)
if weights:
X = w(X)
return X
def tuned_filter(X, LP='f1', threshold=0.11, weights=True):
if LP == 'f1':
X = f1(X)
elif LP == 'f2':
X = f2(X)
elif LP == 'f3':
X = f3(X)
elif LP == 'f4':
X = f4(X)
else:
raise ValueError("Unknown filter, got %s." % LP)
X = g(X)
X = h(X, threshold)
X = r(X)
if weights:
X = w_star(X, LP)
return X
def make_simple_inference(X):
print('Making simple inference...')
t = [0.100, 0.101, 0.102, 0.103, 0.104, 0.105, 0.106, 0.107, 0.108, 0.109,
0.110, 0.111, 0.112, 0.113, 0.114, 0.115, 0.116, 0.117, 0.118, 0.119,
0.120, 0.121, 0.122, 0.123, 0.124, 0.125, 0.126, 0.127, 0.128, 0.129,
0.130, 0.131, 0.132, 0.133, 0.134, 0.135, 0.136, 0.137, 0.138, 0.139,
0.140, 0.141, 0.142, 0.143, 0.144, 0.145, 0.146, 0.147, 0.148, 0.149,
0.150, 0.151, 0.152, 0.154, 0.155, 0.156, 0.157, 0.158, 0.159, 0.160,
0.161, 0.162, 0.163, 0.164, 0.165, 0.166, 0.167, 0.168, 0.169, 0.170,
0.171, 0.172, 0.173, 0.174, 0.175, 0.176, 0.177, 0.178, 0.179, 0.180,
0.181, 0.182, 0.183, 0.184, 0.185, 0.186, 0.187, 0.188, 0.189, 0.190,
0.191, 0.192, 0.193, 0.194, 0.195, 0.196, 0.197, 0.198, 0.199, 0.200,
0.201, 0.202, 0.203, 0.204, 0.205, 0.206, 0.207, 0.208, 0.209, 0.200,
0.201, 0.202, 0.203, 0.204, 0.205, 0.206, 0.207, 0.208, 0.209, 0.210]
weight = 0
n_samples, n_nodes = X.shape
y_pred_agg = np.zeros((n_nodes, n_nodes))
for threshold in t:
for filtering in ["f1", "f2"]:
print('Current: %0.3f, %s' % (threshold, filtering))
X_new = simple_filter(X, LP=filtering, threshold=threshold, weights=True)
pca = PCA(whiten=True, n_components=int(0.8 * n_nodes)).fit(X_new)
y_pred = - pca.get_precision()
if filtering == 'f1':
y_pred_agg += y_pred
weight += 1
elif filtering == 'f2':
y_pred_agg += y_pred * 0.9
weight += 0.9
return scale(y_pred_agg / weight)
###########################################
############# TUNED METHOD ################
###########################################
def f3(X):
return X + np.roll(X, -1, axis=0) + np.roll(X, -2, axis=0) + np.roll(X, 1, axis=0)
def f4(X):
return X + np.roll(X, -1, axis=0) + np.roll(X, -2, axis=0) + np.roll(X, -3, axis=0)
def r(X):
return X**0.9
def w_star(X, filtering = "f1"):
X_new = X
Sum_X_new = np.sum(X_new, axis=1)
Sum4 = Sum_X_new + 0.5 * np.roll(Sum_X_new, 1)
normalization = np.max(Sum4)
for i in range(X_new.shape[0]):
r = Sum4[i] / normalization
if filtering == "f1":
if Sum4[i] > 0 and r < 0.23 and r > 0.05:
X_new[i, :] = ((X_new[i, :] + 1) ** (1 + (1. / Sum4[i]))) ** 1.9
elif Sum4[i] > 0 and r < 0.75:
X_new[i, :] = ((X_new[i, :] + 1) ** (1 + (1. / Sum4[i]))) ** 1.6
elif Sum4[i] != 0:
X_new[i, :] = ((X_new[i, :] + 1) ** (1 + (1. / Sum4[i]))) ** 1.4
else:
X_new[i, :] = 1
elif filtering == "f2":
if Sum4[i] > 0 and r < 0.23 and r > 0.05:
X_new[i, :] = ((X_new[i, :] + 1) ** (1 + (1. / Sum4[i]))) ** 1.9
elif Sum4[i] > 0 and r < 0.75:
X_new[i, :] = ((X_new[i, :] + 1) ** (1 + (1. / Sum4[i]))) ** 1.6
elif Sum4[i] != 0:
X_new[i, :] = ((X_new[i, :] + 1) ** (1 + (1. / Sum4[i]))) ** 1.4
else:
X_new[i, :] = 1
elif filtering == "f3":
if Sum4[i] > 0 and r < 0.22 and r > 0.04:
X_new[i, :] = ((X_new[i, :] + 1) ** (1 + (1. / Sum4[i]))) ** 1.9
elif Sum4[i] > 0 and r < 0.75:
X_new[i, :] = ((X_new[i, :] + 1) ** (1 + (1. / Sum4[i]))) ** 1.7
elif Sum4[i] != 0:
X_new[i, :] = ((X_new[i, :] + 1) ** (1 + (1. / Sum4[i]))) ** 1.5
else:
X_new[i, :] = 1
elif filtering == "f4":
if Sum4[i] > 0 and r < 0.22 and r > 0.08:
X_new[i, :] = ((X_new[i, :] + 1) ** (1 + (1. / Sum4[i]))) ** 1.9
elif Sum4[i] != 0:
X_new[i, :] = ((X_new[i, :] + 1) ** (1 + (1. / Sum4[i]))) ** 1.5
else:
X_new[i, :] = 1
else:
if Sum4[i] != 0:
X_new[i, :] = ((X_new[i, :] + 1) ** (1 + (1. / Sum4[i]))) ** 1.6
else:
X_new[i, :] = 1
return X_new
def make_tuned_inference(X):
print('Making tuned inference...')
t = [0.100, 0.101, 0.102, 0.103, 0.104, 0.105, 0.106, 0.107, 0.108, 0.109,
0.110, 0.111, 0.112, 0.113, 0.114, 0.115, 0.116, 0.117, 0.118, 0.119,
0.120, 0.121, 0.122, 0.123, 0.124, 0.125, 0.126, 0.127, 0.128, 0.129,
0.130, 0.131, 0.132, 0.133, 0.134, 0.135, 0.136, 0.137, 0.138, 0.139,
0.140, 0.141, 0.142, 0.143, 0.144, 0.145, 0.146, 0.147, 0.148, 0.149,
0.150, 0.151, 0.152, 0.154, 0.155, 0.156, 0.157, 0.158, 0.159, 0.160,
0.161, 0.162, 0.163, 0.164, 0.165, 0.166, 0.167, 0.168, 0.169, 0.170,
0.171, 0.172, 0.173, 0.174, 0.175, 0.176, 0.177, 0.178, 0.179, 0.180,
0.181, 0.182, 0.183, 0.184, 0.185, 0.186, 0.187, 0.188, 0.189, 0.190,
0.191, 0.192, 0.193, 0.194, 0.195, 0.196, 0.197, 0.198, 0.199, 0.200,
0.201, 0.202, 0.203, 0.204, 0.205, 0.206, 0.207, 0.208, 0.209, 0.200,
0.201, 0.202, 0.203, 0.204, 0.205, 0.206, 0.207, 0.208, 0.209, 0.210]
weight = 0
n_samples, n_nodes = X.shape
y_pred_agg = np.zeros((n_nodes, n_nodes))
for threshold in t:
for filtering in ["f1", "f2", "f3", "f4"]:
print('Current: %0.3f, %s' % (threshold, filtering))
X_new = tuned_filter(X, LP=filtering, threshold=threshold, weights=True)
pca = PCA(whiten=True, n_components=int(0.8 * n_nodes)).fit(X_new)
y_pred = - pca.get_precision()
if filtering == 'f1':
y_pred_agg += y_pred
weight += 1
elif filtering == 'f2':
y_pred_agg += y_pred * 0.9
weight += 0.9
elif filtering == 'f3':
y_pred_agg += y_pred * 0.01
weight += 0.01
elif filtering == 'f4':
y_pred_agg += y_pred * 0.7
weight += 0.7
return scale(y_pred_agg / weight)
|
|
"""Support for exposing a templated binary sensor."""
from __future__ import annotations
from datetime import timedelta
from functools import partial
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA,
DOMAIN as BINARY_SENSOR_DOMAIN,
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
BinarySensorEntity,
)
from homeassistant.components.template import TriggerUpdateCoordinator
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
CONF_DEVICE_CLASS,
CONF_ENTITY_PICTURE_TEMPLATE,
CONF_FRIENDLY_NAME,
CONF_FRIENDLY_NAME_TEMPLATE,
CONF_ICON,
CONF_ICON_TEMPLATE,
CONF_NAME,
CONF_SENSORS,
CONF_STATE,
CONF_UNIQUE_ID,
CONF_UNIT_OF_MEASUREMENT,
CONF_VALUE_TEMPLATE,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.event import async_call_later
from .const import (
CONF_ATTRIBUTES,
CONF_AVAILABILITY,
CONF_AVAILABILITY_TEMPLATE,
CONF_OBJECT_ID,
CONF_PICTURE,
)
from .template_entity import TemplateEntity
from .trigger_entity import TriggerEntity
CONF_DELAY_ON = "delay_on"
CONF_DELAY_OFF = "delay_off"
CONF_AUTO_OFF = "auto_off"
CONF_ATTRIBUTE_TEMPLATES = "attribute_templates"
LEGACY_FIELDS = {
CONF_ICON_TEMPLATE: CONF_ICON,
CONF_ENTITY_PICTURE_TEMPLATE: CONF_PICTURE,
CONF_AVAILABILITY_TEMPLATE: CONF_AVAILABILITY,
CONF_ATTRIBUTE_TEMPLATES: CONF_ATTRIBUTES,
CONF_FRIENDLY_NAME_TEMPLATE: CONF_NAME,
CONF_FRIENDLY_NAME: CONF_NAME,
CONF_VALUE_TEMPLATE: CONF_STATE,
}
BINARY_SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME): cv.template,
vol.Required(CONF_STATE): cv.template,
vol.Optional(CONF_ICON): cv.template,
vol.Optional(CONF_PICTURE): cv.template,
vol.Optional(CONF_AVAILABILITY): cv.template,
vol.Optional(CONF_ATTRIBUTES): vol.Schema({cv.string: cv.template}),
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(CONF_DELAY_ON): vol.Any(cv.positive_time_period, cv.template),
vol.Optional(CONF_DELAY_OFF): vol.Any(cv.positive_time_period, cv.template),
vol.Optional(CONF_AUTO_OFF): vol.Any(cv.positive_time_period, cv.template),
}
)
LEGACY_BINARY_SENSOR_SCHEMA = vol.All(
cv.deprecated(ATTR_ENTITY_ID),
vol.Schema(
{
vol.Required(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_ICON_TEMPLATE): cv.template,
vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template,
vol.Optional(CONF_ATTRIBUTE_TEMPLATES): vol.Schema(
{cv.string: cv.template}
),
vol.Optional(ATTR_FRIENDLY_NAME): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_DELAY_ON): vol.Any(cv.positive_time_period, cv.template),
vol.Optional(CONF_DELAY_OFF): vol.Any(cv.positive_time_period, cv.template),
vol.Optional(CONF_UNIQUE_ID): cv.string,
}
),
)
def rewrite_legacy_to_modern_conf(cfg: dict[str, dict]) -> list[dict]:
"""Rewrite legacy binary sensor definitions to modern ones."""
sensors = []
for object_id, entity_cfg in cfg.items():
entity_cfg = {**entity_cfg, CONF_OBJECT_ID: object_id}
for from_key, to_key in LEGACY_FIELDS.items():
if from_key not in entity_cfg or to_key in entity_cfg:
continue
val = entity_cfg.pop(from_key)
if isinstance(val, str):
val = template.Template(val)
entity_cfg[to_key] = val
if CONF_NAME not in entity_cfg:
entity_cfg[CONF_NAME] = template.Template(object_id)
sensors.append(entity_cfg)
return sensors
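# Illustrative sketch (keys and templates are made up for documentation, not
# taken from Home Assistant's docs): rewrite_legacy_to_modern_conf() renames
# legacy keys via LEGACY_FIELDS, wraps plain strings in template.Template and
# keeps the object id, so an input like
#
#   {"garage_door": {CONF_VALUE_TEMPLATE: "{{ is_state('cover.garage', 'open') }}",
#                    CONF_FRIENDLY_NAME: "Garage door"}}
#
# becomes roughly
#
#   [{CONF_OBJECT_ID: "garage_door",
#     CONF_STATE: Template("{{ is_state('cover.garage', 'open') }}"),
#     CONF_NAME: Template("Garage door")}]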
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_SENSORS): cv.schema_with_slug_keys(
LEGACY_BINARY_SENSOR_SCHEMA
),
}
)
@callback
def _async_create_template_tracking_entities(
async_add_entities, hass, definitions: list[dict], unique_id_prefix: str | None
):
"""Create the template binary sensors."""
sensors = []
for entity_conf in definitions:
# Still available on legacy
object_id = entity_conf.get(CONF_OBJECT_ID)
value = entity_conf[CONF_STATE]
icon = entity_conf.get(CONF_ICON)
entity_picture = entity_conf.get(CONF_PICTURE)
availability = entity_conf.get(CONF_AVAILABILITY)
attributes = entity_conf.get(CONF_ATTRIBUTES, {})
friendly_name = entity_conf.get(CONF_NAME)
device_class = entity_conf.get(CONF_DEVICE_CLASS)
delay_on_raw = entity_conf.get(CONF_DELAY_ON)
delay_off_raw = entity_conf.get(CONF_DELAY_OFF)
unique_id = entity_conf.get(CONF_UNIQUE_ID)
if unique_id and unique_id_prefix:
unique_id = f"{unique_id_prefix}-{unique_id}"
sensors.append(
BinarySensorTemplate(
hass,
object_id,
friendly_name,
device_class,
value,
icon,
entity_picture,
availability,
delay_on_raw,
delay_off_raw,
attributes,
unique_id,
)
)
async_add_entities(sensors)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the template binary sensors."""
if discovery_info is None:
_async_create_template_tracking_entities(
async_add_entities,
hass,
rewrite_legacy_to_modern_conf(config[CONF_SENSORS]),
None,
)
return
if "coordinator" in discovery_info:
async_add_entities(
TriggerBinarySensorEntity(hass, discovery_info["coordinator"], config)
for config in discovery_info["entities"]
)
return
_async_create_template_tracking_entities(
async_add_entities,
hass,
discovery_info["entities"],
discovery_info["unique_id"],
)
class BinarySensorTemplate(TemplateEntity, BinarySensorEntity):
"""A virtual binary sensor that triggers from another sensor."""
def __init__(
self,
hass: HomeAssistant,
object_id: str | None,
friendly_name: template.Template | None,
device_class: str,
value_template: template.Template,
icon_template: template.Template | None,
entity_picture_template: template.Template | None,
availability_template: template.Template | None,
delay_on_raw,
delay_off_raw,
attribute_templates: dict[str, template.Template],
unique_id: str | None,
):
"""Initialize the Template binary sensor."""
super().__init__(
attribute_templates=attribute_templates,
availability_template=availability_template,
icon_template=icon_template,
entity_picture_template=entity_picture_template,
)
if object_id is not None:
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, object_id, hass=hass
)
self._name: str | None = None
self._friendly_name_template: template.Template | None = friendly_name
# Try to render the name as it can influence the entity ID
if friendly_name:
friendly_name.hass = hass
try:
self._name = friendly_name.async_render(parse_result=False)
except template.TemplateError:
pass
self._device_class = device_class
self._template = value_template
self._state = None
self._delay_cancel = None
self._delay_on = None
self._delay_on_raw = delay_on_raw
self._delay_off = None
self._delay_off_raw = delay_off_raw
self._unique_id = unique_id
async def async_added_to_hass(self):
"""Register callbacks."""
self.add_template_attribute("_state", self._template, None, self._update_state)
if (
self._friendly_name_template is not None
and not self._friendly_name_template.is_static
):
self.add_template_attribute("_name", self._friendly_name_template)
if self._delay_on_raw is not None:
try:
self._delay_on = cv.positive_time_period(self._delay_on_raw)
except vol.Invalid:
self.add_template_attribute(
"_delay_on", self._delay_on_raw, cv.positive_time_period
)
if self._delay_off_raw is not None:
try:
self._delay_off = cv.positive_time_period(self._delay_off_raw)
except vol.Invalid:
self.add_template_attribute(
"_delay_off", self._delay_off_raw, cv.positive_time_period
)
await super().async_added_to_hass()
@callback
def _update_state(self, result):
super()._update_state(result)
if self._delay_cancel:
self._delay_cancel()
self._delay_cancel = None
state = (
None
if isinstance(result, TemplateError)
else template.result_as_boolean(result)
)
if state == self._state:
return
# state without delay
if (
state is None
or (state and not self._delay_on)
or (not state and not self._delay_off)
):
self._state = state
return
@callback
def _set_state(_):
"""Set state of template binary sensor."""
self._state = state
self.async_write_ha_state()
delay = (self._delay_on if state else self._delay_off).total_seconds()
# state with delay. Cancelled if template result changes.
self._delay_cancel = async_call_later(self.hass, delay, _set_state)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self):
"""Return the unique id of this binary sensor."""
return self._unique_id
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
@property
def device_class(self):
"""Return the sensor class of the binary sensor."""
return self._device_class
class TriggerBinarySensorEntity(TriggerEntity, BinarySensorEntity):
"""Sensor entity based on trigger data."""
domain = BINARY_SENSOR_DOMAIN
extra_template_keys = (CONF_STATE,)
def __init__(
self,
hass: HomeAssistant,
coordinator: TriggerUpdateCoordinator,
config: dict,
) -> None:
"""Initialize the entity."""
super().__init__(hass, coordinator, config)
for key in (CONF_DELAY_ON, CONF_DELAY_OFF, CONF_AUTO_OFF):
if isinstance(config.get(key), template.Template):
self._to_render_simple.append(key)
self._parse_result.add(key)
self._delay_cancel = None
self._auto_off_cancel = None
self._state = False
@property
def is_on(self) -> bool:
"""Return state of the sensor."""
return self._state
@callback
def _handle_coordinator_update(self) -> None:
"""Handle update of the data."""
self._process_data()
if self._delay_cancel:
self._delay_cancel()
self._delay_cancel = None
if self._auto_off_cancel:
self._auto_off_cancel()
self._auto_off_cancel = None
if not self.available:
self.async_write_ha_state()
return
raw = self._rendered.get(CONF_STATE)
state = template.result_as_boolean(raw)
key = CONF_DELAY_ON if state else CONF_DELAY_OFF
delay = self._rendered.get(key) or self._config.get(key)
# state without delay. None means rendering failed.
if self._state == state or state is None or delay is None:
self._set_state(state)
return
if not isinstance(delay, timedelta):
try:
delay = cv.positive_time_period(delay)
except vol.Invalid as err:
logging.getLogger(__name__).warning(
"Error rendering %s template: %s", key, err
)
return
# state with delay. Cancelled if new trigger received
self._delay_cancel = async_call_later(
self.hass, delay.total_seconds(), partial(self._set_state, state)
)
@callback
def _set_state(self, state, _=None):
"""Set up auto off."""
self._state = state
self.async_set_context(self.coordinator.data["context"])
self.async_write_ha_state()
if not state:
return
auto_off_time = self._rendered.get(CONF_AUTO_OFF) or self._config.get(
CONF_AUTO_OFF
)
if auto_off_time is None:
return
if not isinstance(auto_off_time, timedelta):
try:
auto_off_time = cv.positive_time_period(auto_off_time)
except vol.Invalid as err:
logging.getLogger(__name__).warning(
"Error rendering %s template: %s", CONF_AUTO_OFF, err
)
return
@callback
def _auto_off(_):
"""Set state of template binary sensor."""
self._state = False
self.async_write_ha_state()
self._auto_off_cancel = async_call_later(
self.hass, auto_off_time.total_seconds(), _auto_off
)
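# Hedged configuration sketch (not part of this module; entity ids, templates
# and values are illustrative only): a minimal legacy YAML setup accepted by
# PLATFORM_SCHEMA above, which async_setup_platform() rewrites through
# rewrite_legacy_to_modern_conf():
#
#   binary_sensor:
#     - platform: template
#       sensors:
#         sun_up:
#           friendly_name: "Sun is up"
#           device_class: light
#           value_template: "{{ state_attr('sun.sun', 'elevation') | float > 0 }}"
#           delay_off: "00:05:00"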
|
|
# strconv.py
# Copyright (c) 2013 Byron Ruth
# BSD License
__version__ = '0.4.0'
from collections import Counter
class TypeInfo(object):
"Sampling and frequency of a type for a sample of values."
def __init__(self, name, size=None, total=None):
self.name = name
self.count = 0
self.sample = []
self.size = size
self.total = total
self.sample_set = set()
def __repr__(self):
return '<{0}: {1} n={2}>'.format(self.__class__.__name__,
self.name, self.count)
def incr(self, n=1):
self.count += n
def add(self, i, value):
if self.size is None or len(self.sample) < self.size:
# No dupes
if value not in self.sample_set:
self.sample_set.add(value)
self.sample.append((i, value))
def freq(self):
if self.total:
return self.count / float(self.total)
return 0.
class Types(object):
"Type information for a sample of values."
def __init__(self, size=None, total=None):
self.size = size
        self.total = total
self.types = {}
def __repr__(self):
types = self.most_common()
label = ', '.join(['{0}={1}'.format(t, i.count) for t, i in types])
return '<{0}: {1}>'.format(self.__class__.__name__, label)
def incr(self, t, n=1):
if t is None:
t = 'unknown'
if t not in self.types:
self.types[t] = TypeInfo(t, self.size, self.total)
self.types[t].incr(n)
def add(self, t, i, value):
if t is None:
t = 'unknown'
if t not in self.types:
self.types[t] = TypeInfo(t, self.size, self.total)
self.types[t].add(i, value)
def set_total(self, total):
self.total = total
for k in self.types:
self.types[k].total = total
def most_common(self, n=None):
if n is None:
n = len(self.types)
c = Counter()
for t in self.types:
c[t] = self.types[t].count
return c.most_common(n)
class Strconv(object):
def __init__(self, converters=()):
self.converters = {}
self._order = []
for name, func in converters:
self.converters[name] = func
self._order.append(name)
def register_converter(self, name, func, priority=None):
if name is None:
raise ValueError('type name cannot be None')
if not callable(func):
raise ValueError('converter functions must be callable')
self.converters[name] = func
if name in self._order:
self._order.remove(name)
if priority is not None and priority < len(self._order):
self._order.insert(priority, name)
else:
self._order.append(name)
def unregister_converter(self, name):
if name in self._order:
self._order.remove(name)
if name in self.converters:
del self.converters[name]
def get_converter(self, name):
if name not in self.converters:
raise KeyError('no converter for type "{0}"'.format(name))
return self.converters[name]
def convert(self, s, include_type=False):
if isinstance(s, str):
for t in self._order:
func = self.converters[t]
try:
v = func(s)
if include_type:
return v, t
return v
except ValueError:
pass
if include_type:
return s, None
return s
def convert_series(self, iterable, include_type=False):
for s in iterable:
yield self.convert(s, include_type=include_type)
def convert_matrix(self, matrix, include_type=False):
for r in matrix:
yield tuple(self.convert(s, include_type=include_type) for s in r)
def infer(self, s, converted=False):
v, t = self.convert(s, include_type=True)
if t and converted:
return type(v)
return t
def infer_series(self, iterable, n=None, size=10):
info = Types(size=size)
i = -1
for i, value in enumerate(iterable):
if n and i >= n:
break
t = self.infer(value)
info.incr(t)
info.add(t, i, value)
i += 1
# No reason to return type info when no data exists
if i == 0:
return
info.set_total(i)
return info
def infer_matrix(self, matrix, n=None, size=10):
infos = []
i = -1
for i, iterable in enumerate(matrix):
if n and i >= n:
break
for j, value in enumerate(iterable):
if i == 0:
infos.append(Types(size=size))
info = infos[j]
t = self.infer(value)
info.incr(t)
info.add(t, i, value)
i += 1
for info in infos:
info.set_total(i)
return infos
# Built-in converters
import re
from datetime import datetime
# Use dateutil for more robust parsing
try:
from dateutil.parser import parse as duparse
except ImportError:
import warnings
warnings.warn('python-dateutil is not installed. As of version 0.5, '
                  'this will be a hard dependency of strconv for '
'datetime parsing. Without it, only a limited set of '
'datetime formats are supported without timezones.')
duparse = None
DATE_FORMATS = (
'%Y-%m-%d',
'%m-%d-%Y',
'%m/%d/%Y',
'%m.%d.%Y',
'%m-%d-%y',
'%B %d, %Y',
'%B %d, %y',
'%b %d, %Y',
'%b %d, %y',
)
TIME_FORMATS = (
'%H:%M:%S',
'%H:%M',
'%I:%M:%S %p',
'%I:%M %p',
'%I:%M',
)
DATE_TIME_SEPS = (' ', 'T')
true_re = re.compile(r'^(t(rue)?|yes)$', re.I)
false_re = re.compile(r'^(f(alse)?|no)$', re.I)
def convert_int(s):
return int(s)
def convert_float(s):
return float(s)
def convert_bool(s):
if true_re.match(s):
return True
if false_re.match(s):
return False
raise ValueError
def convert_datetime(s, date_formats=DATE_FORMATS, time_formats=TIME_FORMATS):
if duparse:
try:
dt = duparse(s)
if dt.time():
                return dt
except TypeError: # parse may throw this in py3
raise ValueError
for df in date_formats:
for tf in time_formats:
for sep in DATE_TIME_SEPS:
f = '{0}{1}{2}'.format(df, sep, tf)
try:
dt = datetime.strptime(s, f)
if dt.time():
return dt
except ValueError:
pass
raise ValueError
def convert_date(s, date_formats=DATE_FORMATS):
if duparse:
try:
return duparse(s).date()
except TypeError: # parse may throw this in py3
raise ValueError
for f in date_formats:
try:
return datetime.strptime(s, f).date()
except ValueError:
pass
raise ValueError
def convert_time(s, time_formats=TIME_FORMATS):
for f in time_formats:
try:
return datetime.strptime(s, f).time()
except ValueError:
pass
raise ValueError
# Initialize default instance and make accessible at the module level
default_strconv = Strconv(converters=[
('int', convert_int),
('float', convert_float),
('bool', convert_bool),
('time', convert_time),
('datetime', convert_datetime),
('date', convert_date),
])
register_converter = default_strconv.register_converter
unregister_converter = default_strconv.unregister_converter
get_converter = default_strconv.get_converter
convert = default_strconv.convert
convert_series = default_strconv.convert_series
convert_matrix = default_strconv.convert_matrix
infer = default_strconv.infer
infer_series = default_strconv.infer_series
infer_matrix = default_strconv.infer_matrix
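# Hedged usage sketch: the module-level helpers exported above delegate to
# `default_strconv`; the expected values below follow directly from the
# built-in converters and their registration order.
if __name__ == '__main__':
    assert convert('5') == 5                        # int converter matches first
    assert convert('5.2') == 5.2                    # falls through to float
    assert convert('yes') is True                   # matched by true_re in convert_bool
    assert convert('n/a') == 'n/a'                  # unconvertible strings pass through
    print(infer('2015-03-01'))                      # -> 'date'
    print(list(convert_series(['1', '2.5', 'x'])))  # -> [1, 2.5, 'x']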
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import sys
from twitter.common.collections import OrderedSet
from pants.base.build_environment import get_scm
from pants.base.cmd_line_spec_parser import CmdLineSpecParser
from pants.base.file_system_project_tree import FileSystemProjectTree
from pants.base.scm_project_tree import ScmProjectTree
from pants.base.workunit import WorkUnit, WorkUnitLabel
from pants.bin.engine_initializer import EngineInitializer
from pants.bin.repro import Reproducer
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.build_graph.build_file_address_mapper import BuildFileAddressMapper
from pants.build_graph.build_file_parser import BuildFileParser
from pants.build_graph.mutable_build_graph import MutableBuildGraph
from pants.engine.round_engine import RoundEngine
from pants.goal.context import Context
from pants.goal.goal import Goal
from pants.goal.run_tracker import RunTracker
from pants.help.help_printer import HelpPrinter
from pants.java.nailgun_executor import NailgunProcessGroup
from pants.pantsd.subsystem.pants_daemon_launcher import PantsDaemonLauncher
from pants.reporting.reporting import Reporting
from pants.source.source_root import SourceRootConfig
from pants.task.task import QuietTaskMixin
from pants.util.filtering import create_filters, wrap_filters
logger = logging.getLogger(__name__)
class GoalRunnerFactory(object):
def __init__(self, root_dir, options, build_config, run_tracker, reporting, build_graph=None,
exiter=sys.exit):
"""
:param str root_dir: The root directory of the pants workspace (aka the "build root").
:param Options options: The global, pre-initialized Options instance.
:param BuildConfiguration build_config: A pre-initialized BuildConfiguration instance.
    :param RunTracker run_tracker: The global, pre-initialized/running RunTracker instance.
:param Reporting reporting: The global, pre-initialized Reporting instance.
:param BuildGraph build_graph: A BuildGraph instance (for graph reuse, optional).
:param func exiter: A function that accepts an exit code value and exits (for tests, Optional).
"""
self._root_dir = root_dir
self._options = options
self._build_config = build_config
self._run_tracker = run_tracker
self._reporting = reporting
self._exiter = exiter
self._goals = []
self._targets = []
self._requested_goals = self._options.goals
self._target_specs = self._options.target_specs
self._help_request = self._options.help_request
self._global_options = options.for_global_scope()
self._tag = self._global_options.tag
self._fail_fast = self._global_options.fail_fast
    self._explain = self._global_options.explain
    self._kill_nailguns = self._global_options.kill_nailguns
    pants_ignore = self._global_options.pants_ignore or []
    self._project_tree = self._get_project_tree(self._global_options.build_file_rev, pants_ignore)
    self._build_file_parser = BuildFileParser(self._build_config, self._root_dir)
    # Will be provided through context.address_mapper.build_ignore_patterns.
    build_ignore_patterns = self._global_options.ignore_patterns or []
self._address_mapper = BuildFileAddressMapper(
self._build_file_parser,
self._project_tree,
build_ignore_patterns,
exclude_target_regexps=self._global_options.exclude_target_regexp
)
self._build_graph = self._select_buildgraph(self._global_options.enable_v2_engine,
self._global_options.pants_ignore,
build_graph)
def _select_buildgraph(self, use_engine, path_ignore_patterns, cached_buildgraph=None):
"""Selects a BuildGraph to use then constructs and returns it.
:param bool use_engine: Whether or not to use the v2 engine to construct the BuildGraph.
:param list path_ignore_patterns: The path ignore patterns from `--pants-ignore`.
:param LegacyBuildGraph cached_buildgraph: A cached graph to reuse, if available.
"""
if cached_buildgraph is not None:
return cached_buildgraph
elif use_engine:
root_specs = EngineInitializer.parse_commandline_to_spec_roots(options=self._options,
build_root=self._root_dir)
graph_helper = EngineInitializer.setup_legacy_graph(path_ignore_patterns)
return graph_helper.create_graph(root_specs)
else:
return MutableBuildGraph(self._address_mapper)
def _get_project_tree(self, build_file_rev, pants_ignore):
"""Creates the project tree for build files for use in a given pants run."""
if build_file_rev:
return ScmProjectTree(self._root_dir, get_scm(), build_file_rev, pants_ignore)
else:
return FileSystemProjectTree(self._root_dir, pants_ignore)
def _expand_goals(self, goals):
"""Check and populate the requested goals for a given run."""
for goal in goals:
try:
self._address_mapper.resolve_spec(goal)
logger.warning("Command-line argument '{0}' is ambiguous and was assumed to be "
"a goal. If this is incorrect, disambiguate it with ./{0}.".format(goal))
except AddressLookupError:
pass
if self._help_request:
help_printer = HelpPrinter(self._options)
result = help_printer.print_help()
self._exiter(result)
self._goals.extend([Goal.by_name(goal) for goal in goals])
def _expand_specs(self, spec_strs, fail_fast):
"""Populate the BuildGraph and target list from a set of input specs."""
with self._run_tracker.new_workunit(name='parse', labels=[WorkUnitLabel.SETUP]):
def filter_for_tag(tag):
return lambda target: tag in map(str, target.tags)
tag_filter = wrap_filters(create_filters(self._tag, filter_for_tag))
# Parse all specs into unique Spec objects.
spec_parser = CmdLineSpecParser(self._root_dir)
specs = OrderedSet()
for spec_str in spec_strs:
specs.add(spec_parser.parse_spec(spec_str))
# Then scan them to generate unique Addresses.
for address in self._build_graph.inject_specs_closure(specs, fail_fast):
target = self._build_graph.get_target(address)
if tag_filter(target):
self._targets.append(target)
def _maybe_launch_pantsd(self):
"""Launches pantsd if configured to do so."""
if self._global_options.enable_pantsd:
      # Avoid runtracker output if pantsd is disabled. Otherwise, show up to inform the user it's on.
with self._run_tracker.new_workunit(name='pantsd', labels=[WorkUnitLabel.SETUP]):
pantsd_launcher = PantsDaemonLauncher.Factory.global_instance().create(EngineInitializer)
pantsd_launcher.maybe_launch()
def _is_quiet(self):
return any(goal.has_task_of_type(QuietTaskMixin) for goal in self._goals) or self._explain
def _setup_context(self):
self._maybe_launch_pantsd()
with self._run_tracker.new_workunit(name='setup', labels=[WorkUnitLabel.SETUP]):
self._expand_goals(self._requested_goals)
self._expand_specs(self._target_specs, self._fail_fast)
# Now that we've parsed the bootstrap BUILD files, and know about the SCM system.
self._run_tracker.run_info.add_scm_info()
# Update the Reporting settings now that we have options and goal info.
invalidation_report = self._reporting.update_reporting(self._global_options,
self._is_quiet(),
self._run_tracker)
context = Context(options=self._options,
run_tracker=self._run_tracker,
target_roots=self._targets,
requested_goals=self._requested_goals,
build_graph=self._build_graph,
build_file_parser=self._build_file_parser,
address_mapper=self._address_mapper,
invalidation_report=invalidation_report)
return context, invalidation_report
def setup(self):
context, invalidation_report = self._setup_context()
return GoalRunner(context=context,
goals=self._goals,
kill_nailguns=self._kill_nailguns,
run_tracker=self._run_tracker,
invalidation_report=invalidation_report,
exiter=self._exiter)
class GoalRunner(object):
"""Lists installed goals or else executes a named goal."""
Factory = GoalRunnerFactory
def __init__(self, context, goals, run_tracker, invalidation_report, kill_nailguns,
exiter=sys.exit):
"""
:param Context context: The global, pre-initialized Context as created by GoalRunnerFactory.
:param list[Goal] goals: The list of goals to act on.
    :param RunTracker run_tracker: The global, pre-initialized/running RunTracker instance.
:param InvalidationReport invalidation_report: An InvalidationReport instance (Optional).
:param bool kill_nailguns: Whether or not to kill nailguns after the run.
:param func exiter: A function that accepts an exit code value and exits (for tests, Optional).
"""
self._context = context
self._goals = goals
self._run_tracker = run_tracker
self._invalidation_report = invalidation_report
self._kill_nailguns = kill_nailguns
self._exiter = exiter
@classmethod
def subsystems(cls):
# Subsystems used outside of any task.
return {SourceRootConfig, Reporting, Reproducer, RunTracker, PantsDaemonLauncher.Factory}
def _execute_engine(self):
workdir = self._context.options.for_global_scope().pants_workdir
if not workdir.endswith('.pants.d'):
self._context.log.error('Pants working directory should end with \'.pants.d\', currently it is {}\n'
.format(workdir))
return 1
unknown_goals = [goal.name for goal in self._goals if not goal.ordered_task_names()]
if unknown_goals:
self._context.log.error('Unknown goal(s): {}\n'.format(' '.join(unknown_goals)))
return 1
engine = RoundEngine()
result = engine.execute(self._context, self._goals)
if self._invalidation_report:
self._invalidation_report.report()
return result
def run(self):
should_kill_nailguns = self._kill_nailguns
try:
result = self._execute_engine()
if result:
self._run_tracker.set_root_outcome(WorkUnit.FAILURE)
except KeyboardInterrupt:
self._run_tracker.set_root_outcome(WorkUnit.FAILURE)
# On ctrl-c we always kill nailguns, otherwise they might keep running
# some heavyweight compilation and gum up the system during a subsequent run.
should_kill_nailguns = True
raise
except Exception:
self._run_tracker.set_root_outcome(WorkUnit.FAILURE)
raise
finally:
# Must kill nailguns only after run_tracker.end() is called, otherwise there may still
# be pending background work that needs a nailgun.
if should_kill_nailguns:
# TODO: This is JVM-specific and really doesn't belong here.
# TODO: Make this more selective? Only kill nailguns that affect state?
# E.g., checkstyle may not need to be killed.
NailgunProcessGroup().killall()
return result
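# Hedged wiring sketch (not from the Pants sources themselves): the runner is
# normally obtained through its factory, roughly
#
#   factory = GoalRunner.Factory(root_dir, options, build_config,
#                                run_tracker, reporting)
#   result = factory.setup().run()
#
# where setup() expands goals/specs into a Context and returns a GoalRunner,
# and run() drives the RoundEngine, returning its result code.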
|
|
# -*- coding: utf8 -*-
from __future__ import unicode_literals
from tornado.web import RequestHandler, HTTPError
from tornado import gen
import logging
import json
import inspect
import os
import pkgutil
from tools import model
from tools.utils import lcFirst, ucFirst, genModules
from solvers.optimizer import Optimizer
from solvers.clusterer import Clusterer
from solvers.classifier import Classifier
from conf import Conf
from mlbExceptions import SolverException
class SolversHandler(RequestHandler):
"""Handle API requests for solver-related data"""
@gen.coroutine
def getSolvers(self):
"""
Route: `GET /api/solvers/list`
Returns the list of existing solver objects
TODO: Add pagination system
"""
def desobjectidfy(solver):
solver['problemId'] = str(solver['problemId'])
return solver
solvers = yield model.getService('solvers').getAll(
orderBy={'implementation': 1, 'name': 1})
solvers = [desobjectidfy(solver) for solver in solvers]
self.write(json.dumps(solvers))
@gen.coroutine
def deleteSolverById(self):
"""
Route: `DELETE /api/solvers/byId`
Remove the solver given by id.
The argument `_id` is required.
"""
_id = self.get_argument('_id')
data = yield model.getService('solvers').deleteById(_id)
self.write(json.dumps(data))
def getSolverClasses(self):
"""
Route: `GET /api/solvers/implementations`
        Return the list of available solver classes, indexed by type.
This writes back to the client an object with the following structure:
        `{<solverType>: {<className>: {
            'description': <description>,
            'parameters': {<parameter name>: <default value>}
        }}}`
"""
result = {
'optimizer': {},
'clusterer': {},
'classifier': {}
}
for moduleName in genModules(['solvers']):
classObj = {}
# for each module, get the actual implementation class
implemModule = __import__(
'solvers.%s' % moduleName, fromlist=[ucFirst(moduleName)])
implemClass = getattr(implemModule, ucFirst(moduleName))
            # now find the arguments of the constructor, removing 'self',
            # 'name' and 'problem', which are not user-configurable parameters
            # specific to this solver.
argspec = inspect.getargspec(implemClass.__init__)
argspec.args.remove('self')
argspec.args.remove('name')
argspec.args.remove('problem')
if argspec.defaults:
classObj['parameters'] = dict(
zip(argspec.args[-len(argspec.defaults):],
argspec.defaults))
else:
classObj['parameters'] = {}
# find the documentation of this object
classObj['description'] = inspect.cleandoc(
inspect.getdoc(implemClass))
# now find inheritance tree to know where this class should be
# saved.
implemClasses = inspect.getmro(implemClass)
if Optimizer in implemClasses and moduleName != 'optimizer':
result['optimizer'][ucFirst(moduleName)] = classObj
if Clusterer in implemClasses and moduleName != 'clusterer':
result['clusterer'][ucFirst(moduleName)] = classObj
if Classifier in implemClasses and moduleName != 'classifier':
result['classifier'][ucFirst(moduleName)] = classObj
self.write(json.dumps(result))
@gen.coroutine
def saveSolver(self):
"""
Route: `POST: /api/solvers/save`
Save a new or update an existing solver.
The following parameters are expected:
* name:string, name of this solver
* parameters:json-encoded dict, association between parameters'
name and value
* implementation:string, name of the related solver class
* visualization:string (optional), name of the script that contains the
visualization javascript code.
* problem:string, id of the problem this solver is designed to solve
* _id:string (optional), the _id of the document to update. If not
          provided, a new document will be inserted.
Writes back the whole inserted or updated document
"""
name = self.get_argument('name')
parameters = json.loads(self.get_argument('parameters'))
implementation = self.get_argument('implementation')
visualization = self.get_argument('visualization', default=None)
problem = self.get_argument('problem')
_id = self.get_argument('_id', default=None)
# retrieve the type of this implementation
# TODO: make sure that the class 'implementation' exists
implemModule = __import__(
'solvers.%s' % lcFirst(implementation),
fromlist=[implementation])
implemClasses = inspect.getmro(getattr(implemModule, implementation))
solverType = None
if Optimizer in implemClasses:
solverType = 'optimizer'
if Clusterer in implemClasses:
solverType = 'clusterer'
if Classifier in implemClasses:
solverType = 'classifier'
        # make sure the implementation implements one of the solver interfaces
if solverType is None:
raise SolverException(
solverType, name, parameters,
"The given implementation (%s) does not implement any solver \
type interface." % (implementation))
        # check that the given visualization exists
if visualization:
            # this will raise an exception if the file does not exist
with open(os.path.join(*(
os.path.split(Conf['scriptFolders']['solverViews']) +
(visualization,)))):
pass
problemObj = yield model.getService('problems').getById(
problem, fields=['_id'])
if not problemObj:
raise SolverException(
solverType, name, parameters,
"The given problem (%s) does not exist!" % (problem))
# perform the actual insert/update
data = {
'type': solverType,
'name': name,
'parameters': parameters,
'visualization': visualization,
'problemId': problem,
'implementation': implementation
}
if _id is None:
_id = yield model.getService('solvers').insert(**data)
else:
yield model.getService('solvers').set(_id, data)
data = yield model.getService('solvers').getById(_id)
self.write(json.dumps(data))
def get(self, action):
actions = {
'implementations': self.getSolverClasses,
'list': self.getSolvers
}
if action in actions:
return actions[action]()
raise HTTPError(404, 'Not Found')
def post(self, action):
actions = {
'save': self.saveSolver
}
if action in actions:
return actions[action]()
raise HTTPError(404, 'Not Found')
def delete(self, action):
actions = {
'byId': self.deleteSolverById
}
if action in actions:
return actions[action]()
raise HTTPError(404, 'Not Found')
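# Hedged usage sketch (routes taken from the docstrings above; host, ids and
# payload values are illustrative only):
#
#   GET    /api/solvers/list             -> JSON list of stored solver documents
#   GET    /api/solvers/implementations  -> {<type>: {<ClassName>: {'description': ...,
#                                                                   'parameters': {...}}}}
#   POST   /api/solvers/save             form fields: name, parameters (JSON dict),
#                                        implementation, problem, and optionally
#                                        visualization and _id
#   DELETE /api/solvers/byId?_id=<id>    -> JSON result of the backend delete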
|