repo_name (string, lengths 5-100) | path (string, lengths 4-375) | copies (string, 991 classes) | size (string, lengths 4-7) | content (string, lengths 666-1M) | license (string, 15 classes) |
---|---|---|---|---|---|
lbeltrame/bcbio-nextgen | bcbio/ngsalign/star.py | 2 | 14590 | import os
import sys
import shutil
import subprocess
import contextlib
from collections import namedtuple
import bcbio.bed as bed
from bcbio.pipeline import config_utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.utils import (safe_makedir, file_exists, is_gzipped)
from bcbio.provenance import do
from bcbio import utils
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio.ngsalign import postalign
from bcbio.bam import fastq
from bcbio.heterogeneity import chromhacks
CLEANUP_FILES = ["Aligned.out.sam", "Log.out", "Log.progress.out"]
ALIGN_TAGS = ["NH", "HI", "NM", "MD", "AS"]
def align(fastq_file, pair_file, ref_file, names, align_dir, data):
if not ref_file:
logger.error("STAR index not found. We don't provide the STAR indexes "
"by default because they are very large. You can install "
"the index for your genome with: bcbio_nextgen.py upgrade "
"--aligners star --genomes genome-build-name --data")
sys.exit(1)
max_hits = 10
srna = True if data["analysis"].lower().startswith("smallrna-seq") else False
srna_opts = ""
if srna:
max_hits = 1000
srna_opts = "--alignIntronMax 1"
config = data["config"]
star_dirs = _get_star_dirnames(align_dir, data, names)
if file_exists(star_dirs.final_out):
data = _update_data(star_dirs.final_out, star_dirs.out_dir, names, data)
out_log_file = os.path.join(align_dir, dd.get_lane(data) + "Log.final.out")
data = dd.update_summary_qc(data, "star", base=out_log_file)
return data
star_path = config_utils.get_program("STAR", config)
def _unpack_fastq(f):
"""Use process substitution instead of readFilesCommand for gzipped inputs.
Prevents issues on shared filesystems that don't support FIFO:
https://github.com/alexdobin/STAR/issues/143
"""
if f and is_gzipped(f):
return "<(gunzip -c %s)" % f
else:
return f
fastq_files = (" ".join([_unpack_fastq(fastq_file), _unpack_fastq(pair_file)])
if pair_file else _unpack_fastq(fastq_file))
num_cores = dd.get_num_cores(data)
gtf_file = dd.get_transcriptome_gtf(data, default=dd.get_gtf_file(data))
if ref_file.endswith("chrLength"):
ref_file = os.path.dirname(ref_file)
if index_has_alts(ref_file):
logger.error(
"STAR is being run on an index with ALTs which STAR is not "
"designed for. Please remake your STAR index or use an ALT-aware "
"aligner like hisat2")
sys.exit(1)
with file_transaction(data, align_dir) as tx_align_dir:
tx_1pass_dir = tx_align_dir + "1pass"
tx_star_dirnames = _get_star_dirnames(tx_1pass_dir, data, names)
tx_out_dir, tx_out_file, tx_out_prefix, tx_final_out = tx_star_dirnames
safe_makedir(tx_1pass_dir)
safe_makedir(tx_out_dir)
cmd = ("{star_path} --genomeDir {ref_file} --readFilesIn {fastq_files} "
"--runThreadN {num_cores} --outFileNamePrefix {tx_out_prefix} "
"--outReadsUnmapped Fastx --outFilterMultimapNmax {max_hits} "
"--outStd BAM_Unsorted {srna_opts} "
"--limitOutSJcollapsed 2000000 "
"--outSAMtype BAM Unsorted "
"--outSAMmapqUnique 60 "
"--outSAMunmapped Within --outSAMattributes %s " % " ".join(ALIGN_TAGS))
cmd += _add_sj_index_commands(fastq_file, ref_file, gtf_file) if not srna else ""
cmd += _read_group_option(names)
if dd.get_fusion_caller(data):
if "arriba" in dd.get_fusion_caller(data):
cmd += (
"--chimSegmentMin 10 --chimOutType WithinBAM "
"--chimJunctionOverhangMin 10 --chimScoreMin 1 --chimScoreDropMax 30 "
"--chimScoreJunctionNonGTAG 0 --chimScoreSeparation 1 "
"--alignSJstitchMismatchNmax 5 -1 5 5 "
"--chimSegmentReadGapMax 3 "
"--peOverlapNbasesMin 10 "
"--alignSplicedMateMapLminOverLmate 0.5 ")
else:
cmd += (" --chimSegmentMin 12 --chimJunctionOverhangMin 12 "
"--chimScoreDropMax 30 --chimSegmentReadGapMax 5 "
"--chimScoreSeparation 5 ")
if "oncofuse" in dd.get_fusion_caller(data):
cmd += "--chimOutType Junctions "
else:
cmd += "--chimOutType WithinBAM "
strandedness = utils.get_in(data, ("config", "algorithm", "strandedness"),
"unstranded").lower()
if strandedness == "unstranded" and not srna:
cmd += " --outSAMstrandField intronMotif "
if not srna:
cmd += " --quantMode TranscriptomeSAM "
resources = config_utils.get_resources("star", data["config"])
if resources.get("options", []):
cmd += " " + " ".join([str(x) for x in resources.get("options", [])])
cmd += " | " + postalign.sam_to_sortbam_cl(data, tx_final_out)
cmd += " > {tx_final_out} "
run_message = "Running 1st pass of STAR aligner on %s and %s" % (fastq_file, ref_file)
do.run(cmd.format(**locals()), run_message, None)
sjfile = get_splicejunction_file(tx_out_dir, data)
sjflag = f"--sjdbFileChrStartEnd {sjfile}" if sjfile else ""
tx_star_dirnames = _get_star_dirnames(tx_align_dir, data, names)
tx_out_dir, tx_out_file, tx_out_prefix, tx_final_out = tx_star_dirnames
safe_makedir(tx_align_dir)
safe_makedir(tx_out_dir)
cmd = ("{star_path} --genomeDir {ref_file} --readFilesIn {fastq_files} "
"--runThreadN {num_cores} --outFileNamePrefix {tx_out_prefix} "
"--outReadsUnmapped Fastx --outFilterMultimapNmax {max_hits} "
"--outStd BAM_Unsorted {srna_opts} "
"--limitOutSJcollapsed 2000000 "
"{sjflag} "
"--outSAMtype BAM Unsorted "
"--outSAMmapqUnique 60 "
"--outSAMunmapped Within --outSAMattributes %s " % " ".join(ALIGN_TAGS))
cmd += _add_sj_index_commands(fastq_file, ref_file, gtf_file) if not srna else ""
cmd += _read_group_option(names)
if dd.get_fusion_caller(data):
if "arriba" in dd.get_fusion_caller(data):
cmd += (
"--chimSegmentMin 10 --chimOutType WithinBAM SoftClip Junctions "
"--chimJunctionOverhangMin 10 --chimScoreMin 1 --chimScoreDropMax 30 "
"--chimScoreJunctionNonGTAG 0 --chimScoreSeparation 1 "
"--alignSJstitchMismatchNmax 5 -1 5 5 "
"--chimSegmentReadGapMax 3 ")
else:
cmd += (" --chimSegmentMin 12 --chimJunctionOverhangMin 12 "
"--chimScoreDropMax 30 --chimSegmentReadGapMax 5 "
"--chimScoreSeparation 5 ")
if "oncofuse" in dd.get_fusion_caller(data):
cmd += "--chimOutType Junctions "
else:
cmd += "--chimOutType WithinBAM "
strandedness = utils.get_in(data, ("config", "algorithm", "strandedness"),
"unstranded").lower()
if strandedness == "unstranded" and not srna:
cmd += " --outSAMstrandField intronMotif "
if not srna:
cmd += " --quantMode TranscriptomeSAM "
resources = config_utils.get_resources("star", data["config"])
if resources.get("options", []):
cmd += " " + " ".join([str(x) for x in resources.get("options", [])])
cmd += " | " + postalign.sam_to_sortbam_cl(data, tx_final_out)
cmd += " > {tx_final_out} "
run_message = "Running 2nd pass of STAR aligner on %s and %s" % (fastq_file, ref_file)
do.run(cmd.format(**locals()), run_message, None)
data = _update_data(star_dirs.final_out, star_dirs.out_dir, names, data)
out_log_file = os.path.join(align_dir, dd.get_lane(data) + "Log.final.out")
data = dd.update_summary_qc(data, "star", base=out_log_file)
return data
StarOutDirs = namedtuple(
'StarOutDirs',
['out_dir', 'out_file', 'out_prefix', 'final_out']
)
def _get_star_dirnames(align_dir, data, names):
ALIGNED_OUT_FILE = "Aligned.out.sam"
out_prefix = os.path.join(align_dir, dd.get_lane(data))
out_file = out_prefix + ALIGNED_OUT_FILE
out_dir = os.path.join(align_dir, "%s_star" % dd.get_lane(data))
final_out = os.path.join(out_dir, "{0}.bam".format(names["sample"]))
return StarOutDirs(out_dir, out_file, out_prefix, final_out)
def _add_sj_index_commands(fq1, ref_file, gtf_file):
"""
newer versions of STAR can generate splice junction databases on the fly
this is preferable since we can tailor it to the read lengths
"""
if _has_sj_index(ref_file):
return ""
else:
rlength = fastq.estimate_maximum_read_length(fq1)
cmd = " --sjdbGTFfile %s " % gtf_file
cmd += " --sjdbOverhang %s " % str(rlength - 1)
return cmd
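# Illustrative sketch of what _add_sj_index_commands produces: for reads of
# length 100 and a hypothetical annotation file "ref-transcripts.gtf" the
# returned fragment is roughly
#   " --sjdbGTFfile ref-transcripts.gtf  --sjdbOverhang 99 "
# (the GTF filename is made up; the overhang is always read length - 1).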
def _has_sj_index(ref_file):
"""this file won't exist if we can do on the fly splice junction indexing"""
return (file_exists(os.path.join(ref_file, "sjdbInfo.txt")) and
(file_exists(os.path.join(ref_file, "transcriptInfo.tab"))))
def _update_data(align_file, out_dir, names, data):
data = dd.set_work_bam(data, align_file)
data = dd.set_align_bam(data, align_file)
transcriptome_file = _move_transcriptome_file(out_dir, names)
data = dd.set_transcriptome_bam(data, transcriptome_file)
sjfile = get_splicejunction_file(out_dir, data)
if sjfile:
data = dd.set_starjunction(data, sjfile)
sjbed = junction2bed(sjfile)
data = dd.set_junction_bed(data, sjbed)
sjchimfile = get_chimericjunction_file(out_dir, data)
data = dd.set_chimericjunction(data, sjchimfile)
return data
def _move_transcriptome_file(out_dir, names):
out_file = os.path.join(out_dir, "{0}.transcriptome.bam".format(names["sample"]))
star_file = os.path.join(out_dir, os.pardir,
"{0}Aligned.toTranscriptome.out.bam".format(names["lane"]))
# if the out_file or the star_file doesn't exist, we didn't run the
# transcriptome mapping
if not file_exists(out_file):
if not file_exists(star_file):
return None
else:
shutil.move(star_file, out_file)
return out_file
def _read_group_option(names):
rg_id = names["rg"]
rg_sample = names["sample"]
rg_library = names["pl"]
rg_platform_unit = names["pu"]
rg_lb = ("LB:%s " % names.get("lb")) if names.get("lb") else ""
return (" --outSAMattrRGline ID:{rg_id} PL:{rg_library} "
"PU:{rg_platform_unit} SM:{rg_sample} {rg_lb}").format(**locals())
def remap_index_fn(ref_file):
"""Map sequence references to equivalent star indexes
"""
return os.path.join(os.path.dirname(os.path.dirname(ref_file)), "star")
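# Example of the mapping above (paths are hypothetical): a sequence reference
# such as /genomes/Hsapiens/hg38/seq/hg38.fa resolves to the sibling STAR
# index directory /genomes/Hsapiens/hg38/star.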
def index(ref_file, out_dir, data):
"""Create a STAR index in the defined reference directory.
"""
(ref_dir, local_file) = os.path.split(ref_file)
gtf_file = dd.get_transcriptome_gtf(data, dd.get_gtf_file(data))
if not utils.file_exists(gtf_file):
raise ValueError("%s not found, could not create a star index." % (gtf_file))
if not utils.file_exists(out_dir):
with tx_tmpdir(data, os.path.dirname(out_dir)) as tx_out_dir:
num_cores = dd.get_cores(data)
cmd = ("STAR --genomeDir {tx_out_dir} --genomeFastaFiles {ref_file} "
"--runThreadN {num_cores} "
"--runMode genomeGenerate --sjdbOverhang 99 --sjdbGTFfile {gtf_file}")
do.run(cmd.format(**locals()), "Index STAR")
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
shutil.move(tx_out_dir, out_dir)
return out_dir
def get_star_version(data):
star_path = config_utils.get_program("STAR", dd.get_config(data))
cmd = "%s --version" % star_path
subp = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
with contextlib.closing(subp.stdout) as stdout:
for line in stdout:
if "STAR_" in line:
version = line.split("STAR_")[1].strip()
return version
def get_chimericjunction_file(out_dir, data):
"""
locate the chimeric splice junction file starting from the alignment directory
"""
samplename = dd.get_sample_name(data)
sjfile = os.path.join(out_dir, os.pardir, f"{samplename}Chimeric.out.junction")
if file_exists(sjfile):
return sjfile
else:
return None
def get_splicejunction_file(out_dir, data):
"""
locate the splicejunction file starting from the alignment directory
"""
samplename = dd.get_sample_name(data)
sjfile = os.path.join(out_dir, os.pardir, f"{samplename}SJ.out.tab")
if file_exists(sjfile):
return sjfile
else:
return None
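# Sketch of the conversion done by junction2bed below, using made-up
# coordinates: a STAR SJ.out.tab record "chr1 14830 14969 ..." is written as
# two BED3 lines, one per junction end:
#   chr1    14830   14830
#   chr1    14969   14969
# before bed.minimize() collapses overlapping intervals.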
def junction2bed(junction_file):
"""
reformat the STAR junction file to BED3 format, one end of the splice junction per line
"""
base, _ = os.path.splitext(junction_file)
out_file = base + "-minimized.bed"
if file_exists(out_file):
return out_file
if not file_exists(junction_file):
return None
with file_transaction(out_file) as tx_out_file:
with open(junction_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
tokens = line.split()
chrom, sj1, sj2 = tokens[0:3]
if int(sj1) > int(sj2):
tmp = sj1
sj1 = sj2
sj2 = tmp
out_handle.write("\t".join([chrom, sj1, sj1]) + "\n")
out_handle.write("\t".join([chrom, sj2, sj2]) + "\n")
minimize = bed.minimize(tx_out_file)
minimize.saveas(tx_out_file)
return out_file
def index_has_alts(ref_file):
name_file = os.path.join(os.path.dirname(ref_file), "chrName.txt")
with open(name_file) as in_handle:
names = [x.strip() for x in in_handle.readlines()]
has_alts = [chromhacks.is_alt(chrom) for chrom in names]
return any(has_alts)
| mit |
francisco-dlp/hyperspy | hyperspy/tests/__init__.py | 4 | 1870 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import os
import warnings
from hyperspy.defaults_parser import preferences
preferences.General.show_progressbar = False
# Check if we should fail on external deprecation messages
fail_on_external = os.environ.pop('FAIL_ON_EXTERNAL_DEPRECATION', False)
if isinstance(fail_on_external, str):
fail_on_external = (fail_on_external.lower() in
['true', 't', '1', 'yes', 'y', 'set'])
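# Illustrative usage (assumed typical invocation): export
# FAIL_ON_EXTERNAL_DEPRECATION=true in the environment before running the
# test suite to turn DeprecationWarnings from external packages into errors.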
if fail_on_external:
warnings.filterwarnings(
'error', category=DeprecationWarning)
# Travis setup has these warnings, so ignore:
warnings.filterwarnings(
'ignore',
r"BaseException\.message has been deprecated as of Python 2\.6",
DeprecationWarning)
# Don't care about warnings in hyperspy in this mode!
warnings.filterwarnings('default', module="hyperspy")
else:
# Fall-back filter: Error
warnings.simplefilter('error')
warnings.filterwarnings(
'ignore', "Failed to import the optional scikit image package",
UserWarning)
# We allow external warnings:
warnings.filterwarnings('default',
module="(?!hyperspy)")
| gpl-3.0 |
sdgdsffdsfff/QConf | test/unit/gtest/scripts/gen_gtest_pred_impl.py | 2538 | 21986 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gen_gtest_pred_impl.py v0.1
Generates the implementation of Google Test predicate assertions and
accompanying tests.
Usage:
gen_gtest_pred_impl.py MAX_ARITY
where MAX_ARITY is a positive integer.
The command generates the implementation of up-to MAX_ARITY-ary
predicate assertions, and writes it to file gtest_pred_impl.h in the
directory where the script is. It also generates the accompanying
unit test in file gtest_pred_impl_unittest.cc.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
import time
# Where this script is.
SCRIPT_DIR = os.path.dirname(sys.argv[0])
# Where to store the generated header.
HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')
# Where to store the generated unit test.
UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
def HeaderPreamble(n):
"""Returns the preamble for the header file.
Args:
n: the maximum arity of the predicate macros to be generated.
"""
# A map that defines the values used in the preamble template.
DEFS = {
'today' : time.strftime('%m/%d/%Y'),
'year' : time.strftime('%Y'),
'command' : '%s %s' % (os.path.basename(sys.argv[0]), n),
'n' : n
}
return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
//
// Implements a family of generic predicate assertion macros.
#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
// Makes sure this header is not included before gtest.h.
#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
# error Do not include gtest_pred_impl.h directly. Include gtest.h instead.
#endif // GTEST_INCLUDE_GTEST_GTEST_H_
// This header implements a family of generic predicate assertion
// macros:
//
// ASSERT_PRED_FORMAT1(pred_format, v1)
// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
// ...
//
// where pred_format is a function or functor that takes n (in the
// case of ASSERT_PRED_FORMATn) values and their source expression
// text, and returns a testing::AssertionResult. See the definition
// of ASSERT_EQ in gtest.h for an example.
//
// If you don't care about formatting, you can use the more
// restrictive version:
//
// ASSERT_PRED1(pred, v1)
// ASSERT_PRED2(pred, v1, v2)
// ...
//
// where pred is an n-ary function or functor that returns bool,
// and the values v1, v2, ..., must support the << operator for
// streaming to std::ostream.
//
// We also define the EXPECT_* variations.
//
// For now we only support predicates whose arity is at most %(n)s.
// Please email [email protected] if you need
// support for higher arities.
// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce. Don't use this in your code.
#define GTEST_ASSERT_(expression, on_failure) \\
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
if (const ::testing::AssertionResult gtest_ar = (expression)) \\
; \\
else \\
on_failure(gtest_ar.failure_message())
""" % DEFS)
def Arity(n):
"""Returns the English name of the given arity."""
if n < 0:
return None
elif n <= 3:
return ['nullary', 'unary', 'binary', 'ternary'][n]
else:
return '%s-ary' % n
def Title(word):
"""Returns the given word in title case. The difference between
this and string's title() method is that Title('4-ary') is '4-ary'
while '4-ary'.title() is '4-Ary'."""
return word[0].upper() + word[1:]
def OneTo(n):
"""Returns the list [1, 2, 3, ..., n]."""
return range(1, n + 1)
def Iter(n, format, sep=''):
"""Given a positive integer n, a format string that contains 0 or
more '%s' format specs, and optionally a separator string, returns
the join of n strings, each formatted with the format string on an
iterator ranged from 1 to n.
Example:
Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
"""
# How many '%s' specs are in format?
spec_count = len(format.split('%s')) - 1
return sep.join([format % (spec_count * (i,)) for i in OneTo(n)])
def ImplementationForArity(n):
"""Returns the implementation of n-ary predicate assertions."""
# A map the defines the values used in the implementation template.
DEFS = {
'n' : str(n),
'vs' : Iter(n, 'v%s', sep=', '),
'vts' : Iter(n, '#v%s', sep=', '),
'arity' : Arity(n),
'Arity' : Title(Arity(n))
}
impl = """
// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
template <typename Pred""" % DEFS
impl += Iter(n, """,
typename T%s""")
impl += """>
AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS
impl += Iter(n, """,
const char* e%s""")
impl += """,
Pred pred"""
impl += Iter(n, """,
const T%s& v%s""")
impl += """) {
if (pred(%(vs)s)) return AssertionSuccess();
""" % DEFS
impl += ' return AssertionFailure() << pred_text << "("'
impl += Iter(n, """
<< e%s""", sep=' << ", "')
impl += ' << ") evaluates to false, where"'
impl += Iter(n, """
<< "\\n" << e%s << " evaluates to " << v%s""")
impl += """;
}
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
// Don't use this in your code.
#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\
on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS
impl += Iter(n, """, \\
#v%s""")
impl += """, \\
pred"""
impl += Iter(n, """, \\
v%s""")
impl += """), on_failure)
// %(Arity)s predicate assertion macros.
#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)
""" % DEFS
return impl
def HeaderPostamble():
"""Returns the postamble for the header file."""
return """
#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
"""
def GenerateFile(path, content):
"""Given a file path and a content string, overwrites it with the
given content."""
print 'Updating file %s . . .' % path
f = file(path, 'w+')
print >>f, content,
f.close()
print 'File %s has been updated.' % path
def GenerateHeader(n):
"""Given the maximum arity n, updates the header file that implements
the predicate assertions."""
GenerateFile(HEADER,
HeaderPreamble(n)
+ ''.join([ImplementationForArity(i) for i in OneTo(n)])
+ HeaderPostamble())
def UnitTestPreamble():
"""Returns the preamble for the unit test file."""
# A map that defines the values used in the preamble template.
DEFS = {
'today' : time.strftime('%m/%d/%Y'),
'year' : time.strftime('%Y'),
'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
}
return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long. If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions. We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon. In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.
#include <iostream>
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"
// A user-defined data type.
struct Bool {
explicit Bool(int val) : value(val != 0) {}
bool operator>(int n) const { return value > Bool(n).value; }
Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
bool operator==(const Bool& rhs) const { return value == rhs.value; }
bool value;
};
// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
return os << (x.value ? "true" : "false");
}
""" % DEFS)
def TestsForArity(n):
"""Returns the tests for n-ary predicate assertions."""
# A map that defines the values used in the template for the tests.
DEFS = {
'n' : n,
'es' : Iter(n, 'e%s', sep=', '),
'vs' : Iter(n, 'v%s', sep=', '),
'vts' : Iter(n, '#v%s', sep=', '),
'tvs' : Iter(n, 'T%s v%s', sep=', '),
'int_vs' : Iter(n, 'int v%s', sep=', '),
'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
'types' : Iter(n, 'typename T%s', sep=', '),
'v_sum' : Iter(n, 'v%s', sep=' + '),
'arity' : Arity(n),
'Arity' : Title(Arity(n)),
}
tests = (
"""// Sample functions/functors for testing %(arity)s predicate assertions.
// A %(arity)s predicate function.
template <%(types)s>
bool PredFunction%(n)s(%(tvs)s) {
return %(v_sum)s > 0;
}
// The following two functions are needed to circumvent a bug in
// gcc 2.95.3, which sometimes has problem with the above template
// function.
bool PredFunction%(n)sInt(%(int_vs)s) {
return %(v_sum)s > 0;
}
bool PredFunction%(n)sBool(%(Bool_vs)s) {
return %(v_sum)s > 0;
}
""" % DEFS)
tests += """
// A %(arity)s predicate functor.
struct PredFunctor%(n)s {
template <%(types)s>
bool operator()(""" % DEFS
tests += Iter(n, 'const T%s& v%s', sep=""",
""")
tests += """) {
return %(v_sum)s > 0;
}
};
""" % DEFS
tests += """
// A %(arity)s predicate-formatter function.
template <%(types)s>
testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS
tests += Iter(n, 'const char* e%s', sep=""",
""")
tests += Iter(n, """,
const T%s& v%s""")
tests += """) {
if (PredFunction%(n)s(%(vs)s))
return testing::AssertionSuccess();
return testing::AssertionFailure()
<< """ % DEFS
tests += Iter(n, 'e%s', sep=' << " + " << ')
tests += """
<< " is expected to be positive, but evaluates to "
<< %(v_sum)s << ".";
}
""" % DEFS
tests += """
// A %(arity)s predicate-formatter functor.
struct PredFormatFunctor%(n)s {
template <%(types)s>
testing::AssertionResult operator()(""" % DEFS
tests += Iter(n, 'const char* e%s', sep=""",
""")
tests += Iter(n, """,
const T%s& v%s""")
tests += """) const {
return PredFormatFunction%(n)s(%(es)s, %(vs)s);
}
};
""" % DEFS
tests += """
// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
class Predicate%(n)sTest : public testing::Test {
protected:
virtual void SetUp() {
expected_to_finish_ = true;
finished_ = false;""" % DEFS
tests += """
""" + Iter(n, 'n%s_ = ') + """0;
}
"""
tests += """
virtual void TearDown() {
// Verifies that each of the predicate's arguments was evaluated
// exactly once."""
tests += ''.join(["""
EXPECT_EQ(1, n%s_) <<
"The predicate assertion didn't evaluate argument %s "
"exactly once.";""" % (i, i + 1) for i in OneTo(n)])
tests += """
// Verifies that the control flow in the test function is expected.
if (expected_to_finish_ && !finished_) {
FAIL() << "The predicate assertion unexpactedly aborted the test.";
} else if (!expected_to_finish_ && finished_) {
FAIL() << "The failed predicate assertion didn't abort the test "
"as expected.";
}
}
// true iff the test function is expected to run to finish.
static bool expected_to_finish_;
// true iff the test function did run to finish.
static bool finished_;
""" % DEFS
tests += Iter(n, """
static int n%s_;""")
tests += """
};
bool Predicate%(n)sTest::expected_to_finish_;
bool Predicate%(n)sTest::finished_;
""" % DEFS
tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
""") % DEFS
tests += """
typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
""" % DEFS
def GenTest(use_format, use_assert, expect_failure,
use_functor, use_user_type):
"""Returns the test for a predicate assertion macro.
Args:
use_format: true iff the assertion is a *_PRED_FORMAT*.
use_assert: true iff the assertion is a ASSERT_*.
expect_failure: true iff the assertion is expected to fail.
use_functor: true iff the first argument of the assertion is
a functor (as opposed to a function)
use_user_type: true iff the predicate functor/function takes
argument(s) of a user-defined type.
Example:
GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
of a successful EXPECT_PRED_FORMATn() that takes a functor
whose arguments have built-in types."""
if use_assert:
assrt = 'ASSERT' # 'assert' is reserved, so we cannot use
# that identifier here.
else:
assrt = 'EXPECT'
assertion = assrt + '_PRED'
if use_format:
pred_format = 'PredFormat'
assertion += '_FORMAT'
else:
pred_format = 'Pred'
assertion += '%(n)s' % DEFS
if use_functor:
pred_format_type = 'functor'
pred_format += 'Functor%(n)s()'
else:
pred_format_type = 'function'
pred_format += 'Function%(n)s'
if not use_format:
if use_user_type:
pred_format += 'Bool'
else:
pred_format += 'Int'
test_name = pred_format_type.title()
if use_user_type:
arg_type = 'user-defined type (Bool)'
test_name += 'OnUserType'
if expect_failure:
arg = 'Bool(n%s_++)'
else:
arg = 'Bool(++n%s_)'
else:
arg_type = 'built-in type (int)'
test_name += 'OnBuiltInType'
if expect_failure:
arg = 'n%s_++'
else:
arg = '++n%s_'
if expect_failure:
successful_or_failed = 'failed'
expected_or_not = 'expected.'
test_name += 'Failure'
else:
successful_or_failed = 'successful'
expected_or_not = 'UNEXPECTED!'
test_name += 'Success'
# A map that defines the values used in the test template.
defs = DEFS.copy()
defs.update({
'assert' : assrt,
'assertion' : assertion,
'test_name' : test_name,
'pf_type' : pred_format_type,
'pf' : pred_format,
'arg_type' : arg_type,
'arg' : arg,
'successful' : successful_or_failed,
'expected' : expected_or_not,
})
test = """
// Tests a %(successful)s %(assertion)s where the
// predicate-formatter is a %(pf_type)s on a %(arg_type)s.
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs
indent = (len(assertion) + 3)*' '
extra_indent = ''
if expect_failure:
extra_indent = ' '
if use_assert:
test += """
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE({ // NOLINT"""
else:
test += """
EXPECT_NONFATAL_FAILURE({ // NOLINT"""
test += '\n' + extra_indent + """ %(assertion)s(%(pf)s""" % defs
test = test % defs
test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
test += ');\n' + extra_indent + ' finished_ = true;\n'
if expect_failure:
test += ' }, "");\n'
test += '}\n'
return test
# Generates tests for all 2**5 = 32 combinations.
tests += ''.join([GenTest(use_format, use_assert, expect_failure,
use_functor, use_user_type)
for use_format in [0, 1]
for use_assert in [0, 1]
for expect_failure in [0, 1]
for use_functor in [0, 1]
for use_user_type in [0, 1]
])
return tests
def UnitTestPostamble():
"""Returns the postamble for the tests."""
return ''
def GenerateUnitTest(n):
"""Returns the tests for up-to n-ary predicate assertions."""
GenerateFile(UNIT_TEST,
UnitTestPreamble()
+ ''.join([TestsForArity(i) for i in OneTo(n)])
+ UnitTestPostamble())
def _Main():
"""The entry point of the script. Generates the header file and its
unit test."""
if len(sys.argv) != 2:
print __doc__
print 'Author: ' + __author__
sys.exit(1)
n = int(sys.argv[1])
GenerateHeader(n)
GenerateUnitTest(n)
if __name__ == '__main__':
_Main()
| bsd-2-clause |
dablak/boto | boto/resultset.py | 20 | 6557 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.s3.user import User
class ResultSet(list):
"""
The ResultSet is used to pass results back from the Amazon services
to the client. It is light wrapper around Python's :py:class:`list` class,
with some additional methods for parsing XML results from AWS.
Because I don't really want any dependencies on external libraries,
I'm using the standard SAX parser that comes with Python. The good news is
that it's quite fast and efficient but it makes some things rather
difficult.
You can pass in, as the marker_elem parameter, a list of tuples.
Each tuple contains a string as the first element which represents
the XML element that the resultset needs to be on the lookout for
and a Python class as the second element of the tuple. Each time the
specified element is found in the XML, a new instance of the class
will be created and popped onto the stack.
:ivar str next_token: A hash used to assist in paging through very long
result sets. In most cases, passing this value to certain methods
will give you another 'page' of results.
"""
def __init__(self, marker_elem=None):
list.__init__(self)
if isinstance(marker_elem, list):
self.markers = marker_elem
else:
self.markers = []
self.marker = None
self.key_marker = None
self.next_marker = None # avail when delimiter used
self.next_key_marker = None
self.next_upload_id_marker = None
self.next_version_id_marker = None
self.next_generation_marker = None
self.version_id_marker = None
self.is_truncated = False
self.next_token = None
self.status = True
def startElement(self, name, attrs, connection):
for t in self.markers:
if name == t[0]:
obj = t[1](connection)
self.append(obj)
return obj
if name == 'Owner':
# Makes owner available for get_service and
# perhaps other lists where not handled by
# another element.
self.owner = User()
return self.owner
return None
def to_boolean(self, value, true_value='true'):
if value == true_value:
return True
else:
return False
def endElement(self, name, value, connection):
if name == 'IsTruncated':
self.is_truncated = self.to_boolean(value)
elif name == 'Marker':
self.marker = value
elif name == 'KeyMarker':
self.key_marker = value
elif name == 'NextMarker':
self.next_marker = value
elif name == 'NextKeyMarker':
self.next_key_marker = value
elif name == 'VersionIdMarker':
self.version_id_marker = value
elif name == 'NextVersionIdMarker':
self.next_version_id_marker = value
elif name == 'NextGenerationMarker':
self.next_generation_marker = value
elif name == 'UploadIdMarker':
self.upload_id_marker = value
elif name == 'NextUploadIdMarker':
self.next_upload_id_marker = value
elif name == 'Bucket':
self.bucket = value
elif name == 'MaxUploads':
self.max_uploads = int(value)
elif name == 'MaxItems':
self.max_items = int(value)
elif name == 'Prefix':
self.prefix = value
elif name == 'return':
self.status = self.to_boolean(value)
elif name == 'StatusCode':
self.status = self.to_boolean(value, 'Success')
elif name == 'ItemName':
self.append(value)
elif name == 'NextToken':
self.next_token = value
elif name == 'nextToken':
self.next_token = value
# Code exists which expects nextToken to be available, so we
# set it here to remain backwards-compatible.
self.nextToken = value
elif name == 'BoxUsage':
try:
connection.box_usage += float(value)
except:
pass
elif name == 'IsValid':
self.status = self.to_boolean(value, 'True')
else:
setattr(self, name, value)
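# A minimal usage sketch (the element name and result class are illustrative,
# not part of this module): a connection builds a ResultSet with marker
# tuples and feeds the raw XML response through boto's SAX handler:
#
#   import xml.sax
#   import boto.handler
#   rs = ResultSet([('item', SomeResultClass)])
#   xml.sax.parseString(response_body, boto.handler.XmlHandler(rs, connection))
#
# Each <item> element then yields a SomeResultClass instance appended to rs.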
class BooleanResult(object):
def __init__(self, marker_elem=None):
self.status = True
self.request_id = None
self.box_usage = None
def __repr__(self):
if self.status:
return 'True'
else:
return 'False'
def __nonzero__(self):
return self.status
def startElement(self, name, attrs, connection):
return None
def to_boolean(self, value, true_value='true'):
if value == true_value:
return True
else:
return False
def endElement(self, name, value, connection):
if name == 'return':
self.status = self.to_boolean(value)
elif name == 'StatusCode':
self.status = self.to_boolean(value, 'Success')
elif name == 'IsValid':
self.status = self.to_boolean(value, 'True')
elif name == 'RequestId':
self.request_id = value
elif name == 'requestId':
self.request_id = value
elif name == 'BoxUsage':
self.box_usage = value
else:
setattr(self, name, value)
| mit |
windinthew/audacity | lib-src/lv2/lv2/plugins/eg-fifths.lv2/waflib/Logs.py | 196 | 4755 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,re,traceback,sys
_nocolor=os.environ.get('NOCOLOR','no')not in('no','0','false')
try:
if not _nocolor:
import waflib.ansiterm
except ImportError:
pass
try:
import threading
except ImportError:
if not'JOBS'in os.environ:
os.environ['JOBS']='1'
else:
wlock=threading.Lock()
class sync_stream(object):
def __init__(self,stream):
self.stream=stream
self.encoding=self.stream.encoding
def write(self,txt):
try:
wlock.acquire()
self.stream.write(txt)
self.stream.flush()
finally:
wlock.release()
def fileno(self):
return self.stream.fileno()
def flush(self):
self.stream.flush()
def isatty(self):
return self.stream.isatty()
if not os.environ.get('NOSYNC',False):
if id(sys.stdout)==id(sys.__stdout__):
sys.stdout=sync_stream(sys.stdout)
sys.stderr=sync_stream(sys.stderr)
import logging
LOG_FORMAT="%(asctime)s %(c1)s%(zone)s%(c2)s %(message)s"
HOUR_FORMAT="%H:%M:%S"
zones=''
verbose=0
colors_lst={'USE':True,'BOLD':'\x1b[01;1m','RED':'\x1b[01;31m','GREEN':'\x1b[32m','YELLOW':'\x1b[33m','PINK':'\x1b[35m','BLUE':'\x1b[01;34m','CYAN':'\x1b[36m','NORMAL':'\x1b[0m','cursor_on':'\x1b[?25h','cursor_off':'\x1b[?25l',}
got_tty=not os.environ.get('TERM','dumb')in['dumb','emacs']
if got_tty:
try:
got_tty=sys.stderr.isatty()and sys.stdout.isatty()
except AttributeError:
got_tty=False
if(not got_tty and os.environ.get('TERM','dumb')!='msys')or _nocolor:
colors_lst['USE']=False
def get_term_cols():
return 80
try:
import struct,fcntl,termios
except ImportError:
pass
else:
if got_tty:
def get_term_cols_real():
dummy_lines,cols=struct.unpack("HHHH",fcntl.ioctl(sys.stderr.fileno(),termios.TIOCGWINSZ,struct.pack("HHHH",0,0,0,0)))[:2]
return cols
try:
get_term_cols_real()
except Exception:
pass
else:
get_term_cols=get_term_cols_real
get_term_cols.__doc__="""
Get the console width in characters.
:return: the number of characters per line
:rtype: int
"""
def get_color(cl):
if not colors_lst['USE']:return''
return colors_lst.get(cl,'')
class color_dict(object):
def __getattr__(self,a):
return get_color(a)
def __call__(self,a):
return get_color(a)
colors=color_dict()
re_log=re.compile(r'(\w+): (.*)',re.M)
class log_filter(logging.Filter):
def __init__(self,name=None):
pass
def filter(self,rec):
rec.c1=colors.PINK
rec.c2=colors.NORMAL
rec.zone=rec.module
if rec.levelno>=logging.INFO:
if rec.levelno>=logging.ERROR:
rec.c1=colors.RED
elif rec.levelno>=logging.WARNING:
rec.c1=colors.YELLOW
else:
rec.c1=colors.GREEN
return True
m=re_log.match(rec.msg)
if m:
rec.zone=m.group(1)
rec.msg=m.group(2)
if zones:
return getattr(rec,'zone','')in zones or'*'in zones
elif not verbose>2:
return False
return True
class formatter(logging.Formatter):
def __init__(self):
logging.Formatter.__init__(self,LOG_FORMAT,HOUR_FORMAT)
def format(self,rec):
if rec.levelno>=logging.WARNING or rec.levelno==logging.INFO:
try:
msg=rec.msg.decode('utf-8')
except Exception:
msg=rec.msg
return'%s%s%s'%(rec.c1,msg,rec.c2)
return logging.Formatter.format(self,rec)
log=None
def debug(*k,**kw):
if verbose:
k=list(k)
k[0]=k[0].replace('\n',' ')
global log
log.debug(*k,**kw)
def error(*k,**kw):
global log
log.error(*k,**kw)
if verbose>2:
st=traceback.extract_stack()
if st:
st=st[:-1]
buf=[]
for filename,lineno,name,line in st:
buf.append(' File "%s", line %d, in %s'%(filename,lineno,name))
if line:
buf.append(' %s'%line.strip())
if buf:log.error("\n".join(buf))
def warn(*k,**kw):
global log
log.warn(*k,**kw)
def info(*k,**kw):
global log
log.info(*k,**kw)
def init_log():
global log
log=logging.getLogger('waflib')
log.handlers=[]
log.filters=[]
hdlr=logging.StreamHandler()
hdlr.setFormatter(formatter())
log.addHandler(hdlr)
log.addFilter(log_filter())
log.setLevel(logging.DEBUG)
def make_logger(path,name):
logger=logging.getLogger(name)
hdlr=logging.FileHandler(path,'w')
formatter=logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
return logger
def make_mem_logger(name,to_log,size=10000):
from logging.handlers import MemoryHandler
logger=logging.getLogger(name)
hdlr=MemoryHandler(size,target=to_log)
formatter=logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.memhandler=hdlr
logger.setLevel(logging.DEBUG)
return logger
def pprint(col,str,label='',sep='\n'):
sys.stderr.write("%s%s%s %s%s"%(colors(col),str,colors.NORMAL,label,sep))
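# Illustrative call: Logs.pprint('CYAN', 'configuring') writes the message to
# stderr in cyan (when the terminal supports color) followed by a newline.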
| gpl-2.0 |
bob-white/UnityIronPythonConsole | Assets/IronPythonConsole/Plugins/Lib/repr.py | 417 | 4296 | """Redo the builtin repr() (representation) but with limits on most sizes."""
__all__ = ["Repr","repr"]
import __builtin__
from itertools import islice
class Repr:
def __init__(self):
self.maxlevel = 6
self.maxtuple = 6
self.maxlist = 6
self.maxarray = 5
self.maxdict = 4
self.maxset = 6
self.maxfrozenset = 6
self.maxdeque = 6
self.maxstring = 30
self.maxlong = 40
self.maxother = 20
def repr(self, x):
return self.repr1(x, self.maxlevel)
def repr1(self, x, level):
typename = type(x).__name__
if ' ' in typename:
parts = typename.split()
typename = '_'.join(parts)
if hasattr(self, 'repr_' + typename):
return getattr(self, 'repr_' + typename)(x, level)
else:
s = __builtin__.repr(x)
if len(s) > self.maxother:
i = max(0, (self.maxother-3)//2)
j = max(0, self.maxother-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
n = len(x)
if level <= 0 and n:
s = '...'
else:
newlevel = level - 1
repr1 = self.repr1
pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
if n > maxiter: pieces.append('...')
s = ', '.join(pieces)
if n == 1 and trail: right = trail + right
return '%s%s%s' % (left, s, right)
def repr_tuple(self, x, level):
return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')
def repr_list(self, x, level):
return self._repr_iterable(x, level, '[', ']', self.maxlist)
def repr_array(self, x, level):
header = "array('%s', [" % x.typecode
return self._repr_iterable(x, level, header, '])', self.maxarray)
def repr_set(self, x, level):
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'set([', '])', self.maxset)
def repr_frozenset(self, x, level):
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'frozenset([', '])',
self.maxfrozenset)
def repr_deque(self, x, level):
return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)
def repr_dict(self, x, level):
n = len(x)
if n == 0: return '{}'
if level <= 0: return '{...}'
newlevel = level - 1
repr1 = self.repr1
pieces = []
for key in islice(_possibly_sorted(x), self.maxdict):
keyrepr = repr1(key, newlevel)
valrepr = repr1(x[key], newlevel)
pieces.append('%s: %s' % (keyrepr, valrepr))
if n > self.maxdict: pieces.append('...')
s = ', '.join(pieces)
return '{%s}' % (s,)
def repr_str(self, x, level):
s = __builtin__.repr(x[:self.maxstring])
if len(s) > self.maxstring:
i = max(0, (self.maxstring-3)//2)
j = max(0, self.maxstring-3-i)
s = __builtin__.repr(x[:i] + x[len(x)-j:])
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_long(self, x, level):
s = __builtin__.repr(x) # XXX Hope this isn't too slow...
if len(s) > self.maxlong:
i = max(0, (self.maxlong-3)//2)
j = max(0, self.maxlong-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_instance(self, x, level):
try:
s = __builtin__.repr(x)
# Bugs in x.__repr__() can cause arbitrary
# exceptions -- then make up something
except Exception:
return '<%s instance at %x>' % (x.__class__.__name__, id(x))
if len(s) > self.maxstring:
i = max(0, (self.maxstring-3)//2)
j = max(0, self.maxstring-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def _possibly_sorted(x):
# Since not all sequences of items can be sorted and comparison
# functions may raise arbitrary exceptions, return an unsorted
# sequence in that case.
try:
return sorted(x)
except Exception:
return list(x)
aRepr = Repr()
repr = aRepr.repr
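# Illustrative example (output depends on the limits configured above):
#   >>> from repr import repr as short_repr
#   >>> short_repr(range(1000))
#   '[0, 1, 2, 3, 4, 5, ...]'
# whereas the builtin repr() would print all 1000 elements.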
| mpl-2.0 |
matt-bernhardt/trapp | trapp/check_games.py | 1 | 1339 | # -*- coding: utf-8 -*-
from trapp.checker import Checker
from trapp.competition import Competition
class CheckerGames(Checker):
def reviewCompetition(self, competition, year):
self.log.message('Reviewing competition ' + str(competition))
# Get years this competition was held
sql = ("SELECT DISTINCT(YEAR(MatchTime)) AS MatchYear, "
" COUNT(ID) AS Games "
"FROM tbl_games "
"WHERE MatchTypeID = %s AND YEAR(MatchTime) >= %s "
"GROUP BY YEAR(MatchTime) "
"ORDER BY MatchYear ASC")
rs = self.db.query(sql, (competition, year, ))
if (rs.with_rows):
records = rs.fetchall()
for index, item in enumerate(records):
self.output.message(str(competition) + ',' +
str(item[0]) + ',' +
str(item[1]))
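# The loop above emits one CSV row per season; with hypothetical values the
# output produced by checkGames() would look like:
#   Competition,Year,Games
#   1,1996,160
#   1,1997,161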
def checkGames(self):
# What year are we starting our checks
startYear = 1990
# Label Columns
self.output.message('Competition,Year,Games')
# Get Competitions list
c = Competition()
c.connectDB()
competitions = c.loadAll()
# Do work
[self.reviewCompetition(record['CompetitionID'], startYear)
for record
in competitions]
| gpl-2.0 |
jorik041/plaso | plaso/parsers/winreg_plugins/mrulistex.py | 1 | 18044 | # -*- coding: utf-8 -*-
"""This file contains MRUListEx Windows Registry plugins."""
import abc
import logging
import construct
from plaso.events import windows_events
from plaso.lib import binary
from plaso.parsers import winreg
from plaso.parsers.shared import shell_items
from plaso.parsers.winreg_plugins import interface
# A mixin class is used here to not to have the duplicate functionality
# to parse the MRUListEx Registry values. However multiple inheritance
# and thus mixins are to be used sparsely in this codebase, hence we need
# to find a better solution in not needing to distinguish between key and
# value plugins.
# TODO: refactor Registry key and value plugin to rid ourselves of the mixin.
class MRUListExPluginMixin(object):
"""Class for common MRUListEx Windows Registry plugin functionality."""
_MRULISTEX_STRUCT = construct.Range(
1, 500, construct.ULInt32(u'entry_number'))
@abc.abstractmethod
def _ParseMRUListExEntryValue(
self, parser_mediator, key, entry_index, entry_number, **kwargs):
"""Parses the MRUListEx entry value.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: the Registry key (instance of winreg.WinRegKey) that contains
the MRUListEx value.
entry_index: integer value representing the MRUListEx entry index.
entry_number: integer value representing the entry number.
Returns:
A string containing the value.
"""
def _ParseMRUListExValue(self, key):
"""Parses the MRUListEx value in a given Registry key.
Args:
key: the Registry key (instance of winreg.WinRegKey) that contains
the MRUListEx value.
Returns:
A MRUListEx value generator, which returns the MRU index number
and entry value.
"""
mru_list_value = key.GetValue(u'MRUListEx')
# The key exists but does not contain a value named "MRUListEx".
if not mru_list_value:
return enumerate([])
try:
mru_list = self._MRULISTEX_STRUCT.parse(mru_list_value.data)
except construct.FieldError:
logging.warning(u'[{0:s}] Unable to parse the MRU key: {1:s}'.format(
self.NAME, key.path))
return enumerate([])
return enumerate(mru_list)
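# Illustrative example (bytes made up): an MRUListEx value of
#   02 00 00 00  00 00 00 00  01 00 00 00  ff ff ff ff
# parses to the entry numbers [2, 0, 1, 0xffffffff]; the caller walks the
# entries in that order and stops at the 0xffffffff terminator.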
def _ParseMRUListExKey(
self, parser_mediator, key, registry_type=None, codepage=u'cp1252'):
"""Extract event objects from a MRUListEx Registry key.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: the Registry key (instance of winreg.WinRegKey).
registry_type: Optional Registry type string. The default is None.
codepage: Optional extended ASCII string codepage. The default is cp1252.
"""
text_dict = {}
for entry_index, entry_number in self._ParseMRUListExValue(key):
# TODO: detect if list ends prematurely.
# MRU lists are terminated with 0xffffffff (-1).
if entry_number == 0xffffffff:
break
value_string = self._ParseMRUListExEntryValue(
parser_mediator, key, entry_index, entry_number, codepage=codepage)
value_text = u'Index: {0:d} [MRU Value {1:d}]'.format(
entry_index + 1, entry_number)
text_dict[value_text] = value_string
event_object = windows_events.WindowsRegistryEvent(
key.last_written_timestamp, key.path, text_dict,
offset=key.offset, registry_type=registry_type,
source_append=': MRUListEx')
parser_mediator.ProduceEvent(event_object)
class MRUListExStringPlugin(interface.ValuePlugin, MRUListExPluginMixin):
"""Windows Registry plugin to parse a string MRUListEx."""
NAME = u'mrulistex_string'
DESCRIPTION = u'Parser for Most Recently Used (MRU) Registry data.'
REG_TYPE = u'any'
REG_VALUES = frozenset([u'MRUListEx', u'0'])
URLS = [
u'http://forensicartifacts.com/2011/02/recentdocs/',
u'https://github.com/libyal/winreg-kb/wiki/MRU-keys']
_STRING_STRUCT = construct.Struct(
u'string_and_shell_item',
construct.RepeatUntil(
lambda obj, ctx: obj == b'\x00\x00', construct.Field(u'string', 2)))
def _ParseMRUListExEntryValue(
self, parser_mediator, key, entry_index, entry_number, **unused_kwargs):
"""Parses the MRUListEx entry value.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: the Registry key (instance of winreg.WinRegKey) that contains
the MRUListEx value.
entry_index: integer value representing the MRUListEx entry index.
entry_number: integer value representing the entry number.
Returns:
A string containing the value.
"""
value_string = u''
value = key.GetValue(u'{0:d}'.format(entry_number))
if value is None:
logging.debug(
u'[{0:s}] Missing MRUListEx entry value: {1:d} in key: {2:s}.'.format(
self.NAME, entry_number, key.path))
elif value.DataIsString():
value_string = value.data
elif value.DataIsBinaryData():
logging.debug((
u'[{0:s}] Non-string MRUListEx entry value: {1:d} parsed as string '
u'in key: {2:s}.').format(self.NAME, entry_number, key.path))
utf16_stream = binary.ByteStreamCopyToUtf16Stream(value.data)
try:
value_string = utf16_stream.decode(u'utf-16-le')
except UnicodeDecodeError as exception:
value_string = binary.HexifyBuffer(utf16_stream)
logging.warning((
u'[{0:s}] Unable to decode UTF-16 stream: {1:s} in MRUListEx entry '
u'value: {2:d} in key: {3:s} with error: {4:s}').format(
self.NAME, value_string, entry_number, key.path, exception))
return value_string
def GetEntries(
self, parser_mediator, key=None, registry_type=None, codepage=u'cp1252',
**kwargs):
"""Extract event objects from a Registry key containing a MRUListEx value.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: Optional Registry key (instance of winreg.WinRegKey).
The default is None.
registry_type: Optional Registry type string. The default is None.
codepage: Optional extended ASCII string codepage. The default is cp1252.
"""
self._ParseMRUListExKey(
parser_mediator, key, registry_type=registry_type, codepage=codepage)
def Process(self, parser_mediator, key=None, codepage=u'cp1252', **kwargs):
"""Determine if we can process this Registry key or not.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: Optional Windows Registry key (instance of WinRegKey).
The default is None.
codepage: Optional extended ASCII string codepage. The default is cp1252.
"""
# Prevent this plugin triggering on sub paths of non-string MRUListEx
# values.
if (u'BagMRU' in key.path or u'Explorer\\StreamMRU' in key.path or
u'\\Explorer\\ComDlg32\\OpenSavePidlMRU' in key.path):
return
super(MRUListExStringPlugin, self).Process(
parser_mediator, key=key, codepage=codepage)
class MRUListExShellItemListPlugin(interface.KeyPlugin, MRUListExPluginMixin):
"""Windows Registry plugin to parse a shell item list MRUListEx."""
NAME = u'mrulistex_shell_item_list'
DESCRIPTION = u'Parser for Most Recently Used (MRU) Registry data.'
REG_TYPE = u'any'
REG_KEYS = frozenset([
# The regular expression indicated a file extension (.jpg) or '*'.
(u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\ComDlg32\\'
u'OpenSavePidlMRU'),
u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\StreamMRU'])
def _ParseMRUListExEntryValue(
self, parser_mediator, key, entry_index, entry_number, codepage=u'cp1252',
**unused_kwargs):
"""Parses the MRUListEx entry value.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: the Registry key (instance of winreg.WinRegKey) that contains
the MRUListEx value.
entry_index: integer value representing the MRUListEx entry index.
entry_number: integer value representing the entry number.
codepage: Optional extended ASCII string codepage. The default is cp1252.
Returns:
A string containing the value.
"""
value_string = u''
value = key.GetValue(u'{0:d}'.format(entry_number))
if value is None:
logging.debug(
u'[{0:s}] Missing MRUListEx entry value: {1:d} in key: {2:s}.'.format(
self.NAME, entry_number, key.path))
elif not value.DataIsBinaryData():
logging.debug((
u'[{0:s}] Non-binary MRUListEx entry value: {1:d} in key: '
u'{2:s}.').format(self.NAME, entry_number, key.path))
elif value.data:
shell_items_parser = shell_items.ShellItemsParser(key.path)
shell_items_parser.UpdateChainAndParse(
parser_mediator, value.data, None, codepage=codepage)
value_string = u'Shell item path: {0:s}'.format(
shell_items_parser.CopyToPath())
return value_string
def GetEntries(
self, parser_mediator, key=None, registry_type=None, codepage=u'cp1252',
**kwargs):
"""Extract event objects from a Registry key containing a MRUListEx value.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: Optional Registry key (instance of winreg.WinRegKey).
The default is None.
registry_type: Optional Registry type string. The default is None.
codepage: Optional extended ASCII string codepage. The default is cp1252.
"""
if key.name != u'OpenSavePidlMRU':
self._ParseMRUListExKey(
parser_mediator, key, registry_type=registry_type, codepage=codepage)
if key.name == u'OpenSavePidlMRU':
# For the OpenSavePidlMRU MRUListEx we also need to parse its subkeys
# since the Registry key path does not support wildcards yet.
for subkey in key.GetSubkeys():
self._ParseMRUListExKey(
parser_mediator, subkey, registry_type=registry_type,
codepage=codepage)
class MRUListExStringAndShellItemPlugin(
interface.KeyPlugin, MRUListExPluginMixin):
"""Windows Registry plugin to parse a string and shell item MRUListEx."""
NAME = u'mrulistex_string_and_shell_item'
DESCRIPTION = u'Parser for Most Recently Used (MRU) Registry data.'
REG_TYPE = u'any'
REG_KEYS = frozenset([
u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\RecentDocs'])
_STRING_AND_SHELL_ITEM_STRUCT = construct.Struct(
u'string_and_shell_item',
construct.RepeatUntil(
lambda obj, ctx: obj == b'\x00\x00', construct.Field(u'string', 2)),
construct.Anchor(u'shell_item'))
def _ParseMRUListExEntryValue(
self, parser_mediator, key, entry_index, entry_number, codepage=u'cp1252',
**unused_kwargs):
"""Parses the MRUListEx entry value.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: the Registry key (instance of winreg.WinRegKey) that contains
the MRUListEx value.
entry_index: integer value representing the MRUListEx entry index.
entry_number: integer value representing the entry number.
codepage: Optional extended ASCII string codepage. The default is cp1252.
Returns:
A string containing the value.
"""
value_string = u''
value = key.GetValue(u'{0:d}'.format(entry_number))
if value is None:
logging.debug(
u'[{0:s}] Missing MRUListEx entry value: {1:d} in key: {2:s}.'.format(
self.NAME, entry_number, key.path))
elif not value.DataIsBinaryData():
logging.debug((
u'[{0:s}] Non-binary MRUListEx entry value: {1:d} in key: '
u'{2:s}.').format(self.NAME, entry_number, key.path))
elif value.data:
value_struct = self._STRING_AND_SHELL_ITEM_STRUCT.parse(value.data)
try:
# The struct includes the end-of-string character that we need
# to strip off.
path = b''.join(value_struct.string).decode(u'utf16')[:-1]
except UnicodeDecodeError as exception:
logging.warning((
u'[{0:s}] Unable to decode string MRUListEx entry value: {1:d} '
u'in key: {2:s} with error: {3:s}').format(
self.NAME, entry_number, key.path, exception))
path = u''
if path:
shell_item_list_data = value.data[value_struct.shell_item:]
if not shell_item_list_data:
logging.debug((
            u'[{0:s}] Missing shell item in MRUListEx entry value: {1:d} '
u'in key: {2:s}').format(self.NAME, entry_number, key.path))
value_string = u'Path: {0:s}'.format(path)
else:
shell_items_parser = shell_items.ShellItemsParser(key.path)
shell_items_parser.UpdateChainAndParse(
parser_mediator, shell_item_list_data, None, codepage=codepage)
value_string = u'Path: {0:s}, Shell item: [{1:s}]'.format(
path, shell_items_parser.CopyToPath())
return value_string
def GetEntries(
self, parser_mediator, key=None, registry_type=None, codepage=u'cp1252',
**kwargs):
"""Extract event objects from a Registry key containing a MRUListEx value.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: Optional Registry key (instance of winreg.WinRegKey).
The default is None.
registry_type: Optional Registry type string. The default is None.
codepage: Optional extended ASCII string codepage. The default is cp1252.
"""
self._ParseMRUListExKey(
parser_mediator, key, registry_type=registry_type, codepage=codepage)
if key.name == u'RecentDocs':
# For the RecentDocs MRUListEx we also need to parse its subkeys
# since the Registry key path does not support wildcards yet.
for subkey in key.GetSubkeys():
self._ParseMRUListExKey(
parser_mediator, subkey, registry_type=registry_type,
codepage=codepage)
class MRUListExStringAndShellItemListPlugin(
interface.KeyPlugin, MRUListExPluginMixin):
"""Windows Registry plugin to parse a string and shell item list MRUListEx."""
NAME = u'mrulistex_string_and_shell_item_list'
DESCRIPTION = u'Parser for Most Recently Used (MRU) Registry data.'
REG_TYPE = u'any'
REG_KEYS = frozenset([
(u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\ComDlg32\\'
u'LastVisitedPidlMRU')])
_STRING_AND_SHELL_ITEM_LIST_STRUCT = construct.Struct(
u'string_and_shell_item',
construct.RepeatUntil(
lambda obj, ctx: obj == b'\x00\x00', construct.Field(u'string', 2)),
construct.Anchor(u'shell_item_list'))
def _ParseMRUListExEntryValue(
self, parser_mediator, key, entry_index, entry_number, codepage=u'cp1252',
**unused_kwargs):
"""Parses the MRUListEx entry value.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: the Registry key (instance of winreg.WinRegKey) that contains
the MRUListEx value.
entry_index: integer value representing the MRUListEx entry index.
entry_number: integer value representing the entry number.
codepage: Optional extended ASCII string codepage. The default is cp1252.
Returns:
A string containing the value.
"""
value_string = u''
value = key.GetValue(u'{0:d}'.format(entry_number))
if value is None:
logging.debug(
u'[{0:s}] Missing MRUListEx entry value: {1:d} in key: {2:s}.'.format(
self.NAME, entry_number, key.path))
elif not value.DataIsBinaryData():
logging.debug((
u'[{0:s}] Non-binary MRUListEx entry value: {1:d} in key: '
u'{2:s}.').format(self.NAME, entry_number, key.path))
elif value.data:
value_struct = self._STRING_AND_SHELL_ITEM_LIST_STRUCT.parse(value.data)
try:
# The struct includes the end-of-string character that we need
# to strip off.
path = b''.join(value_struct.string).decode(u'utf16')[:-1]
except UnicodeDecodeError as exception:
logging.warning((
u'[{0:s}] Unable to decode string MRUListEx entry value: {1:d} '
u'in key: {2:s} with error: {3:s}').format(
self.NAME, entry_number, key.path, exception))
path = u''
if path:
shell_item_list_data = value.data[value_struct.shell_item_list:]
if not shell_item_list_data:
logging.debug((
            u'[{0:s}] Missing shell item in MRUListEx entry value: {1:d} '
u'in key: {2:s}').format(self.NAME, entry_number, key.path))
value_string = u'Path: {0:s}'.format(path)
else:
shell_items_parser = shell_items.ShellItemsParser(key.path)
shell_items_parser.UpdateChainAndParse(
parser_mediator, shell_item_list_data, None, codepage=codepage)
value_string = u'Path: {0:s}, Shell item path: {1:s}'.format(
path, shell_items_parser.CopyToPath())
return value_string
def GetEntries(
self, parser_mediator, key=None, registry_type=None, codepage=u'cp1252',
**kwargs):
"""Extract event objects from a Registry key containing a MRUListEx value.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: Optional Registry key (instance of winreg.WinRegKey).
The default is None.
registry_type: Optional Registry type string. The default is None.
codepage: Optional extended ASCII string codepage. The default is cp1252.
"""
self._ParseMRUListExKey(
parser_mediator, key, registry_type=registry_type, codepage=codepage)
winreg.WinRegistryParser.RegisterPlugins([
MRUListExStringPlugin, MRUListExShellItemListPlugin,
MRUListExStringAndShellItemPlugin, MRUListExStringAndShellItemListPlugin])
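# Illustrative sketch (not part of the plugins above): the MRUListEx value
# itself is typically an array of little-endian 32-bit entry numbers that is
# terminated by 0xffffffff. A stand-alone decoder could look roughly like the
# following; the function name is hypothetical and only added for illustration.
def _ExampleDecodeMRUListExOrder(raw_data):
  """Returns the MRUListEx entry numbers in most-recently-used order."""
  import struct
  entry_numbers = []
  for offset in range(0, len(raw_data) - 3, 4):
    entry_number = struct.unpack(u'<I', raw_data[offset:offset + 4])[0]
    if entry_number == 0xffffffff:
      break
    entry_numbers.append(entry_number)
  return entry_numbers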
| apache-2.0 |
MangoMangoDevelopment/neptune | lib/std_msgs-0.5.10/gen/_Duration.py | 2 | 3637 | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from std_msgs/Duration.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import genpy
class Duration(genpy.Message):
_md5sum = "3e286caf4241d664e55f3ad380e2ae46"
_type = "std_msgs/Duration"
_has_header = False #flag to mark the presence of a Header object
_full_text = """duration data
"""
__slots__ = ['data']
_slot_types = ['duration']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
data
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Duration, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.data is None:
self.data = genpy.Duration()
else:
self.data = genpy.Duration()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_2i.pack(_x.data.secs, _x.data.nsecs))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.data is None:
self.data = genpy.Duration()
end = 0
_x = self
start = end
end += 8
(_x.data.secs, _x.data.nsecs,) = _struct_2i.unpack(str[start:end])
self.data.canon()
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_2i.pack(_x.data.secs, _x.data.nsecs))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.data is None:
self.data = genpy.Duration()
end = 0
_x = self
start = end
end += 8
(_x.data.secs, _x.data.nsecs,) = _struct_2i.unpack(str[start:end])
self.data.canon()
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_2i = struct.Struct("<2i")
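# Illustrative round-trip sketch (added for clarity; not produced by genpy).
# It serializes a Duration message into an in-memory buffer and reads it back;
# the helper name below is hypothetical.
def _example_duration_roundtrip():
  from io import BytesIO
  message = Duration(data=genpy.Duration(3, 500000000))
  buff = BytesIO()
  message.serialize(buff)
  restored = Duration()
  restored.deserialize(buff.getvalue())
  return restored.data.secs, restored.data.nsecs  # expected: (3, 500000000)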
| bsd-3-clause |
kivio/pysllo | docs/conf.py | 2 | 9751 | # -*- coding: utf-8 -*-
#
# Pysllo documentation build configuration file, created by
# sphinx-quickstart on Tue May 31 19:45:48 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
autodoc_member_order = 'bysource'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pysllo'
copyright = u'2016, Marcin Karkocha'
author = u'Marcin Karkocha'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'manni'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'fixed_sidebar': True,
'analytics_id': 'UA-79713650-1',
'github_user': 'kivio',
'github_repo': 'pysllo',
'github_banner': True
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Pysllo v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = "pysllo2.png"
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico)
# being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pysllodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Pysllo.tex', u'Pysllo Documentation',
u'Marcin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pysllo', u'Pysllo Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Pysllo', u'Pysllo Documentation',
author, 'Pysllo', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
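# Illustrative build commands (not part of the generated configuration): with
# this conf.py in place, the HTML documentation would typically be built from
# the docs/ directory with either of the following:
#
#   sphinx-build -b html . _build/html
#   make html          # if the sphinx-quickstart Makefile is present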
| bsd-3-clause |
cgcgbcbc/django-xadmin | xadmin/plugins/chart.py | 17 | 5683 |
import datetime
import decimal
import calendar
from django.template import loader
from django.http import HttpResponseNotFound
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse
from django.utils.encoding import smart_unicode
from django.db import models
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _, ugettext
from xadmin.sites import site
from xadmin.views import BaseAdminPlugin, ListAdminView
from xadmin.views.dashboard import ModelBaseWidget, widget_manager
from xadmin.util import lookup_field, label_for_field, force_unicode, json
@widget_manager.register
class ChartWidget(ModelBaseWidget):
widget_type = 'chart'
description = _('Show models simple chart.')
template = 'xadmin/widgets/chart.html'
widget_icon = 'fa fa-bar-chart-o'
def convert(self, data):
self.list_params = data.pop('params', {})
self.chart = data.pop('chart', None)
def setup(self):
super(ChartWidget, self).setup()
self.charts = {}
self.one_chart = False
model_admin = self.admin_site._registry[self.model]
chart = self.chart
if hasattr(model_admin, 'data_charts'):
if chart and chart in model_admin.data_charts:
self.charts = {chart: model_admin.data_charts[chart]}
self.one_chart = True
if self.title is None:
self.title = model_admin.data_charts[chart].get('title')
else:
self.charts = model_admin.data_charts
if self.title is None:
self.title = ugettext(
"%s Charts") % self.model._meta.verbose_name_plural
def filte_choices_model(self, model, modeladmin):
return bool(getattr(modeladmin, 'data_charts', None)) and \
super(ChartWidget, self).filte_choices_model(model, modeladmin)
def get_chart_url(self, name, v):
return self.model_admin_url('chart', name) + "?" + urlencode(self.list_params)
def context(self, context):
context.update({
'charts': [{"name": name, "title": v['title'], 'url': self.get_chart_url(name, v)} for name, v in self.charts.items()],
})
# Media
def media(self):
return self.vendor('flot.js', 'xadmin.plugin.charts.js')
class JSONEncoder(DjangoJSONEncoder):
def default(self, o):
if isinstance(o, (datetime.date, datetime.datetime)):
return calendar.timegm(o.timetuple()) * 1000
elif isinstance(o, decimal.Decimal):
return str(o)
else:
try:
return super(JSONEncoder, self).default(o)
except Exception:
return smart_unicode(o)
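# Illustrative sketch (not part of xadmin): the encoder above turns datetime
# values into the millisecond timestamps expected by flot's time axis and
# renders Decimal values as strings. The helper name below is hypothetical.
def _example_encode_chart_point():
    point = (datetime.datetime(2014, 1, 1), decimal.Decimal('9.5'))
    return json.dumps(point, cls=JSONEncoder, ensure_ascii=False)
    # expected result: '[1388534400000, "9.5"]'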
class ChartsPlugin(BaseAdminPlugin):
data_charts = {}
def init_request(self, *args, **kwargs):
return bool(self.data_charts)
def get_chart_url(self, name, v):
return self.admin_view.model_admin_url('chart', name) + self.admin_view.get_query_string()
# Media
def get_media(self, media):
return media + self.vendor('flot.js', 'xadmin.plugin.charts.js')
# Block Views
def block_results_top(self, context, nodes):
context.update({
'charts': [{"name": name, "title": v['title'], 'url': self.get_chart_url(name, v)} for name, v in self.data_charts.items()],
})
nodes.append(loader.render_to_string('xadmin/blocks/model_list.results_top.charts.html', context_instance=context))
class ChartsView(ListAdminView):
data_charts = {}
def get_ordering(self):
if 'order' in self.chart:
return self.chart['order']
else:
return super(ChartsView, self).get_ordering()
def get(self, request, name):
if name not in self.data_charts:
return HttpResponseNotFound()
self.chart = self.data_charts[name]
self.x_field = self.chart['x-field']
y_fields = self.chart['y-field']
self.y_fields = (
y_fields,) if type(y_fields) not in (list, tuple) else y_fields
datas = [{"data":[], "label": force_unicode(label_for_field(
i, self.model, model_admin=self))} for i in self.y_fields]
self.make_result_list()
for obj in self.result_list:
xf, attrs, value = lookup_field(self.x_field, obj, self)
for i, yfname in enumerate(self.y_fields):
yf, yattrs, yv = lookup_field(yfname, obj, self)
datas[i]["data"].append((value, yv))
option = {'series': {'lines': {'show': True}, 'points': {'show': False}},
'grid': {'hoverable': True, 'clickable': True}}
try:
xfield = self.opts.get_field(self.x_field)
if type(xfield) in (models.DateTimeField, models.DateField, models.TimeField):
option['xaxis'] = {'mode': "time", 'tickLength': 5}
if type(xfield) is models.DateField:
option['xaxis']['timeformat'] = "%y/%m/%d"
elif type(xfield) is models.TimeField:
option['xaxis']['timeformat'] = "%H:%M:%S"
else:
option['xaxis']['timeformat'] = "%y/%m/%d %H:%M:%S"
except Exception:
pass
option.update(self.chart.get('option', {}))
content = {'data': datas, 'option': option}
result = json.dumps(content, cls=JSONEncoder, ensure_ascii=False)
return HttpResponse(result)
site.register_plugin(ChartsPlugin, ListAdminView)
site.register_modelview(r'^chart/(.+)/$', ChartsView, name='%s_%s_chart')
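# Illustrative configuration sketch (not part of this module): the chart
# plugin and view above activate for a ModelAdmin-style options class that
# defines ``data_charts``; the model and field names below are hypothetical.
#
# class RecordAdmin(object):
#     data_charts = {
#         'user_count': {
#             'title': u'User records',
#             'x-field': 'date',
#             'y-field': ('user_count', 'view_count'),
#             'order': ('date',),
#         },
#     }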
| bsd-3-clause |
omarkhan/ansible-modules-core | cloud/google/gce_pd.py | 130 | 9532 | #!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_pd
version_added: "1.4"
short_description: utilize GCE persistent disk resources
description:
- This module can create and destroy unformatted GCE persistent disks
U(https://developers.google.com/compute/docs/disks#persistentdisks).
It also supports attaching and detaching disks from running instances.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
detach_only:
description:
- do not destroy the disk, merely detach it from an instance
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
instance_name:
description:
- instance name if you wish to attach or detach the disk
required: false
default: null
aliases: []
mode:
description:
- GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
required: false
default: "READ_ONLY"
choices: ["READ_WRITE", "READ_ONLY"]
aliases: []
name:
description:
- name of the disk
required: true
default: null
aliases: []
size_gb:
description:
- whole integer size of disk (in GB) to create, default is 10 GB
required: false
default: 10
aliases: []
image:
description:
- the source image to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
snapshot:
description:
- the source snapshot to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
state:
description:
- desired state of the persistent disk
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
zone:
description:
- zone in which to create the disk
required: false
default: "us-central1-b"
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
disk_type:
version_added: "1.9"
description:
- type of disk provisioned
required: false
default: "pd-standard"
choices: ["pd-standard", "pd-ssd"]
aliases: []
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3"
author: "Eric Johnson (@erjohnso) <[email protected]>"
'''
EXAMPLES = '''
# Simple attachment action to an existing instance
- local_action:
module: gce_pd
instance_name: notlocalhost
size_gb: 5
name: pd
'''
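# Additional illustrative examples (not part of the module documentation
# above); disk, image, instance and zone names are placeholders.
#
# # Create a 50 GB SSD disk from an image and attach it read/write
# - local_action:
#     module: gce_pd
#     name: pd-data
#     size_gb: 50
#     disk_type: pd-ssd
#     image: debian-7
#     instance_name: notlocalhost
#     mode: READ_WRITE
#     zone: us-central1-b
#
# # Detach the disk from the instance but keep it around
# - local_action:
#     module: gce_pd
#     name: pd-data
#     instance_name: notlocalhost
#     detach_only: yes
#     state: absent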
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError, ResourceInUseError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
def main():
module = AnsibleModule(
argument_spec = dict(
detach_only = dict(type='bool'),
instance_name = dict(),
mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
name = dict(required=True),
size_gb = dict(default=10),
disk_type = dict(default='pd-standard'),
image = dict(),
snapshot = dict(),
state = dict(default='present'),
zone = dict(default='us-central1-b'),
service_account_email = dict(),
pem_file = dict(),
project_id = dict(),
)
)
if not HAS_LIBCLOUD:
module.fail_json(msg='libcloud with GCE support (0.13.3+) is required for this module')
gce = gce_connect(module)
detach_only = module.params.get('detach_only')
instance_name = module.params.get('instance_name')
mode = module.params.get('mode')
name = module.params.get('name')
size_gb = module.params.get('size_gb')
disk_type = module.params.get('disk_type')
image = module.params.get('image')
snapshot = module.params.get('snapshot')
state = module.params.get('state')
zone = module.params.get('zone')
if detach_only and not instance_name:
module.fail_json(
msg='Must specify an instance name when detaching a disk',
changed=False)
disk = inst = None
changed = is_attached = False
json_output = { 'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type }
if detach_only:
json_output['detach_only'] = True
json_output['detached_from_instance'] = instance_name
if instance_name:
# user wants to attach/detach from an existing instance
try:
inst = gce.ex_get_node(instance_name, zone)
# is the disk attached?
for d in inst.extra['disks']:
if d['deviceName'] == name:
is_attached = True
json_output['attached_mode'] = d['mode']
json_output['attached_to_instance'] = inst.name
except:
pass
# find disk if it already exists
try:
disk = gce.ex_get_volume(name)
json_output['size_gb'] = int(disk.size)
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
# user wants a disk to exist. If "instance_name" is supplied the user
# also wants it attached
if state in ['active', 'present']:
if not size_gb:
module.fail_json(msg="Must supply a size_gb", changed=False)
try:
size_gb = int(round(float(size_gb)))
if size_gb < 1:
raise Exception
except:
module.fail_json(msg="Must supply a size_gb larger than 1 GB",
changed=False)
if instance_name and inst is None:
module.fail_json(msg='Instance %s does not exist in zone %s' % (
instance_name, zone), changed=False)
if not disk:
if image is not None and snapshot is not None:
module.fail_json(
msg='Cannot give both image (%s) and snapshot (%s)' % (
image, snapshot), changed=False)
lc_image = None
lc_snapshot = None
if image is not None:
lc_image = gce.ex_get_image(image)
elif snapshot is not None:
lc_snapshot = gce.ex_get_snapshot(snapshot)
try:
disk = gce.create_volume(
size_gb, name, location=zone, image=lc_image,
snapshot=lc_snapshot, ex_disk_type=disk_type)
except ResourceExistsError:
pass
except QuotaExceededError:
module.fail_json(msg='Requested disk size exceeds quota',
changed=False)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['size_gb'] = size_gb
if image is not None:
json_output['image'] = image
if snapshot is not None:
json_output['snapshot'] = snapshot
changed = True
if inst and not is_attached:
try:
gce.attach_volume(inst, disk, device=name, ex_mode=mode)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['attached_to_instance'] = inst.name
json_output['attached_mode'] = mode
changed = True
# user wants to delete a disk (or perhaps just detach it).
if state in ['absent', 'deleted'] and disk:
if inst and is_attached:
try:
gce.detach_volume(disk, ex_node=inst)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = True
if not detach_only:
try:
gce.destroy_volume(disk)
except ResourceInUseError, e:
module.fail_json(msg=str(e.value), changed=False)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = True
json_output['changed'] = changed
module.exit_json(**json_output)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
if __name__ == '__main__':
main()
| gpl-3.0 |
pfmooney/dd-agent | checks.d/mesos.py | 22 | 5151 | # stdlib
from hashlib import md5
import time
# 3rd party
import requests
# project
from checks import AgentCheck
class Mesos(AgentCheck):
SERVICE_CHECK_NAME = "mesos.can_connect"
def check(self, instance):
"""
DEPRECATED:
        This generic Mesosphere check is deprecated and no longer actively developed. It will be
removed in a future version of the Datadog Agent.
Please head over to the Mesosphere master and slave specific checks.
"""
self.warning("This check is deprecated in favor of Mesos master and slave specific checks."
" It will be removed in a future version of the Datadog Agent.")
if 'url' not in instance:
raise Exception('Mesos instance missing "url" value.')
# Load values from the instance config
url = instance['url']
instance_tags = instance.get('tags', [])
default_timeout = self.init_config.get('default_timeout', 5)
timeout = float(instance.get('timeout', default_timeout))
response = self.get_master_roles(url, timeout)
if response is not None:
for role in response['roles']:
tags = ['role:' + role['name']] + instance_tags
self.gauge('mesos.role.frameworks', len(role['frameworks']), tags=tags)
self.gauge('mesos.role.weight', role['weight'], tags=tags)
resources = role['resources']
for attr in ['cpus','mem']:
if attr in resources:
self.gauge('mesos.role.' + attr, resources[attr], tags=tags)
response = self.get_master_stats(url, timeout)
if response is not None:
tags = instance_tags
for key in iter(response):
self.gauge('mesos.stats.' + key, response[key], tags=tags)
response = self.get_master_state(url, timeout)
if response is not None:
tags = instance_tags
for attr in ['deactivated_slaves','failed_tasks','finished_tasks','killed_tasks','lost_tasks','staged_tasks','started_tasks']:
self.gauge('mesos.state.' + attr, response[attr], tags=tags)
for framework in response['frameworks']:
tags = ['framework:' + framework['id']] + instance_tags
resources = framework['resources']
for attr in ['cpus','mem']:
if attr in resources:
self.gauge('mesos.state.framework.' + attr, resources[attr], tags=tags)
for slave in response['slaves']:
tags = ['mesos','slave:' + slave['id']] + instance_tags
resources = slave['resources']
for attr in ['cpus','mem','disk']:
if attr in resources:
self.gauge('mesos.state.slave.' + attr, resources[attr], tags=tags)
def get_master_roles(self, url, timeout):
return self.get_json(url + "/master/roles.json", timeout)
def get_master_stats(self, url, timeout):
return self.get_json(url + "/master/stats.json", timeout)
def get_master_state(self, url, timeout):
return self.get_json(url + "/master/state.json", timeout)
def get_json(self, url, timeout):
# Use a hash of the URL as an aggregation key
aggregation_key = md5(url).hexdigest()
tags = ["url:%s" % url]
msg = None
status = None
try:
r = requests.get(url, timeout=timeout)
if r.status_code != 200:
self.status_code_event(url, r, aggregation_key)
status = AgentCheck.CRITICAL
msg = "Got %s when hitting %s" % (r.status_code, url)
else:
status = AgentCheck.OK
msg = "Mesos master instance detected at %s " % url
except requests.exceptions.Timeout as e:
# If there's a timeout
self.timeout_event(url, timeout, aggregation_key)
msg = "%s seconds timeout when hitting %s" % (timeout, url)
status = AgentCheck.CRITICAL
except Exception as e:
msg = str(e)
status = AgentCheck.CRITICAL
finally:
self.service_check(self.SERVICE_CHECK_NAME, status, tags=tags, message=msg)
if status is AgentCheck.CRITICAL:
self.warning(msg)
return None
return r.json()
def timeout_event(self, url, timeout, aggregation_key):
self.event({
'timestamp': int(time.time()),
'event_type': 'http_check',
'msg_title': 'URL timeout',
'msg_text': '%s timed out after %s seconds.' % (url, timeout),
'aggregation_key': aggregation_key
})
def status_code_event(self, url, r, aggregation_key):
self.event({
'timestamp': int(time.time()),
'event_type': 'http_check',
'msg_title': 'Invalid reponse code for %s' % url,
'msg_text': '%s returned a status of %s' % (url, r.status_code),
'aggregation_key': aggregation_key
})
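# Illustrative configuration sketch (not part of this check): a minimal
# conf.d/mesos.yaml for this deprecated check might look like the following;
# the URL and tag values are placeholders.
#
# init_config:
#   default_timeout: 5
#
# instances:
#   - url: http://localhost:5050
#     timeout: 8
#     tags:
#       - cluster:test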
| bsd-3-clause |
40223137/cdag7test37 | static/Brython3.1.3-20150514-095342/Lib/weakref.py | 769 | 11495 | """Weak reference support for Python.
This module is an implementation of PEP 205:
http://www.python.org/dev/peps/pep-0205/
"""
# Naming convention: Variables named "wr" are weak reference objects;
# they are called this instead of "ref" to avoid name collisions with
# the module-global ref() function imported from _weakref.
from _weakref import (
getweakrefcount,
getweakrefs,
ref,
proxy,
CallableProxyType,
ProxyType,
ReferenceType)
from _weakrefset import WeakSet, _IterationGuard
import collections # Import after _weakref to avoid circular import.
ProxyTypes = (ProxyType, CallableProxyType)
__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
"WeakKeyDictionary", "ReferenceType", "ProxyType",
"CallableProxyType", "ProxyTypes", "WeakValueDictionary",
"WeakSet"]
class WeakValueDictionary(collections.MutableMapping):
"""Mapping class that references values weakly.
Entries in the dictionary will be discarded when no strong
reference to the value exists anymore
"""
# We inherit the constructor without worrying about the input
# dictionary; since it uses our .update() method, we get the right
# checks (if the other dictionary is a WeakValueDictionary,
# objects are unwrapped on the way out, and we always wrap on the
# way in).
def __init__(self, *args, **kw):
def remove(wr, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(wr.key)
else:
del self.data[wr.key]
self._remove = remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
self.data = d = {}
self.update(*args, **kw)
def _commit_removals(self):
l = self._pending_removals
d = self.data
# We shouldn't encounter any KeyError, because this method should
# always be called *before* mutating the dict.
while l:
del d[l.pop()]
def __getitem__(self, key):
o = self.data[key]()
if o is None:
raise KeyError(key)
else:
return o
def __delitem__(self, key):
if self._pending_removals:
self._commit_removals()
del self.data[key]
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __contains__(self, key):
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def __repr__(self):
return "<WeakValueDictionary at %s>" % id(self)
def __setitem__(self, key, value):
if self._pending_removals:
self._commit_removals()
self.data[key] = KeyedRef(value, self._remove, key)
def copy(self):
new = WeakValueDictionary()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[key] = o
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[deepcopy(key, memo)] = o
return new
def get(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
return default
else:
o = wr()
if o is None:
                # This should only happen if the referent has already been
                # garbage collected between the lookup and the call above.
return default
else:
return o
def items(self):
with _IterationGuard(self):
for k, wr in self.data.items():
v = wr()
if v is not None:
yield k, v
def keys(self):
with _IterationGuard(self):
for k, wr in self.data.items():
if wr() is not None:
yield k
__iter__ = keys
def itervaluerefs(self):
"""Return an iterator that yields the weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
with _IterationGuard(self):
for wr in self.data.values():
yield wr
def values(self):
with _IterationGuard(self):
for wr in self.data.values():
obj = wr()
if obj is not None:
yield obj
def popitem(self):
if self._pending_removals:
self._commit_removals()
while True:
key, wr = self.data.popitem()
o = wr()
if o is not None:
return key, o
def pop(self, key, *args):
if self._pending_removals:
self._commit_removals()
try:
o = self.data.pop(key)()
except KeyError:
if args:
return args[0]
raise
if o is None:
raise KeyError(key)
else:
return o
def setdefault(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
if self._pending_removals:
self._commit_removals()
self.data[key] = KeyedRef(default, self._remove, key)
return default
else:
return wr()
def update(self, dict=None, **kwargs):
if self._pending_removals:
self._commit_removals()
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, o in dict.items():
d[key] = KeyedRef(o, self._remove, key)
if len(kwargs):
self.update(kwargs)
def valuerefs(self):
"""Return a list of weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
return list(self.data.values())
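# Illustrative sketch (not part of the standard module): entries vanish from a
# WeakValueDictionary once the last strong reference to the value is dropped.
# The class, function and key names below are only for demonstration.
def _weak_value_cache_example():
    class Value:
        pass
    cache = WeakValueDictionary()
    strong = Value()
    cache['key'] = strong
    assert cache['key'] is strong
    del strong  # drop the only strong reference to the value
    return 'key' in cache  # expected to become False once the value is collected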
class KeyedRef(ref):
"""Specialized reference that includes a key corresponding to the value.
This is used in the WeakValueDictionary to avoid having to create
a function object for each key stored in the mapping. A shared
callback object can use the 'key' attribute of a KeyedRef instead
of getting a reference to the key from an enclosing scope.
"""
__slots__ = "key",
def __new__(type, ob, callback, key):
self = ref.__new__(type, ob, callback)
self.key = key
return self
def __init__(self, ob, callback, key):
super().__init__(ob, callback)
class WeakKeyDictionary(collections.MutableMapping):
""" Mapping class that references keys weakly.
Entries in the dictionary will be discarded when there is no
longer a strong reference to the key. This can be used to
associate additional data with an object owned by other parts of
an application without adding attributes to those objects. This
can be especially useful with objects that override attribute
accesses.
"""
def __init__(self, dict=None):
self.data = {}
def remove(k, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(k)
else:
del self.data[k]
self._remove = remove
# A list of dead weakrefs (keys to be removed)
self._pending_removals = []
self._iterating = set()
if dict is not None:
self.update(dict)
def _commit_removals(self):
# NOTE: We don't need to call this method before mutating the dict,
# because a dead weakref never compares equal to a live weakref,
# even if they happened to refer to equal objects.
# However, it means keys may already have been removed.
l = self._pending_removals
d = self.data
while l:
try:
del d[l.pop()]
except KeyError:
pass
def __delitem__(self, key):
del self.data[ref(key)]
def __getitem__(self, key):
return self.data[ref(key)]
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __repr__(self):
return "<WeakKeyDictionary at %s>" % id(self)
def __setitem__(self, key, value):
self.data[ref(key, self._remove)] = value
def copy(self):
new = WeakKeyDictionary()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = value
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = deepcopy(value, memo)
return new
def get(self, key, default=None):
return self.data.get(ref(key),default)
def __contains__(self, key):
try:
wr = ref(key)
except TypeError:
return False
return wr in self.data
def items(self):
with _IterationGuard(self):
for wr, value in self.data.items():
key = wr()
if key is not None:
yield key, value
def keys(self):
with _IterationGuard(self):
for wr in self.data:
obj = wr()
if obj is not None:
yield obj
__iter__ = keys
def values(self):
with _IterationGuard(self):
for wr, value in self.data.items():
if wr() is not None:
yield value
def keyrefs(self):
"""Return a list of weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return list(self.data)
def popitem(self):
while True:
key, value = self.data.popitem()
o = key()
if o is not None:
return o, value
def pop(self, key, *args):
return self.data.pop(ref(key), *args)
def setdefault(self, key, default=None):
return self.data.setdefault(ref(key, self._remove),default)
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, value in dict.items():
d[ref(key, self._remove)] = value
if len(kwargs):
self.update(kwargs)
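# Illustrative sketch (not part of the standard module): a WeakKeyDictionary
# lets external metadata follow an object without keeping that object alive.
# The class, function and attribute names below are only for demonstration.
def _weak_key_metadata_example():
    class Node:
        pass
    metadata = WeakKeyDictionary()
    node = Node()
    metadata[node] = {'visited': True}
    assert metadata[node]['visited']
    del node  # the corresponding entry may now be discarded as well
    return len(metadata)  # expected to drop back to 0 once the key is collected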
| gpl-3.0 |
cloudwork/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Scanner/Fortran.py | 61 | 14347 | """SCons.Scanner.Fortran
This module implements the dependency scanner for Fortran code.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Scanner/Fortran.py 5134 2010/08/16 23:02:40 bdeegan"
import re
import SCons.Node
import SCons.Node.FS
import SCons.Scanner
import SCons.Util
import SCons.Warnings
class F90Scanner(SCons.Scanner.Classic):
"""
A Classic Scanner subclass for Fortran source files which takes
into account both USE and INCLUDE statements. This scanner will
work for both F77 and F90 (and beyond) compilers.
Currently, this scanner assumes that the include files do not contain
USE statements. To enable the ability to deal with USE statements
in include files, add logic right after the module names are found
to loop over each include file, search for and locate each USE
statement, and append each module name to the list of dependencies.
Caching the search results in a common dictionary somewhere so that
the same include file is not searched multiple times would be a
smart thing to do.
"""
def __init__(self, name, suffixes, path_variable,
use_regex, incl_regex, def_regex, *args, **kw):
self.cre_use = re.compile(use_regex, re.M)
self.cre_incl = re.compile(incl_regex, re.M)
self.cre_def = re.compile(def_regex, re.M)
def _scan(node, env, path, self=self):
node = node.rfile()
if not node.exists():
return []
return self.scan(node, env, path)
kw['function'] = _scan
kw['path_function'] = SCons.Scanner.FindPathDirs(path_variable)
kw['recursive'] = 1
kw['skeys'] = suffixes
kw['name'] = name
SCons.Scanner.Current.__init__(self, *args, **kw)
def scan(self, node, env, path=()):
# cache the includes list in node so we only scan it once:
        if node.includes is not None:
mods_and_includes = node.includes
else:
# retrieve all included filenames
includes = self.cre_incl.findall(node.get_text_contents())
# retrieve all USE'd module names
modules = self.cre_use.findall(node.get_text_contents())
# retrieve all defined module names
defmodules = self.cre_def.findall(node.get_text_contents())
# Remove all USE'd module names that are defined in the same file
# (case-insensitively)
d = {}
for m in defmodules:
d[m.lower()] = 1
modules = [m for m in modules if m.lower() not in d]
# Convert module name to a .mod filename
suffix = env.subst('$FORTRANMODSUFFIX')
modules = [x.lower() + suffix for x in modules]
# Remove unique items from the list
mods_and_includes = SCons.Util.unique(includes+modules)
node.includes = mods_and_includes
# This is a hand-coded DSU (decorate-sort-undecorate, or
# Schwartzian transform) pattern. The sort key is the raw name
# of the file as specifed on the USE or INCLUDE line, which lets
# us keep the sort order constant regardless of whether the file
# is actually found in a Repository or locally.
nodes = []
source_dir = node.get_dir()
if callable(path):
path = path()
for dep in mods_and_includes:
n, i = self.find_include(dep, source_dir, path)
if n is None:
SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
"No dependency generated for file: %s (referenced by: %s) -- file not found" % (i, node))
else:
sortkey = self.sort_key(dep)
nodes.append((sortkey, n))
return [pair[1] for pair in sorted(nodes)]
def FortranScan(path_variable="FORTRANPATH"):
"""Return a prototype Scanner instance for scanning source files
for Fortran USE & INCLUDE statements"""
# The USE statement regex matches the following:
#
# USE module_name
# USE :: module_name
# USE, INTRINSIC :: module_name
# USE, NON_INTRINSIC :: module_name
#
# Limitations
#
# -- While the regex can handle multiple USE statements on one line,
# it cannot properly handle them if they are commented out.
# In either of the following cases:
#
# ! USE mod_a ; USE mod_b [entire line is commented out]
# USE mod_a ! ; USE mod_b [in-line comment of second USE statement]
#
# the second module name (mod_b) will be picked up as a dependency
# even though it should be ignored. The only way I can see
# to rectify this would be to modify the scanner to eliminate
# the call to re.findall, read in the contents of the file,
# treating the comment character as an end-of-line character
# in addition to the normal linefeed, loop over each line,
# weeding out the comments, and looking for the USE statements.
# One advantage to this is that the regex passed to the scanner
# would no longer need to match a semicolon.
#
# -- I question whether or not we need to detect dependencies to
# INTRINSIC modules because these are built-in to the compiler.
# If we consider them a dependency, will SCons look for them, not
# find them, and kill the build? Or will we there be standard
# compiler-specific directories we will need to point to so the
# compiler and SCons can locate the proper object and mod files?
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# ^ : start of line
# (?: : group a collection of regex symbols without saving the match as a "group"
# ^|; : matches either the start of the line or a semicolon - semicolon
# ) : end the unsaved grouping
# \s* : any amount of white space
# USE : match the string USE, case insensitive
# (?: : group a collection of regex symbols without saving the match as a "group"
# \s+| : match one or more whitespace OR .... (the next entire grouped set of regex symbols)
# (?: : group a collection of regex symbols without saving the match as a "group"
# (?: : establish another unsaved grouping of regex symbols
# \s* : any amount of white space
# , : match a comma
# \s* : any amount of white space
# (?:NON_)? : optionally match the prefix NON_, case insensitive
# INTRINSIC : match the string INTRINSIC, case insensitive
# )? : optionally match the ", INTRINSIC/NON_INTRINSIC" grouped expression
# \s* : any amount of white space
# :: : match a double colon that must appear after the INTRINSIC/NON_INTRINSIC attribute
# ) : end the unsaved grouping
# ) : end the unsaved grouping
# \s* : match any amount of white space
# (\w+) : match the module name that is being USE'd
#
#
use_regex = "(?i)(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"
# The INCLUDE statement regex matches the following:
#
# INCLUDE 'some_Text'
# INCLUDE "some_Text"
# INCLUDE "some_Text" ; INCLUDE "some_Text"
# INCLUDE kind_"some_Text"
# INCLUDE kind_'some_Text"
#
# where some_Text can include any alphanumeric and/or special character
# as defined by the Fortran 2003 standard.
#
# Limitations:
#
# -- The Fortran standard dictates that a " or ' in the INCLUDE'd
# string must be represented as a "" or '', if the quotes that wrap
# the entire string are either a ' or ", respectively. While the
# regular expression below can detect the ' or " characters just fine,
# the scanning logic, presently is unable to detect them and reduce
# them to a single instance. This probably isn't an issue since,
# in practice, ' or " are not generally used in filenames.
#
# -- This regex will not properly deal with multiple INCLUDE statements
# when the entire line has been commented out, ala
#
# ! INCLUDE 'some_file' ; INCLUDE 'some_file'
#
# In such cases, it will properly ignore the first INCLUDE file,
# but will actually still pick up the second. Interestingly enough,
# the regex will properly deal with these cases:
#
# INCLUDE 'some_file'
# INCLUDE 'some_file' !; INCLUDE 'some_file'
#
# To get around the above limitation, the FORTRAN programmer could
# simply comment each INCLUDE statement separately, like this
#
# ! INCLUDE 'some_file' !; INCLUDE 'some_file'
#
# The way I see it, the only way to get around this limitation would
# be to modify the scanning logic to replace the calls to re.findall
# with a custom loop that processes each line separately, throwing
# away fully commented out lines before attempting to match against
# the INCLUDE syntax.
#
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# (?: : begin a non-saving group that matches the following:
# ^ : either the start of the line
# | : or
# ['">]\s*; : a semicolon that follows a single quote,
# double quote or greater than symbol (with any
# amount of whitespace in between). This will
# allow the regex to match multiple INCLUDE
# statements per line (although it also requires
# the positive lookahead assertion that is
# used below). It will even properly deal with
# (i.e. ignore) cases in which the additional
# INCLUDES are part of an in-line comment, ala
# " INCLUDE 'someFile' ! ; INCLUDE 'someFile2' "
# ) : end of non-saving group
# \s* : any amount of white space
# INCLUDE : match the string INCLUDE, case insensitive
# \s+ : match one or more white space characters
# (?\w+_)? : match the optional "kind-param _" prefix allowed by the standard
# [<"'] : match the include delimiter - an apostrophe, double quote, or less than symbol
# (.+?) : match one or more characters that make up
# the included path and file name and save it
# in a group. The Fortran standard allows for
# any non-control character to be used. The dot
# operator will pick up any character, including
# control codes, but I can't conceive of anyone
# putting control codes in their file names.
# The question mark indicates it is non-greedy so
# that regex will match only up to the next quote,
# double quote, or greater than symbol
# (?=["'>]) : positive lookahead assertion to match the include
# delimiter - an apostrophe, double quote, or
# greater than symbol. This level of complexity
# is required so that the include delimiter is
# not consumed by the match, thus allowing the
# sub-regex discussed above to uniquely match a
# set of semicolon-separated INCLUDE statements
# (as allowed by the F2003 standard)
include_regex = """(?i)(?:^|['">]\s*;)\s*INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])"""
# The MODULE statement regex finds module definitions by matching
# the following:
#
# MODULE module_name
#
# but *not* the following:
#
# MODULE PROCEDURE procedure_name
#
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# ^\s* : any amount of white space
# MODULE : match the string MODULE, case insensitive
# \s+ : match one or more white space characters
# (?!PROCEDURE) : but *don't* match if the next word matches
# PROCEDURE (negative lookahead assertion),
# case insensitive
# (\w+) : match one or more alphanumeric characters
# that make up the defined module name and
# save it in a group
def_regex = """(?i)^\s*MODULE\s+(?!PROCEDURE)(\w+)"""
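# For example (again illustrative only), re.findall() with this pattern
# returns ['constants'] for the line "MODULE constants" but returns []
# for "MODULE PROCEDURE swap", because the negative lookahead rejects the
# PROCEDURE form.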
scanner = F90Scanner("FortranScan",
"$FORTRANSUFFIXES",
path_variable,
use_regex,
include_regex,
def_regex)
return scanner
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
LordDamionDevil/Lony | lib/discord/member.py | 14 | 8164 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2016 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from .user import User
from .game import Game
from .permissions import Permissions
from . import utils
from .enums import Status, ChannelType
from .colour import Colour
import copy
class VoiceState:
"""Represents a Discord user's voice state.
Attributes
------------
deaf: bool
Indicates if the user is currently deafened by the server.
mute: bool
Indicates if the user is currently muted by the server.
self_mute: bool
Indicates if the user is currently muted by their own accord.
self_deaf: bool
Indicates if the user is currently deafened by their own accord.
is_afk: bool
Indicates if the user is currently in the AFK channel in the server.
voice_channel: Optional[Union[:class:`Channel`, :class:`PrivateChannel`]]
The voice channel that the user is currently connected to. None if the user
is not currently in a voice channel.
"""
__slots__ = [ 'session_id', 'deaf', 'mute', 'self_mute',
'self_deaf', 'is_afk', 'voice_channel' ]
def __init__(self, **kwargs):
self.session_id = kwargs.get('session_id')
self._update_voice_state(**kwargs)
def _update_voice_state(self, **kwargs):
self.self_mute = kwargs.get('self_mute', False)
self.self_deaf = kwargs.get('self_deaf', False)
self.is_afk = kwargs.get('suppress', False)
self.mute = kwargs.get('mute', False)
self.deaf = kwargs.get('deaf', False)
self.voice_channel = kwargs.get('voice_channel')
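# Class decorator: for every attribute name in VoiceState.__slots__ it adds a
# read-only property to the decorated class that forwards to self.voice,
# so e.g. member.mute reads member.voice.mute.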
def flatten_voice_states(cls):
for attr in VoiceState.__slots__:
def getter(self, x=attr):
return getattr(self.voice, x)
setattr(cls, attr, property(getter))
return cls
@flatten_voice_states
class Member(User):
"""Represents a Discord member to a :class:`Server`.
This is a subclass of :class:`User` that extends more functionality
that server members have such as roles and permissions.
Attributes
----------
voice: :class:`VoiceState`
The member's voice state. Properties are defined to mirror access of the attributes.
        e.g. ``Member.is_afk`` is equivalent to ``Member.voice.is_afk``.
roles
A list of :class:`Role` that the member belongs to. Note that the first element of this
list is always the default '@everyone' role.
joined_at : `datetime.datetime`
A datetime object that specifies the date and time in UTC that the member joined the server for
the first time.
status : :class:`Status`
The member's status. There is a chance that the status will be a ``str``
if it is a value that is not recognised by the enumerator.
game : :class:`Game`
The game that the user is currently playing. Could be None if no game is being played.
server : :class:`Server`
The server that the member belongs to.
nick : Optional[str]
The server specific nickname of the user.
"""
__slots__ = [ 'roles', 'joined_at', 'status', 'game', 'server', 'nick', 'voice' ]
def __init__(self, **kwargs):
super().__init__(**kwargs.get('user'))
self.voice = VoiceState(**kwargs)
self.joined_at = utils.parse_time(kwargs.get('joined_at'))
self.roles = kwargs.get('roles', [])
self.status = Status.offline
game = kwargs.get('game', {})
self.game = Game(**game) if game else None
self.server = kwargs.get('server', None)
self.nick = kwargs.get('nick', None)
def _update_voice_state(self, **kwargs):
self.voice.self_mute = kwargs.get('self_mute', False)
self.voice.self_deaf = kwargs.get('self_deaf', False)
self.voice.is_afk = kwargs.get('suppress', False)
self.voice.mute = kwargs.get('mute', False)
self.voice.deaf = kwargs.get('deaf', False)
old_channel = getattr(self, 'voice_channel', None)
vc = kwargs.get('voice_channel')
if old_channel is None and vc is not None:
# we joined a channel
vc.voice_members.append(self)
elif old_channel is not None:
try:
# we either left a channel or we switched channels
old_channel.voice_members.remove(self)
except ValueError:
pass
finally:
# we switched channels
if vc is not None:
vc.voice_members.append(self)
self.voice.voice_channel = vc
def _copy(self):
ret = copy.copy(self)
ret.voice = copy.copy(self.voice)
return ret
@property
def colour(self):
"""A property that returns a :class:`Colour` denoting the rendered colour
for the member. If the default colour is the one rendered then an instance
of :meth:`Colour.default` is returned.
There is an alias for this under ``color``.
"""
default_colour = Colour.default()
# highest order of the colour is the one that gets rendered.
# if the highest is the default colour then the next one with a colour
# is chosen instead
if self.roles:
roles = sorted(self.roles, key=lambda r: r.position, reverse=True)
for role in roles:
if role.colour == default_colour:
continue
else:
return role.colour
return default_colour
color = colour
@property
def mention(self):
if self.nick:
return '<@!{}>'.format(self.id)
return '<@{}>'.format(self.id)
def mentioned_in(self, message):
mentioned = super().mentioned_in(message)
if mentioned:
return True
for role in message.role_mentions:
has_role = utils.get(self.roles, id=role.id) is not None
if has_role:
return True
return False
@property
def top_role(self):
"""Returns the member's highest role.
        This is useful for figuring out where a member stands in the role
hierarchy chain.
"""
if self.roles:
roles = sorted(self.roles, reverse=True)
return roles[0]
return None
@property
def server_permissions(self):
"""Returns the member's server permissions.
This only takes into consideration the server permissions
and not most of the implied permissions or any of the
channel permission overwrites. For 100% accurate permission
calculation, please use either :meth:`permissions_in` or
:meth:`Channel.permissions_for`.
This does take into consideration server ownership and the
administrator implication.
"""
if self.server.owner == self:
return Permissions.all()
base = Permissions.none()
for r in self.roles:
base.value |= r.permissions.value
if base.administrator:
return Permissions.all()
return base
| gpl-3.0 |
JeffsFernandes/cuidando2 | projeto/projeto/views.py | 2 | 32828 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pyramid.view import view_config
from .models import Cidadao, Cidadao_twitter,Atividade, Atividade_cidadao, Atividade_orcamento, Dados_site, Midia, Midia_comentario, Midia_video, Denuncia, Midia_foto
#from .models import Cidadao, UsrTree, Atividade_cidadao
#from .models import Cidadao, MyModel, UsrTree
#why MyModel?
from beaker.middleware import SessionMiddleware
from datetime import datetime
import itertools
import warnings
from BTrees.OOBTree import OOBTree
import tweepy
import facebook
import urllib
from pyramid_mailer import get_mailer
from pyramid_mailer.message import Message
from pyramid_mailer.mailer import Mailer
#from facebook import Facebook
from pyramid.httpexceptions import (
HTTPFound,
HTTPNotFound,
#HTTPForbidden,
)
from pyramid.security import (
remember,
forget,
authenticated_userid,
)
from forms import (
merge_session_with_post,
record_to_appstruct,
FormCadastrar,
FormConfigurar,
FormContato,
FormLogin,
FormMapa,
FormInserirP,
FormOrcamento,
FormOrcamentoResp,
FormRecadSenha,
FormRSenha,
FormPesqMapa,
FormOrcFoto,
FormOrcVideo,
FormSeguirAtv,
FormDenuncia,
)
import deform
import transaction
@view_config(route_name='inicial', renderer='inicial.slim')
def my_view(request):
"""
Página inicial: resgata os contadores estatísticos e outros dados do site
"""
#del request.db["atualAtv"]
if not "dadosSite" in request.db:
request.db["dadosSite"] = Dados_site()
atualiz_atv = request.db['dadosSite'].atualiz_atv
qtde_atv_orc = request.db['dadosSite'].qtde_atv_orc
qtde_atv_usr = request.db['dadosSite'].qtde_atv_usr
qtde_usr = request.db['dadosSite'].qtde_usr
qtde_fotos = request.db['dadosSite'].qtde_fotos
qtde_videos = request.db['dadosSite'].qtde_videos
qtde_coment = request.db['dadosSite'].qtde_coment
destaque_atv = request.db['dadosSite'].destaque_atv
return {
'atualAtv': atualiz_atv,
'qtdeAtvOrc': qtde_atv_orc,
'qtdeAtvUsr': qtde_atv_usr,
'qtdeUsr': qtde_usr,
'qtdeFotos': qtde_fotos,
'qtdeVideos': qtde_videos,
'qtdeComent': qtde_coment,
'destaqueAtv': destaque_atv,
}
@view_config(
route_name='listaUsr',
renderer='listaUsuarios.slim',
permission='comum'
)
def listaUsr(request):
"""
Página para listar usuários cadastrados
"""
cidadaos = request.db['usrTree'].values()
return {
'cidadaos': cidadaos,
}
@view_config(
route_name='listaAtv',
renderer='listaAtividades.slim',
permission='comum'
)
def listaAtv(request):
"""
Página para listar atividades
"""
atividades = request.db['atvTree'].values()
return {
'atividades': atividades,
}
@view_config(route_name='cadastro', renderer='cadastro.slim')
def cadastro(request):
"""Cadastro de usuário"""
# soh eh rodado 1 vez... tem que colocar na configurcao ou coisa assim?...
# Ensure that a ’userdb’ key is present
# in the root
if not request.db.has_key("usrTree"):
request.db["usrTree"] = OOBTree()
esquema = FormCadastrar().bind(request=request)
esquema.title = "Cadastrar novo usuário"
form = deform.Form(esquema, buttons=('Cadastrar',))
if 'Cadastrar' in request.POST:
        # Form validation
try:
form.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
        #log the user out, if one is logged in
headers = forget(request)
        # Creation and insertion
cidadao = Cidadao("","")
cidadao = merge_session_with_post(cidadao, request.POST.items())
        #goodbye, list
#request.db['cidadaos'][cidadao.email] = cidadao
request.db['usrTree'][cidadao.email] = cidadao
dadosSite = request.db['dadosSite']
        #call the function that updates the counters
Dados_site.addUsr(dadosSite)
transaction.commit()
request.session.flash(u"Usuário registrado com sucesso.")
request.session.flash(u"Agora você já pode logar com ele.")
return HTTPFound(location=request.route_url('inicial'), headers = headers)
else:
        # Render the form
return {'form': form.render()}
@view_config(
route_name='configuracao',
renderer='configuracao.slim',
permission='basica'
)
def configuracao(request):
"""Configuração de usuário"""
cidadao = Cidadao("","")
cidadao = request.db["usrTree"][authenticated_userid(request)]
    #check whether the cidadao record is filled in
appstruct = record_to_appstruct(cidadao)
esquema = FormConfigurar().bind(request=request)
esquema.title = "Configuração de usuário"
form = deform.Form(esquema, buttons=('Salvar', 'Excluir'))
if 'Salvar' in request.POST:
        # Form validation
cidadao = merge_session_with_post(cidadao, request.POST.items())
appstruct = record_to_appstruct(cidadao)
try:
esquema = FormConfigurar().bind(request=request)
esquema.title = "Configuração de usuário"
form = deform.Form(esquema, buttons=('Salvar', 'Excluir'))
form.render(appstruct)
appstruct = form.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
transaction.commit()
return HTTPFound(location=request.route_url('usuario'))
elif 'Excluir' in request.POST:
del request.db["usrTree"][authenticated_userid(request)]
transaction.commit()
headers = forget(request)
return HTTPFound(location=request.route_url('inicial'))
else:
        # Render the form
return{'form':form.render(appstruct)}
@view_config(route_name='contato', renderer='contato.slim')
def contato(request):
"""Contato"""
# Import smtplib for the actual sending function
import smtplib
esquema = FormContato().bind(request=request)
esquema.title = "Entre em contato com o Cuidando"
form = deform.Form(esquema, buttons=('Enviar',))
if 'Enviar' in request.POST:
        # Form validation
try:
form.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
#sender = request.POST.get("email")
#receivers = ['[email protected]']
#message = request.POST.get("assunto")
try:
#s = smtplib.SMTP( [host [, port [, local_hostname]]] )
#s = smtplib.SMTP('pop.mail.yahoo.com.br',587)
#smtpObj.sendmail(sender, receivers, message)
#s.quit()
#mailer = get_mailer(request)
mailer = Mailer()
message = Message(
subject=request.POST.get("assunto"),
sender= request.POST.get("email"), #"[email protected]",
recipients=['[email protected]'],
body=request.POST.get("mensagem")
)
mailer.send(message)
transaction.commit()
print "Successfully sent email"
#except SMTPException:
except:
print "Error: unable to send email"
return HTTPFound(location=request.route_url('inicial'))
else:
        # Render the form
return {'form': form.render()}
@view_config(route_name='logout')
def logout(request):
"""Página para logout"""
headers = forget(request)
request.session.flash(u"Você foi deslogado.")
#request.session.pop_flash()
return HTTPFound(location=request.route_url('inicial'), headers=headers)
@view_config(route_name='loginTwitterAuth', renderer='loginTwitterAuth.slim',permission='comum')
def loginTwitterAuth(request):
"""
Loga usuário com conta do twitter já autorizada: chamado a partir do login = authTwitterAcc
testando com twitter da zi
"""
auth = tweepy.OAuthHandler("MBX41ZNwjzKMObK8AHHfQ", "56hnTS8qMDg623XAIw4vdYEGpZFJtzS82VrXhNrILQ")
verifier = request.GET.get('oauth_verifier')
token = request.session.get('request_token')
#request.session.delete('request_token')
auth.set_request_token(token[0], token[1])
print '=============='
print token[0]
#auth.set_access_token(cidadao.twitter_key, cidadao.twitter_secret)
    #test using zi's twitter - permanent access to the account
#auth.set_access_token("91435451-hhGY5e7Ga2c3viHCV26kVN1vgLWQm0gJMvJHYOsbh", "rEeRld6tM4V45T1fKX6abNc8BMC7hDF1n6q0tuOKfi2ME")
auth.get_access_token(verifier)
twitterApi = tweepy.API(auth)
if twitterApi:
#cidadao = request.db["twtTree"][token[0]]
userInfo = twitterApi.me()
print userInfo.screen_name
cidadao = Cidadao_twitter()
#cidadao = [] #[str(userInfo.screen_name)]
if not userInfo.screen_name in request.db["twtTree"]:
#cidadao = Cidadao_twitter()
cidadao.nomeUsr = userInfo.screen_name
request.db['twtTree'][cidadao.nomeUsr] = cidadao
dadosSite = request.db['dadosSite']
#chama função para atualizar contadores
Dados_site.addUsr(dadosSite)
transaction.commit()
request.session.flash(u"Usuário registrado com sucesso.")
request.session.flash(u"Agora você já pode logar com ele.")
#print userInfo.__getstate__()
#print userInfo.email
headers = remember(request, userInfo.screen_name)
#headers = remember(request, "[email protected]")
request.session.flash(u"Logado com twitter")
return HTTPFound(location=request.route_url('usuario'), headers=headers)
else:
request.session.flash(u"Erro ao logar com twitter")
return HTTPFound(location=request.route_url('login'))
@view_config(route_name='authTwitter', renderer='authTwitter.slim',permission='comum')
def authTwitter(request):
"""
Autoriza twitter para a conta do usuário logado
chamado em configurações
"""
auth = tweepy.OAuthHandler("MBX41ZNwjzKMObK8AHHfQ", "56hnTS8qMDg623XAIw4vdYEGpZFJtzS82VrXhNrILQ")
    #application token and secret -> get them from twitter
verifier = request.GET.get('oauth_verifier')
token = request.session.get('request_token')
#request.session.delete('request_token')
auth.set_request_token(token[0], token[1])
try:
auth.get_access_token(verifier)
except tweepy.TweepError:
print 'Error! Failed to get access token.'
auth.set_access_token(auth.access_token.key, auth.access_token.secret)
#auth.set_access_token("91435451-hhGY5e7Ga2c3viHCV26kVN1vgLWQm0gJMvJHYOsbh", "rEeRld6tM4V45T1fKX6abNc8BMC7hDF1n6q0tuOKfi2ME")
twitterApi = tweepy.API(auth)
if twitterApi:
userInfo = twitterApi.me()
cidadao = request.db["usrTree"][authenticated_userid(request)]
cidadao.twitter_key = auth.access_token.key
cidadao.twitter_secret = auth.access_token.secret
cidadao.login_twitter = userInfo.screen_name
transaction.commit()
#headers = remember(request, "[email protected]")
#headers = remember(request, "[email protected]")
request.session.flash(u"Sua conta do twitter foi conectada ao Cuidando")
        return HTTPFound(location=request.route_url('usuario'))
else:
request.session.flash(u"Erro ao conectar com twitter")
return HTTPFound(location=request.route_url('login'))
@view_config(route_name='authTwitterAcc', renderer='authTwitterAcc.slim',permission='comum')
def authTwitterAcc(request):
"""
Apenas autoriza e redireciona usuário para twitter
"""
    #OAuth authorization
auth = tweepy.OAuthHandler("MBX41ZNwjzKMObK8AHHfQ", "56hnTS8qMDg623XAIw4vdYEGpZFJtzS82VrXhNrILQ", request.route_url('loginTwitterAuth'))
    #application token and secret -> get them from twitter
authUrl = auth.get_authorization_url(True)
request.session['request_token'] = (auth.request_token.key, auth.request_token.secret)
request.session.save()
try:
return HTTPFound(authUrl)
except tweepy.TweepError:
print 'Error! Failed to get request token.'
@view_config(route_name='loginTwitter', renderer='loginTwitter.slim',permission='comum')
def loginTwitter(request):
"""
Login com twitter:
- verificar se já foi autorizado o app
- guardar token de acesso em algum lugar
- permitir acesso ao site com esse novo objeto....
"""
auth = tweepy.OAuthHandler("MBX41ZNwjzKMObK8AHHfQ", "56hnTS8qMDg623XAIw4vdYEGpZFJtzS82VrXhNrILQ")
    #application token and secret -> get them from twitter
verifier = request.GET.get('oauth_verifier')
token = request.session.get('request_token')
#request.session.delete('request_token')
auth.set_request_token(token[0], token[1])
try:
auth.get_access_token(verifier)
except tweepy.TweepError:
print 'Error! Failed to get access token.'
auth.set_access_token(auth.access_token.key, auth.access_token.secret)
#auth.set_access_token("91435451-hhGY5e7Ga2c3viHCV26kVN1vgLWQm0gJMvJHYOsbh", "rEeRld6tM4V45T1fKX6abNc8BMC7hDF1n6q0tuOKfi2ME")
twitterApi = tweepy.API(auth)
if twitterApi:
userInfo = twitterApi.me()
cidadao = request.db["usrTree"][authenticated_userid(request)]
cidadao.twitter_key = auth.access_token.key
cidadao.twitter_secret = auth.access_token.secret
cidadao.login_twitter = userInfo.screen_name
transaction.commit()
#headers = remember(request, "[email protected]")
#headers = remember(request, "[email protected]")
request.session.flash(u"Usuário logado com twitter")
        return HTTPFound(location=request.route_url('usuario'))
else:
request.session.flash(u"Erro ao logar com twitter")
return HTTPFound(location=request.route_url('login'))
@view_config(route_name='authFacebook', renderer='authFacebook.slim',permission='comum')
def authFacebook(request):
"""
Apenas autoriza e redireciona usuário para twitter
"""
    #OAuth authorization
#fbApi = Facebook("473549246060347", "ba198578f77ea264f8ed4053dd323054")
    #application token and secret -> get them from facebook
args = dict(client_id="473549246060347", redirect_uri=request.route_url('loginAuthFace'))
try:
return HTTPFound("https://graph.facebook.com/oauth/authorize?" + urllib.urlencode(args))
except:
print 'Error! Failed to get request token.'
return HTTPFound(request.route_url('login'))
@view_config(route_name='loginFacebook', renderer='loginFacebook.slim',permission='comum')
def loginFacebook(request):
try:
return HTTPFound(request.route_url('login'))
except:
print 'Error! Failed to get request token.'
@view_config(route_name='loginAuthFace', renderer='loginAuthFace.slim',permission='comum')
def loginAuthFace(request):
try:
return HTTPFound(request.route_url('login'))
except:
print 'Error! Failed to get request token.'
@view_config(route_name='login', renderer='login.slim')
def login(request):
"""
Página para login, site, face e twitter
"""
esquema = FormLogin().bind(request=request)
esquema.title = "Login"
    #buttons do not accept phrases as labels, e.g. label = "esqueci a senha"
form = deform.Form(esquema, buttons=('Entrar', 'Esqueci a senha'))
#form = deform.Form(esquema, buttons=('Entrar', 'Esqueci'))
if authenticated_userid(request):
request.session.flash(u"Usuário já está logado, caso queira entrar com usuário diferente, faça o logout.")
return HTTPFound(location=request.route_url('usuario'))
if 'Entrar' in request.POST:
try:
form.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
email = request.POST.get("email")
senha = request.POST.get("senha")
if email in request.db["usrTree"]:
cidadao = Cidadao("","")
cidadao = request.db["usrTree"][email]
if cidadao.senha == senha:
headers = remember(request, email)
next = request.route_url('usuario')
request.session.flash(u"Usuário logado")
return HTTPFound(location=next, headers=headers)
else:
request.session.flash(u"Email ou senha inválidos")
else:
request.session.flash(u"Email ou senha inválidos")
return {'form': form.render()}
    #execution never enters this elif
#elif 'Esqueci' in request.POST:
elif 'Esqueci_a_senha' in request.POST:
return HTTPFound(location=request.route_url('r_senha'))
else:
return {'form': form.render()}
@view_config(route_name='usuario', renderer='usuario.slim', permission='basica')
def usuario(request):
"""
Página do perfil do usuário
"""
cidadao = Cidadao("","")
if not authenticated_userid(request) in request.db["usrTree"]:
cidadao = request.db["twtTree"][authenticated_userid(request)]
return {
'cidadao': cidadao
}
@view_config(route_name='perfilUsr', renderer='usuario.slim', permission='comum')
def perfilUsuario(request):
"""
Página do perfil do usuário
"""
cidadao = request.db["twtTree"][request.matchdict['id']]
return {
'cidadao': cidadao
}
@view_config(route_name='sobre', renderer='sobre.slim')
def sobre(request):
"""
Página sobre
"""
return {}
@view_config(route_name='mapa', renderer='mapa.slim')
def mapa(request):
"""
Página dos orçamentos mapeados
"""
esquemaPesq = FormPesqMapa().bind(request=request)
esquemaPesq.title = "Pesquisa"
formPesq = deform.Form(esquemaPesq, buttons=('Pesquisar',))
esquema = FormMapa().bind(request=request)
esquema.title = "Mapa"
    #button label - insert point
form = deform.Form(esquema, buttons=('Inserir',))
if 'Pesquisar' in request.POST:
try:
formPesq.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
return HTTPFound(location=request.route_url('lista'))
elif 'Inserir' in request.POST:
return HTTPFound(location=request.route_url('inserir_ponto'))
else:
# values passed to template for rendering
return {
'form':form.render(),
'formPesq':formPesq.render(),
'showmenu':True,
}
@view_config(route_name='orcamentoId', renderer='orcamento.slim')
def orcamento(request):
"""
Página de um orçamento individual
"""
id = int(request.matchdict['id'])
esquemaFoto = FormOrcFoto().bind(request=request)
esquemaFoto.title = "Foto"
formFoto = deform.Form(esquemaFoto, buttons=('Upload Foto',))
esquemaVideo = FormOrcVideo().bind(request=request)
esquemaVideo.title = "Video"
formVideo = deform.Form(esquemaVideo, buttons=('Upload Video',))
esquemaSeguir = FormSeguirAtv().bind(request=request)
esquemaSeguir.title = "Seguir atualizações"
formSeguir = deform.Form(esquemaSeguir, buttons=('Salvar',))
esquema = FormOrcamento().bind(request=request)
#esquema.title = "Comentários"
form = deform.Form(esquema, buttons=('Enviar',))
esquemaResp = FormOrcamentoResp().bind(request=request)
#esquema.title = "Resposta"
formResp = deform.Form(esquemaResp, buttons=('Responder',))
#atv_orc = Atividade_orcamento("","")
atv_orc = Atividade()
    #change which budget item is displayed on the page
atv_orc = request.db["atvTree"][id]
    #activity coming from the map
#atv_orc = request.db["orcTree"]
#atv_orc = request.db["atvTree"]
    #approach to put the id on the reply forms
    # sends to the template a list of reply forms
i = 0
formsResps = []
    #create reply forms for the comments that already exist
    #each form has an id, so we can tell which comment a reply belongs to
for coment in atv_orc.midia_coment:
formResp = deform.Form(esquemaResp, buttons=('Responder',), formid=str(i))
formsResps.append(formResp.render())
i = i + 1
cidadao = Cidadao("","")
if (authenticated_userid(request)):
cidadao = request.db["usrTree"][authenticated_userid(request)]
if 'Upload_Foto' in request.POST:
if (not authenticated_userid(request)):
request.session.flash(u"Você deve estar logado para inserir conteúdos no site")
return HTTPFound(location=request.route_url('login'))
try:
formFoto.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
        #the 3 lines below are repeated for the 3 forms.... how can this be optimized??
dadosSite = request.db['dadosSite']
        #call the function that inserts into the updates list
Dados_site.addAtual(dadosSite, atv_orc)
Dados_site.addFoto(dadosSite)
foto = Midia_foto(request.POST.get('foto'), datetime.now(), authenticated_userid(request))
Atividade_cidadao.addFoto(atv_orc, foto)
transaction.commit()
return HTTPFound(location=request.route_url('orcamentoId', id=id))
elif 'Upload_Video' in request.POST:
if (not authenticated_userid(request)):
request.session.flash(u"Você deve estar logado para inserir conteúdos no site")
return HTTPFound(location=request.route_url('login'))
try:
formVideo.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
        #wrap this in a try/except
        #the 3 lines below are repeated for the 3 forms.... how can this be optimized??
dadosSite = request.db['dadosSite']
        #call the function that inserts into the updates list
Dados_site.addAtual(dadosSite, atv_orc)
Dados_site.addVideo(dadosSite)
video = Midia_video(request.POST.get('video'), datetime.now(), authenticated_userid(request))
        #come up with some link validation?
        #move these functions into the model
video.link = video.linkOrig.replace('.com/','.com/embed/')
video.link = video.linkOrig.replace('watch?v=','embed/')
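        # note: this second assignment also starts from linkOrig, so it overwrites
        # the '.com/' -> '.com/embed/' result from the line above; only the
        # 'watch?v=' form actually ends up converted to an embed link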
Atividade_cidadao.addVideo(atv_orc, video)
transaction.commit()
return HTTPFound(location=request.route_url('orcamentoId', id=id))
elif 'Enviar' in request.POST:
if (not authenticated_userid(request)):
request.session.flash(u"Você deve estar logado para inserir conteúdos no site")
return HTTPFound(location=request.route_url('login'))
try:
esquema = FormOrcamento().bind(request=request)
form = deform.Form(esquema, buttons=('Enviar',))
form.render()
form.validate(request.POST.items())
except deform.ValidationFailure as e:
print "form de comentário deu erro"
return {'form': e.render()}
        #the 3 lines below are repeated for the 3 forms.... how can this be optimized??
dadosSite = request.db['dadosSite']
        #call the function that inserts into the updates list
Dados_site.addAtual(dadosSite, atv_orc)
Dados_site.addComent(dadosSite)
coment = Midia_comentario(request.POST.get('comentario'), datetime.now(), authenticated_userid(request))
Atividade_cidadao.addComent(atv_orc, coment)
transaction.commit()
return HTTPFound(location=request.route_url('orcamentoId', id=id))
elif 'Responder' in request.POST:
if (not authenticated_userid(request)):
request.session.flash(u"Você deve estar logado para inserir conteúdos no site")
return HTTPFound(location=request.route_url('login'))
try:
esquemaResp = FormOrcamentoResp().bind(request=request)
formResp = deform.Form(esquemaResp, buttons=('Responder',))
formResp.render()
formResp.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
        #get the id of the form that submitted the comment reply
posted_formid = int(request.POST['__formid__'])
        #the 3 lines below are repeated for the 3 forms.... how can this be optimized??
dadosSite = request.db['dadosSite']
        #call the function that inserts into the updates list
Dados_site.addAtual(dadosSite, atv_orc)
Dados_site.addComent(dadosSite)
coment = Midia_comentario(request.POST.get('resposta'), datetime.now(), authenticated_userid(request))
transaction.commit()
        #append the reply to the parent comment, according to the reply form id
comentPai = atv_orc.midia_coment[posted_formid]
comentPai.respostas.append(coment)
comentPai._p_changed = 1
transaction.commit()
return HTTPFound(location=request.route_url('orcamentoId', id=id))
elif 'Salvar' in request.POST:
if (not authenticated_userid(request)):
request.session.flash(u"Você deve estar logado para inserir conteúdos no site")
return HTTPFound(location=request.route_url('login'))
try:
formSeguir.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
Cidadao.addSeguir(cidadao, atv_orc, request.POST.get('seguir'))
transaction.commit()
return HTTPFound(location=request.route_url('orcamentoId', id=id))
else:
seguirAtv = cidadao.pontos_a_seguir
        #check whether the logged-in user is following the activity
if atv_orc.atividade in seguirAtv:
appstruct = {'seguir':True,}
else:
appstruct = {'seguir':False,}
appstructOrc = record_to_appstruct(atv_orc)
return {
'orcamento': atv_orc,
'form': form.render(appstruct=appstructOrc),
'coments': atv_orc.midia_coment,
'formResp': formsResps,
'formVideo': formVideo.render(),
'videos': atv_orc.midia_video,
'fotos': atv_orc.midia_foto,
'formFoto': formFoto.render(),
'formSeguir': formSeguir.render(appstruct=appstruct),
            #send the midia_foto entries as soon as they are stored in the database
}
@view_config(route_name='inserir_ponto', renderer='inserir_ponto.slim', permission='basica')
def inserir_ponto(request):
"""
Página para inserir novos pontos/atividades no mapa pelo usuário
"""
esquema = FormInserirP().bind(request=request)
esquema.title = "Inserir ponto no mapa"
form = deform.Form(esquema, buttons=('Inserir', 'Cancelar'))
    #not sure whether this belongs here or in the models
if not request.db.has_key("atvTree"):
request.db["atvTree"] = OOBTree()
if 'Inserir' in request.POST:
try:
form.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
if(authenticated_userid(request)):
dadosSite = request.db['dadosSite']
            # Creation and insertion
atividade = Atividade_cidadao()
atividade = merge_session_with_post(atividade, request.POST.items())
            #insert an id for the activity?
atividade.data = datetime.now()
atividade.cidadao = authenticated_userid(request)
atividade.id = dadosSite.proxId
request.db['atvTree'][atividade.id] = atividade
dadosSite.proxId = dadosSite.proxId + 1
            #call the function that inserts into the updates list
Dados_site.addAtual(dadosSite, atividade)
Dados_site.addAtvUsr(dadosSite)
transaction.commit()
request.session.flash(u"Atividade de usuário cadastrada com sucesso.")
            #redirect -> go to the newly inserted activity
return HTTPFound(location=request.route_url('orcamentoId', id = atividade.id))
else:
return {'form': form.render()}
@view_config(route_name='privacidade', renderer='privacidade.slim')
def privacidade(request):
"""
Página com a política de privacidade do site
"""
return {}
@view_config(route_name='termos', renderer='termos.slim')
def termos(request):
"""
Página com os termos e condições de uso do site
"""
return {}
@view_config(
route_name='rcad_senha',
renderer='rcad_senha.slim',
permission='basica'
)
def rcad_senha(request):
"""Redefinir senha de usuário"""
esquema = FormRecadSenha().bind(request=request)
esquema.title = "Redefinir senha"
cidadao = Cidadao("","")
form = deform.Form(esquema, buttons=('Salvar',))
if 'Salvar' in request.POST:
        # Form validation
try:
appstruct = form.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
        #validate the token; if ok, merge the session
cidadao = merge_session_with_post(cidadao, request.POST.items())
transaction.commit()
return HTTPFound(location=request.route_url('usuario'))
else:
return{'form':form.render()}
@view_config(
route_name='r_senha',
renderer='r_senha.slim',
permission='comum'
)
def r_senha(request):
"""
Reconfiguração de senha do usuário
Envia token para email do usuário
"""
esquema = FormRSenha().bind(request=request)
esquema.title = "Reenviar senha"
form = deform.Form(esquema, buttons=('Enviar',))
if 'Enviar' in request.POST:
        # Form validation
try:
appstruct = form.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
email = request.POST.get("email")
if email in request.db["usrTree"]:
            #send an email with a token, and store that token
headers = remember(request, email)
return HTTPFound(location=request.route_url('rcad_senha'), headers=headers)
else:
warnings.warn("Email ou senha inválidos", DeprecationWarning)
return HTTPFound(location=request.route_url('rcad_senha'))
else:
return {'form': form.render()}
@view_config(
route_name='denuncia',
renderer='denuncia.slim',
permission='basica'
)
def denunciar(request):
"""
Formulário para enviar denúncia de mídia
"""
id = int(request.matchdict['id'])
tipoMidia = request.matchdict['tmidia']
idMidia = int(request.matchdict['idM'])
atividade = Atividade()
atividade = request.db["atvTree"][id]
if tipoMidia == 'foto':
midia = atividade.midia_foto[idMidia]
elif tipoMidia == 'video':
midia = atividade.midia_video[idMidia]
elif tipoMidia == 'comentario':
midia = atividade.midia_coment[idMidia]
esquema = FormDenuncia().bind(request=request)
esquema.title = "Denunciar mídia"
#midia = Midia("", "")
    #somehow select this media item coming from a link
form = deform.Form(esquema, buttons=('Enviar',))
if 'Enviar' in request.POST:
        # Form validation
try:
esquema = FormDenuncia().bind(request=request)
esquema.title = "Denunciar mídia"
form = deform.Form(esquema, buttons=('Enviar',))
form.render()
form.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
denuncia = Denuncia(request.POST.get("motivo"), authenticated_userid(request))
midia.addDenuncia(denuncia)
atividade.delMidiaDen()
cidadao = request.db["usrTree"][authenticated_userid(request)]
cidadao.addDenuncia(denuncia)
transaction.commit()
return HTTPFound(location=request.route_url('orcamentoId', id=id))
else:
return {'form': form.render()}
| gpl-3.0 |
jalavik/invenio | invenio/modules/upgrader/upgrades/invenio_2013_06_20_new_bibcheck_rules_table.py | 15 | 1210 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio.legacy.dbquery import run_sql
depends_on = ['invenio_release_1_1_0']
def info():
return "New bibcheck_rules table"
def do_upgrade():
run_sql("""
CREATE TABLE IF NOT EXISTS bibcheck_rules (
name varchar(150) NOT NULL,
last_run datetime NOT NULL default '0000-00-00',
PRIMARY KEY (name)
) ENGINE=MyISAM;
""")
def estimate():
""" Estimate running time of upgrade in seconds (optional). """
return 1
| gpl-2.0 |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/test/test_signal.py | 79 | 33870 | import unittest
from test import support
from contextlib import closing
import gc
import pickle
import select
import signal
import struct
import subprocess
import traceback
import sys, os, time, errno
from test.script_helper import assert_python_ok, spawn_python
try:
import threading
except ImportError:
threading = None
class HandlerBCalled(Exception):
pass
def exit_subprocess():
"""Use os._exit(0) to exit the current subprocess.
Otherwise, the test catches the SystemExit and continues executing
in parallel with the original test, so you wind up with an
exponential number of tests running concurrently.
"""
os._exit(0)
def ignoring_eintr(__func, *args, **kwargs):
try:
return __func(*args, **kwargs)
except OSError as e:
if e.errno != errno.EINTR:
raise
return None
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class InterProcessSignalTests(unittest.TestCase):
MAX_DURATION = 20 # Entire test should last at most 20 sec.
def setUp(self):
self.using_gc = gc.isenabled()
gc.disable()
def tearDown(self):
if self.using_gc:
gc.enable()
def format_frame(self, frame, limit=None):
return ''.join(traceback.format_stack(frame, limit=limit))
def handlerA(self, signum, frame):
self.a_called = True
def handlerB(self, signum, frame):
self.b_called = True
raise HandlerBCalled(signum, self.format_frame(frame))
def wait(self, child):
"""Wait for child to finish, ignoring EINTR."""
while True:
try:
child.wait()
return
except OSError as e:
if e.errno != errno.EINTR:
raise
def run_test(self):
# Install handlers. This function runs in a sub-process, so we
# don't worry about re-setting the default handlers.
signal.signal(signal.SIGHUP, self.handlerA)
signal.signal(signal.SIGUSR1, self.handlerB)
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
signal.signal(signal.SIGALRM, signal.default_int_handler)
# Variables the signals will modify:
self.a_called = False
self.b_called = False
# Let the sub-processes know who to send signals to.
pid = os.getpid()
child = ignoring_eintr(subprocess.Popen, ['kill', '-HUP', str(pid)])
if child:
self.wait(child)
if not self.a_called:
time.sleep(1) # Give the signal time to be delivered.
self.assertTrue(self.a_called)
self.assertFalse(self.b_called)
self.a_called = False
# Make sure the signal isn't delivered while the previous
# Popen object is being destroyed, because __del__ swallows
# exceptions.
del child
try:
child = subprocess.Popen(['kill', '-USR1', str(pid)])
# This wait should be interrupted by the signal's exception.
self.wait(child)
time.sleep(1) # Give the signal time to be delivered.
self.fail('HandlerBCalled exception not raised')
except HandlerBCalled:
self.assertTrue(self.b_called)
self.assertFalse(self.a_called)
child = ignoring_eintr(subprocess.Popen, ['kill', '-USR2', str(pid)])
if child:
self.wait(child) # Nothing should happen.
try:
signal.alarm(1)
# The race condition in pause doesn't matter in this case,
# since alarm is going to raise a KeyboardException, which
# will skip the call.
signal.pause()
# But if another signal arrives before the alarm, pause
# may return early.
time.sleep(1)
except KeyboardInterrupt:
pass
except:
self.fail("Some other exception woke us from pause: %s" %
traceback.format_exc())
else:
self.fail("pause returned of its own accord, and the signal"
" didn't arrive after another second.")
# Issue 3864, unknown if this affects earlier versions of freebsd also
@unittest.skipIf(sys.platform=='freebsd6',
'inter process signals not reliable (do not mix well with threading) '
'on freebsd6')
def test_main(self):
# This function spawns a child process to insulate the main
# test-running process from all the signals. It then
# communicates with that child process over a pipe and
# re-raises information about any exceptions the child
# raises. The real work happens in self.run_test().
os_done_r, os_done_w = os.pipe()
with closing(os.fdopen(os_done_r, 'rb')) as done_r, \
closing(os.fdopen(os_done_w, 'wb')) as done_w:
child = os.fork()
if child == 0:
# In the child process; run the test and report results
# through the pipe.
try:
done_r.close()
# Have to close done_w again here because
# exit_subprocess() will skip the enclosing with block.
with closing(done_w):
try:
self.run_test()
except:
pickle.dump(traceback.format_exc(), done_w)
else:
pickle.dump(None, done_w)
except:
print('Uh oh, raised from pickle.')
traceback.print_exc()
finally:
exit_subprocess()
done_w.close()
# Block for up to MAX_DURATION seconds for the test to finish.
r, w, x = select.select([done_r], [], [], self.MAX_DURATION)
if done_r in r:
tb = pickle.load(done_r)
if tb:
self.fail(tb)
else:
os.kill(child, signal.SIGKILL)
self.fail('Test deadlocked after %d seconds.' %
self.MAX_DURATION)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class PosixTests(unittest.TestCase):
def trivial_signal_handler(self, *args):
pass
def test_out_of_range_signal_number_raises_error(self):
self.assertRaises(ValueError, signal.getsignal, 4242)
self.assertRaises(ValueError, signal.signal, 4242,
self.trivial_signal_handler)
def test_setting_signal_handler_to_none_raises_error(self):
self.assertRaises(TypeError, signal.signal,
signal.SIGUSR1, None)
def test_getsignal(self):
hup = signal.signal(signal.SIGHUP, self.trivial_signal_handler)
self.assertEqual(signal.getsignal(signal.SIGHUP),
self.trivial_signal_handler)
signal.signal(signal.SIGHUP, hup)
self.assertEqual(signal.getsignal(signal.SIGHUP), hup)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
class WindowsSignalTests(unittest.TestCase):
def test_issue9324(self):
# Updated for issue #10003, adding SIGBREAK
handler = lambda x, y: None
checked = set()
for sig in (signal.SIGABRT, signal.SIGBREAK, signal.SIGFPE,
signal.SIGILL, signal.SIGINT, signal.SIGSEGV,
signal.SIGTERM):
# Set and then reset a handler for signals that work on windows.
# Issue #18396, only for signals without a C-level handler.
if signal.getsignal(sig) is not None:
signal.signal(sig, signal.signal(sig, handler))
checked.add(sig)
# Issue #18396: Ensure the above loop at least tested *something*
self.assertTrue(checked)
with self.assertRaises(ValueError):
signal.signal(-1, handler)
with self.assertRaises(ValueError):
signal.signal(7, handler)
class WakeupFDTests(unittest.TestCase):
def test_invalid_fd(self):
fd = support.make_bad_fd()
self.assertRaises(ValueError, signal.set_wakeup_fd, fd)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class WakeupSignalTests(unittest.TestCase):
def check_wakeup(self, test_body, *signals, ordered=True):
# use a subprocess to have only one thread
code = """if 1:
import fcntl
import os
import signal
import struct
signals = {!r}
def handler(signum, frame):
pass
def check_signum(signals):
data = os.read(read, len(signals)+1)
raised = struct.unpack('%uB' % len(data), data)
if not {!r}:
raised = set(raised)
signals = set(signals)
if raised != signals:
raise Exception("%r != %r" % (raised, signals))
{}
signal.signal(signal.SIGALRM, handler)
read, write = os.pipe()
for fd in (read, write):
flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
signal.set_wakeup_fd(write)
test()
check_signum(signals)
os.close(read)
os.close(write)
""".format(signals, ordered, test_body)
assert_python_ok('-c', code)
def test_wakeup_write_error(self):
# Issue #16105: write() errors in the C signal handler should not
# pass silently.
# Use a subprocess to have only one thread.
code = """if 1:
import errno
import fcntl
import os
import signal
import sys
import time
from test.support import captured_stderr
def handler(signum, frame):
1/0
signal.signal(signal.SIGALRM, handler)
r, w = os.pipe()
flags = fcntl.fcntl(r, fcntl.F_GETFL, 0)
fcntl.fcntl(r, fcntl.F_SETFL, flags | os.O_NONBLOCK)
# Set wakeup_fd a read-only file descriptor to trigger the error
signal.set_wakeup_fd(r)
try:
with captured_stderr() as err:
signal.alarm(1)
time.sleep(5.0)
except ZeroDivisionError:
# An ignored exception should have been printed out on stderr
err = err.getvalue()
if ('Exception ignored when trying to write to the signal wakeup fd'
not in err):
raise AssertionError(err)
if ('OSError: [Errno %d]' % errno.EBADF) not in err:
raise AssertionError(err)
else:
raise AssertionError("ZeroDivisionError not raised")
"""
r, w = os.pipe()
try:
os.write(r, b'x')
except OSError:
pass
else:
self.skipTest("OS doesn't report write() error on the read end of a pipe")
finally:
os.close(r)
os.close(w)
assert_python_ok('-c', code)
def test_wakeup_fd_early(self):
self.check_wakeup("""def test():
import select
import time
TIMEOUT_FULL = 10
TIMEOUT_HALF = 5
signal.alarm(1)
before_time = time.time()
# We attempt to get a signal during the sleep,
# before select is called
time.sleep(TIMEOUT_FULL)
mid_time = time.time()
dt = mid_time - before_time
if dt >= TIMEOUT_HALF:
raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
select.select([read], [], [], TIMEOUT_FULL)
after_time = time.time()
dt = after_time - mid_time
if dt >= TIMEOUT_HALF:
raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
""", signal.SIGALRM)
def test_wakeup_fd_during(self):
self.check_wakeup("""def test():
import select
import time
TIMEOUT_FULL = 10
TIMEOUT_HALF = 5
signal.alarm(1)
before_time = time.time()
# We attempt to get a signal during the select call
try:
select.select([read], [], [], TIMEOUT_FULL)
except OSError:
pass
else:
raise Exception("OSError not raised")
after_time = time.time()
dt = after_time - before_time
if dt >= TIMEOUT_HALF:
raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
""", signal.SIGALRM)
def test_signum(self):
self.check_wakeup("""def test():
signal.signal(signal.SIGUSR1, handler)
os.kill(os.getpid(), signal.SIGUSR1)
os.kill(os.getpid(), signal.SIGALRM)
""", signal.SIGUSR1, signal.SIGALRM)
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_pending(self):
self.check_wakeup("""def test():
signum1 = signal.SIGUSR1
signum2 = signal.SIGUSR2
signal.signal(signum1, handler)
signal.signal(signum2, handler)
signal.pthread_sigmask(signal.SIG_BLOCK, (signum1, signum2))
os.kill(os.getpid(), signum1)
os.kill(os.getpid(), signum2)
# Unblocking the 2 signals calls the C signal handler twice
signal.pthread_sigmask(signal.SIG_UNBLOCK, (signum1, signum2))
""", signal.SIGUSR1, signal.SIGUSR2, ordered=False)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class SiginterruptTest(unittest.TestCase):
def readpipe_interrupted(self, interrupt):
"""Perform a read during which a signal will arrive. Return True if the
read is interrupted by the signal and raises an exception. Return False
if it returns normally.
"""
# use a subprocess to have only one thread, to have a timeout on the
# blocking read and to not touch signal handling in this process
code = """if 1:
import errno
import os
import signal
import sys
interrupt = %r
r, w = os.pipe()
def handler(signum, frame):
pass
signal.signal(signal.SIGALRM, handler)
if interrupt is not None:
signal.siginterrupt(signal.SIGALRM, interrupt)
print("ready")
sys.stdout.flush()
# run the test twice
for loop in range(2):
# send a SIGALRM in a second (during the read)
signal.alarm(1)
try:
# blocking call: read from a pipe without data
os.read(r, 1)
except OSError as err:
if err.errno != errno.EINTR:
raise
else:
sys.exit(2)
sys.exit(3)
""" % (interrupt,)
with spawn_python('-c', code) as process:
try:
# wait until the child process is loaded and has started
first_line = process.stdout.readline()
stdout, stderr = process.communicate(timeout=5.0)
except subprocess.TimeoutExpired:
process.kill()
return False
else:
stdout = first_line + stdout
exitcode = process.wait()
if exitcode not in (2, 3):
raise Exception("Child error (exit code %s): %r"
% (exitcode, stdout))
return (exitcode == 3)
def test_without_siginterrupt(self):
# If a signal handler is installed and siginterrupt is not called
# at all, when that signal arrives, it interrupts a syscall that's in
# progress.
interrupted = self.readpipe_interrupted(None)
self.assertTrue(interrupted)
def test_siginterrupt_on(self):
# If a signal handler is installed and siginterrupt is called with
# a true value for the second argument, when that signal arrives, it
# interrupts a syscall that's in progress.
interrupted = self.readpipe_interrupted(True)
self.assertTrue(interrupted)
def test_siginterrupt_off(self):
# If a signal handler is installed and siginterrupt is called with
# a false value for the second argument, when that signal arrives, it
# does not interrupt a syscall that's in progress.
interrupted = self.readpipe_interrupted(False)
self.assertFalse(interrupted)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class ItimerTest(unittest.TestCase):
def setUp(self):
self.hndl_called = False
self.hndl_count = 0
self.itimer = None
self.old_alarm = signal.signal(signal.SIGALRM, self.sig_alrm)
def tearDown(self):
signal.signal(signal.SIGALRM, self.old_alarm)
if self.itimer is not None: # test_itimer_exc doesn't change this attr
# just ensure that itimer is stopped
signal.setitimer(self.itimer, 0)
def sig_alrm(self, *args):
self.hndl_called = True
def sig_vtalrm(self, *args):
self.hndl_called = True
if self.hndl_count > 3:
# it shouldn't be here, because it should have been disabled.
raise signal.ItimerError("setitimer didn't disable ITIMER_VIRTUAL "
"timer.")
elif self.hndl_count == 3:
# disable ITIMER_VIRTUAL, this function shouldn't be called anymore
signal.setitimer(signal.ITIMER_VIRTUAL, 0)
self.hndl_count += 1
def sig_prof(self, *args):
self.hndl_called = True
signal.setitimer(signal.ITIMER_PROF, 0)
def test_itimer_exc(self):
# XXX I'm assuming -1 is an invalid itimer, but maybe some platform
# defines it ?
self.assertRaises(signal.ItimerError, signal.setitimer, -1, 0)
# Negative times are treated as zero on some platforms.
if 0:
self.assertRaises(signal.ItimerError,
signal.setitimer, signal.ITIMER_REAL, -1)
def test_itimer_real(self):
self.itimer = signal.ITIMER_REAL
signal.setitimer(self.itimer, 1.0)
signal.pause()
self.assertEqual(self.hndl_called, True)
# Issue 3864, unknown if this affects earlier versions of freebsd also
@unittest.skipIf(sys.platform in ('freebsd6', 'netbsd5'),
'itimer not reliable (does not mix well with threading) on some BSDs.')
def test_itimer_virtual(self):
self.itimer = signal.ITIMER_VIRTUAL
signal.signal(signal.SIGVTALRM, self.sig_vtalrm)
signal.setitimer(self.itimer, 0.3, 0.2)
start_time = time.time()
while time.time() - start_time < 60.0:
# use up some virtual time by doing real work
_ = pow(12345, 67890, 10000019)
if signal.getitimer(self.itimer) == (0.0, 0.0):
break # sig_vtalrm handler stopped this itimer
else: # Issue 8424
self.skipTest("timeout: likely cause: machine too slow or load too "
"high")
# virtual itimer should be (0.0, 0.0) now
self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
# Issue 3864, unknown if this affects earlier versions of freebsd also
@unittest.skipIf(sys.platform=='freebsd6',
'itimer not reliable (does not mix well with threading) on freebsd6')
def test_itimer_prof(self):
self.itimer = signal.ITIMER_PROF
signal.signal(signal.SIGPROF, self.sig_prof)
signal.setitimer(self.itimer, 0.2, 0.2)
start_time = time.time()
while time.time() - start_time < 60.0:
# do some work
_ = pow(12345, 67890, 10000019)
if signal.getitimer(self.itimer) == (0.0, 0.0):
break # sig_prof handler stopped this itimer
else: # Issue 8424
self.skipTest("timeout: likely cause: machine too slow or load too "
"high")
# profiling itimer should be (0.0, 0.0) now
self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
class PendingSignalsTests(unittest.TestCase):
"""
Test pthread_sigmask(), pthread_kill(), sigpending() and sigwait()
functions.
"""
@unittest.skipUnless(hasattr(signal, 'sigpending'),
'need signal.sigpending()')
def test_sigpending_empty(self):
self.assertEqual(signal.sigpending(), set())
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
@unittest.skipUnless(hasattr(signal, 'sigpending'),
'need signal.sigpending()')
def test_sigpending(self):
code = """if 1:
import os
import signal
def handler(signum, frame):
1/0
signum = signal.SIGUSR1
signal.signal(signum, handler)
signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
os.kill(os.getpid(), signum)
pending = signal.sigpending()
if pending != {signum}:
raise Exception('%s != {%s}' % (pending, signum))
try:
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
"""
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'pthread_kill'),
'need signal.pthread_kill()')
def test_pthread_kill(self):
code = """if 1:
import signal
import threading
import sys
signum = signal.SIGUSR1
def handler(signum, frame):
1/0
signal.signal(signum, handler)
if sys.platform == 'freebsd6':
                # Issue #12392 and #12469: sending a signal to the main thread
                # doesn't work before the creation of the first thread on
# FreeBSD 6
def noop():
pass
thread = threading.Thread(target=noop)
thread.start()
thread.join()
tid = threading.get_ident()
try:
signal.pthread_kill(tid, signum)
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
"""
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def wait_helper(self, blocked, test):
"""
test: body of the "def test(signum):" function.
blocked: number of the blocked signal
"""
code = '''if 1:
import signal
import sys
def handler(signum, frame):
1/0
%s
blocked = %s
signum = signal.SIGALRM
# child: block and wait the signal
try:
signal.signal(signum, handler)
signal.pthread_sigmask(signal.SIG_BLOCK, [blocked])
# Do the tests
test(signum)
# The handler must not be called on unblock
try:
signal.pthread_sigmask(signal.SIG_UNBLOCK, [blocked])
except ZeroDivisionError:
print("the signal handler has been called",
file=sys.stderr)
sys.exit(1)
except BaseException as err:
print("error: {}".format(err), file=sys.stderr)
sys.stderr.flush()
sys.exit(1)
''' % (test.strip(), blocked)
# sig*wait* must be called with the signal blocked: since the current
# process might have several threads running, use a subprocess to have
# a single thread.
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'sigwait'),
'need signal.sigwait()')
def test_sigwait(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
received = signal.sigwait([signum])
if received != signum:
raise Exception('received %s, not %s' % (received, signum))
''')
@unittest.skipUnless(hasattr(signal, 'sigwaitinfo'),
'need signal.sigwaitinfo()')
def test_sigwaitinfo(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
info = signal.sigwaitinfo([signum])
if info.si_signo != signum:
raise Exception("info.si_signo != %s" % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
info = signal.sigtimedwait([signum], 10.1000)
if info.si_signo != signum:
raise Exception('info.si_signo != %s' % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_poll(self):
# check that polling with sigtimedwait works
self.wait_helper(signal.SIGALRM, '''
def test(signum):
import os
os.kill(os.getpid(), signum)
info = signal.sigtimedwait([signum], 0)
if info.si_signo != signum:
raise Exception('info.si_signo != %s' % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_timeout(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
received = signal.sigtimedwait([signum], 1.0)
if received is not None:
raise Exception("received=%r" % (received,))
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_negative_timeout(self):
signum = signal.SIGALRM
self.assertRaises(ValueError, signal.sigtimedwait, [signum], -1.0)
@unittest.skipUnless(hasattr(signal, 'sigwaitinfo'),
'need signal.sigwaitinfo()')
# Issue #18238: sigwaitinfo() can be interrupted on Linux (raises
# InterruptedError), but not on AIX
@unittest.skipIf(sys.platform.startswith("aix"),
'signal.sigwaitinfo() cannot be interrupted on AIX')
def test_sigwaitinfo_interrupted(self):
self.wait_helper(signal.SIGUSR1, '''
def test(signum):
import errno
hndl_called = True
def alarm_handler(signum, frame):
hndl_called = False
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(1)
try:
signal.sigwaitinfo([signal.SIGUSR1])
except OSError as e:
if e.errno == errno.EINTR:
if not hndl_called:
raise Exception("SIGALRM handler not called")
else:
raise Exception("Expected EINTR to be raised by sigwaitinfo")
else:
raise Exception("Expected EINTR to be raised by sigwaitinfo")
''')
@unittest.skipUnless(hasattr(signal, 'sigwait'),
'need signal.sigwait()')
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
@unittest.skipIf(threading is None, "test needs threading module")
def test_sigwait_thread(self):
# Check that calling sigwait() from a thread doesn't suspend the whole
# process. A new interpreter is spawned to avoid problems when mixing
# threads and fork(): only async-safe functions are allowed between
# fork() and exec().
assert_python_ok("-c", """if True:
import os, threading, sys, time, signal
# the default handler terminates the process
signum = signal.SIGUSR1
def kill_later():
# wait until the main thread is waiting in sigwait()
time.sleep(1)
os.kill(os.getpid(), signum)
# the signal must be blocked by all the threads
signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
killer = threading.Thread(target=kill_later)
killer.start()
received = signal.sigwait([signum])
if received != signum:
print("sigwait() received %s, not %s" % (received, signum),
file=sys.stderr)
sys.exit(1)
killer.join()
# unblock the signal, which should have been cleared by sigwait()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
""")
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_pthread_sigmask_arguments(self):
self.assertRaises(TypeError, signal.pthread_sigmask)
self.assertRaises(TypeError, signal.pthread_sigmask, 1)
self.assertRaises(TypeError, signal.pthread_sigmask, 1, 2, 3)
self.assertRaises(OSError, signal.pthread_sigmask, 1700, [])
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_pthread_sigmask(self):
code = """if 1:
import signal
import os; import threading
def handler(signum, frame):
1/0
def kill(signum):
os.kill(os.getpid(), signum)
def read_sigmask():
return signal.pthread_sigmask(signal.SIG_BLOCK, [])
signum = signal.SIGUSR1
# Install our signal handler
old_handler = signal.signal(signum, handler)
# Unblock SIGUSR1 (and copy the old mask) to test our signal handler
old_mask = signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
try:
kill(signum)
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
# Block and then raise SIGUSR1. The signal is blocked: the signal
# handler is not called, and the signal is now pending
signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
kill(signum)
# Check the new mask
blocked = read_sigmask()
if signum not in blocked:
raise Exception("%s not in %s" % (signum, blocked))
if old_mask ^ blocked != {signum}:
raise Exception("%s ^ %s != {%s}" % (old_mask, blocked, signum))
# Unblock SIGUSR1
try:
# unblock the pending signal calls immediately the signal handler
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
try:
kill(signum)
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
# Check the new mask
unblocked = read_sigmask()
if signum in unblocked:
raise Exception("%s in %s" % (signum, unblocked))
if blocked ^ unblocked != {signum}:
raise Exception("%s ^ %s != {%s}" % (blocked, unblocked, signum))
if old_mask != unblocked:
raise Exception("%s != %s" % (old_mask, unblocked))
"""
assert_python_ok('-c', code)
@unittest.skipIf(sys.platform == 'freebsd6',
"issue #12392: send a signal to the main thread doesn't work "
"before the creation of the first thread on FreeBSD 6")
@unittest.skipUnless(hasattr(signal, 'pthread_kill'),
'need signal.pthread_kill()')
def test_pthread_kill_main_thread(self):
# Test that a signal can be sent to the main thread with pthread_kill()
# before any other thread has been created (see issue #12392).
code = """if True:
import threading
import signal
import sys
def handler(signum, frame):
sys.exit(3)
signal.signal(signal.SIGUSR1, handler)
signal.pthread_kill(threading.get_ident(), signal.SIGUSR1)
sys.exit(2)
"""
with spawn_python('-c', code) as process:
stdout, stderr = process.communicate()
exitcode = process.wait()
if exitcode != 3:
raise Exception("Child error (exit code %s): %s" %
(exitcode, stdout))
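# Hedged sketch (not part of the test classes above): the minimal
# block-then-wait pattern that wait_helper() and test_sigwait() exercise.
# POSIX-only; as the comments above note, it should run in a process with a
# single thread so no other thread consumes the signal first.
def _sigwait_pattern_demo():
    signum = signal.SIGALRM
    signal.pthread_sigmask(signal.SIG_BLOCK, [signum])    # block before waiting
    signal.alarm(1)                                       # schedule delivery
    received = signal.sigwait([signum])                   # consumes the pending signal
    signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])  # nothing is left pending
    return received == signum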
def test_main():
try:
support.run_unittest(PosixTests, InterProcessSignalTests,
WakeupFDTests, WakeupSignalTests,
SiginterruptTest, ItimerTest, WindowsSignalTests,
PendingSignalsTests)
finally:
support.reap_children()
if __name__ == "__main__":
test_main()
| gpl-2.0 |
harryliu/edwin | edwinAgent/site_packages/dbRowFactory/pyDbRowFactory.py | 4 | 14255 | # -*- coding: utf-8 -*-
'''
#@summary: DbRowFactory is a common factory to convert a db row tuple into a user-defined class object.
It supports SqlAlchemy, and any database module conforming to the Python Database API
Specification v2.0, e.g. cx_Oracle, zxJDBC
#@note:
Note 1: The DbRowFactory will create one row instance based on row class binding,
and try to assign all fields' value to the new object.
The DbRowFactory maps field and class setter_method/attribute
by matching names. If both a setter_method and an attribute match
the same field, the setter_method will be chosen eventually.
Note 2: __init__() of the class must take no arguments other than self
#@see: http://www.python.org/dev/peps/pep-0249/
#Tested under: Python 2.7, Jython2.5.2
#Change log:
version 1.0.1, 09 Nov. 2011, initial version
version 1.0.2, 16 Feb. 2012, use pyObjectCreator to instantiate rowClass
version 1.0.3, 08 Mar. 2012, fromSqlAlchemyResultProxy(), fetchAllRowObjects() functions added
version 1.0.4, 31 May. 2013, bug fixed version, disable auto-close cursor if not created by SqlAlchemy
version 1.0.5, 04 Feb. 2014, import pyObjectCreator in explicit relative importing
##====================sample begin=======
#sample code , file: OracleJdbcSample.py
from __future__ import with_statement
from com.ziclix.python.sql import zxJDBC
from pyDbRowFactory import DbRowFactory
class rowClass2(object):
def __init__(self):
self.owner=None
self.tablename=None
def setOWNER(self, value):
self.owner=value
def print2(self):
print("ownerName="+self.owner+",tablename="+self.tablename)
if __name__=="__main__":
#DB API 2.0 cursor sample
jdbc_url="jdbc:oracle:thin:@127.0.0.1:1521:orcl";
username = "user1"
password = "pwd1"
driver = "oracle.jdbc.driver.OracleDriver"
with zxJDBC.connect(jdbc_url, username, password, driver) as conn:
with conn.cursor() as cursor :
cursor.execute("""select tbl.owner, tbl.table_name tablename,
tbl.tablespace_name from all_tables tbl""")
#use DbRowFactory to bind rowClass2 class defined in pkg1.OracleJdbcSample.py
rowFactory=DbRowFactory(cursor, "pkg1.OracleJdbcSample.rowClass2")
for rowObject in rowFactory.fetchAllRowObjects():
rowObject.print2()
#sqlalchemy sample
from sqlalchemy import create_engine
engine=create_engine("sqlite:///:memory:", echo=True)
sql="""select tbl.owner, tbl.table_name tablename,
tbl.tablespace_name from all_tables tbl"""
resultProxy=engine.execute(sql)
rowFactory=DbRowFactory.fromSqlAlchemyResultProxy(resultProxy, "pkg1.OracleJdbcSample.rowClass2")
for rowObject in rowFactory.fetchAllRowObjects():
rowObject.print2()
##====================sample end=======
'''
import inspect
import sys
__author__ = 'Harry Liu, <[email protected]>'
__version__= '1.0.5'
class DbRowFactory(object):
'''
    #@summary: DbRowFactory is a common row factory for any database
    module conforming to the Python Database API Specification
    v2.0, e.g. cx_Oracle, zxJDBC
#@note:
Note 1: The DbRowFactory will create one row instance based on row class binding,
and try to assign all fields' value to the new object.
The DbRowFactory maps field and class setter_method/attribute
by matching names. If both a setter_method and an attribute match
the same field, the setter_method will be chosen eventually.
    Note 2: __init__() of the class must take no arguments other than self
#@see: http://www.python.org/dev/peps/pep-0249/
#@author: Harry Liu, [email protected]
'''
FIELD_TO_SETTER=1
FIELD_TO_ATTRIBUTE=2
FIELD_TO_NONE=0
def __init__(self, cursor, rowClassFullName, setterPrefix="set", caseSensitive=False):
'''
##@summary: Constructor of DbRowFactory
[arguments]
cursor: Db API 2.0 cursor object
rowClassFullName: full class name that you want to instantiate, included package and module name if has
        setterPrefix: setter method prefix
        caseSensitive: whether to match field names with class setter_method/attribute names case-sensitively
'''
self._cursor=cursor
self._setterPrefix=setterPrefix
self._caseSensitive=caseSensitive
self._fieldMemeberMapped=False
self._allMethods=[]
self._allAttributes=[]
        self._fieldMapList=[]  # list of {field: (memberFlag, memberName)} dicts, built by _mapFieldAndMember()
self._rowClassMeta = getClassMeta(rowClassFullName)
self._resultProxy=None
@classmethod
def fromSqlAlchemyResultProxy(cls, resultProxy, rowClassFullName, setterPrefix="set", caseSensitive=False):
'''
##@summary: another constructor of DbRowFactory
[arguments]
resultProxy: SqlAlchemyResultProxy object, can returned after engine.execute("select 1") called,
rowClassFullName: full class name that you want to instantiate, included package and module name if has
setterPrefix: settor method prefix
caseSensitive: match fieldname with class setter_method/attribute in case sensitive or not
'''
factory= cls(resultProxy.cursor, rowClassFullName, setterPrefix, caseSensitive)
factory._resultProxy=resultProxy
return factory
def createRowInstance(self, row ,*args,**kwargs):
'''
#@summary: create one instance object, and try to assign all fields' value to the new object
[arguments]
row: row tuple in a _cursor
*args: list style arguments in class constructor related to rowClassFullName
        **kwargs: dict style arguments in class constructor related to rowClassFullName
'''
#step 1: initialize rowInstance before finding attributes.
rowObject = self._rowClassMeta(*args,**kwargs)
#mapping process run only once in order to gain better performance
if self._fieldMemeberMapped==False:
            #dir() cannot list instance attributes until the class has been instantiated
self._allAttributes=self._getAllMembers(rowObject)
self._allMethods=self._getAllMembers(rowObject)
self._fieldMapList=self._mapFieldAndMember()
self._fieldMemeberMapped=True
#step 2: assign field values
i=0
#self._fieldMapList is [{Field1:(member1Flag,member1)},{Field2:(member2Flag,member2)}]
for fieldMemberDict in self._fieldMapList:
for field in fieldMemberDict:
member=fieldMemberDict[field]
if member[0]==self.FIELD_TO_NONE:
pass
else:
fieldValue=row[i]
if member[0]==self.FIELD_TO_SETTER:
m=getattr(rowObject, member[1])
m(fieldValue)
elif member[0]==self.FIELD_TO_ATTRIBUTE:
setattr(rowObject, member[1], fieldValue)
i=i+1
return rowObject
def _getAllMembers(self,clazz) :
'''
        #@summary: extract all user-defined members (methods and attributes) of the given object
        #@param clazz: class instance or class object
'''
members=[member for member in dir(clazz)]
sysMemberList=['__class__','__doc__','__init__','__new__','__subclasshook__','__dict__', '__module__','__delattr__', '__getattribute__', '__hash__', '__repr__', '__setattr__', '__str__','__format__', '__reduce__', '__reduce_ex__', '__sizeof__', '__weakref__']
members=[member for member in members if str(member) not in sysMemberList]
return members
def _mapFieldAndMember(self):
'''
        #@summary: create mapping between field and class setter_method/attribute, setter_method is preferred over attribute
#field can be extract from cursor.description, e.g.
sql: select 1 a, sysdate dt from dual
cursor.description:
[(u'A', 2, 22, None, 0, 0, 1), (u'DT', 91, 7, None, None, None, 1)]
'''
#print(self._cursor.description)
fields=[f[0] for f in self._cursor.description]
mapList=[]
#result is [{Field1:(member1Flag,member1)},{Field2:(member2Flag,member2)}]
for f in fields:
m= self._getSetterMethod(f)
key=f
if m:
value=(self.FIELD_TO_SETTER,m)
else:
m= self._getAttribute(f)
if m:
value=(self.FIELD_TO_ATTRIBUTE,m)
else:
value=(self.FIELD_TO_NONE,None)
mapList.append({key:value})
return mapList
def _getAttribute(self, fieldName):
'''
        #@summary: get the attribute matching the given field name
'''
if self._caseSensitive:
if fieldName in self._allAttributes:
return fieldName
else:
fieldNameUpper=fieldName.upper()
allAttributesMap={} # attributeUpper=attribute
for attr in self._allAttributes:
allAttributesMap[attr.upper()]=attr
if fieldNameUpper in allAttributesMap:
return allAttributesMap[fieldNameUpper]
def _getSetterMethod(self, fieldName):
'''
        ##@summary: get the setter method matching the given field name
'''
if self._caseSensitive:
setter=self._setterPrefix+fieldName
if setter in self._allMethods:
return setter
else:
setterUpper=self._setterPrefix+fieldName
setterUpper=setterUpper.upper()
allMethodMap={} # methodUpper=method
for method in self._allMethods:
allMethodMap[method.upper()]=method
if setterUpper in allMethodMap:
return allMethodMap[setterUpper]
def _closeResultProxy(self):
if self._resultProxy is not None:
if self._resultProxy.closed==False:
self._resultProxy.close()
def _createdBySqlAlchemy(self):
return self._resultProxy!=None
def fetchAllRowObjects(self):
"""Fetch all rows, just like DB-API ``cursor.fetchall()``.
the cursor is automatically closed after this is called
"""
result=[]
rows=self._cursor.fetchall()
for row in rows:
rowObject=self.createRowInstance(row)
result.append(rowObject)
if self._createdBySqlAlchemy():
self._cursor.close()
self._closeResultProxy()
return result
def fetchManyRowObjects(self, size=None):
"""Fetch many rows, just like DB-API
``cursor.fetchmany(size=cursor.arraysize)``.
If rows are present, the cursor remains open after this is called.
Else the cursor is automatically closed and an empty list is returned.
"""
result=[]
rows=self._cursor.fetchmany(size)
for row in rows:
rowObject=self.createRowInstance(row)
result.append(rowObject)
if self._createdBySqlAlchemy():
if len(rows)==0:
self._cursor.close()
self._closeResultProxy()
return result
def fetchOneRowObject(self):
"""Fetch one row, just like DB-API ``cursor.fetchone()``.
If a row is present, the cursor remains open after this is called.
Else the cursor is automatically closed and None is returned.
"""
result=None
row = self._cursor.fetchone()
if row is not None:
result=self.createRowInstance(row)
else:
if self._createdBySqlAlchemy():
self._cursor.close()
self._closeResultProxy()
return result
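# Hedged sketch (not part of the original module): fetching rows in fixed-size
# batches with fetchManyRowObjects(). Per the docstring above, an empty batch
# means the cursor is exhausted (and, for SqlAlchemy-created cursors, closed).
# The row class name reuses the sample class from the module docstring.
def _fetch_in_batches_demo(cursor, batch_size=500):
    factory = DbRowFactory(cursor, "pkg1.OracleJdbcSample.rowClass2")
    rows = []
    while True:
        batch = factory.fetchManyRowObjects(batch_size)
        if not batch:
            break
        rows.extend(batch)
    return rows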
##reference doc
#http://www.cnblogs.com/sevenyuan/archive/2010/12/06/1898056.html
#http://stackoverflow.com/questions/4513192/python-dynamic-class-names
#http://stackoverflow.com/questions/1796180/python-get-list-of-al-classes-within-current-module
def createInstance(full_class_name,*args,**kwargs):
'''
instantiate class dynamically
[arguments]
full_class_name: full class name that you want to instantiate, included package and module name if has
*args: list style arguments in class constructor
    **kwargs: dict style arguments in class constructor
[return]
an instance of this full_class_name
[example]
import pyObjectCreator
full_class_name="knightmade.logging.Logger"
logger=pyObjectCreator.createInstance(full_class_name,'logname')
'''
class_meta=getClassMeta(full_class_name)
if class_meta!=None:
obj=class_meta(*args,**kwargs)
else:
obj=None
return obj
def getClassMeta(full_class_name):
'''
get class meta object of full_class_name, then we can use this meta object to instantiate full_class_name
[arguments]
full_class_name: full class name that you want to instantiate, included package and module name if has
[return]
    the class object (not an instance) of this full_class_name
[example]
import pyObjectCreator
full_class_name="knightmade.logging.Logger"
loggerMeta=pyObjectCreator.getClassMeta(full_class_name)
'''
namespace=full_class_name.strip().rsplit('.',1)
if len(namespace)==1:
class_name=namespace[0]
class_meta=_getClassMetaFromCurrModule(class_name)
else:
module_name=namespace[0]
class_name=namespace[1]
class_meta=_getClassMetaFromOtherModule(class_name,module_name)
return class_meta
def _getClassMetaFromCurrModule(class_name):
result=None
module_name="__main__"
for name, obj in inspect.getmembers(sys.modules[module_name]):
if inspect.isclass(obj):
if name==class_name:
result=obj
break
return result
def _getClassMetaFromOtherModule(class_name, module_name):
module_meta=__import__(module_name,globals(), locals(),[class_name])
if module_meta!=None:
class_meta=getattr(module_meta,class_name)
else:
class_meta=None
return class_meta | apache-2.0 |
spierepf/mpf | mpf/modes/tilt/code/tilt.py | 1 | 7456 | """Contains the Tilt mode code"""
# tilt.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
from mpf.system.config import CaseInsensitiveDict
from mpf.system.mode import Mode
from mpf.system.timing import Timing
class Tilt(Mode):
def mode_init(self):
self._balls_to_collect = 0
self._last_warning_tick = 0
self.ball_ending_tilted_queue = None
self.tilt_event_handlers = set()
self.last_tilt_warning_switch_tick = 0
self.tilt_config = self.machine.config_processor.process_config2(
config_spec='tilt',
source=self._get_merged_settings('tilt'),
section_name='tilt')
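    # Hedged example (not taken from the MPF source): the sort of machine-config
    # section the tilt_config lookups in this mode assume. The key names are the
    # ones read in this file; the values are illustrative only.
    #
    #   tilt:
    #     tilt_warning_switch_tag: tilt_warning
    #     tilt_switch_tag: tilt
    #     slam_tilt_switch_tag: slam_tilt
    #     warnings_to_tilt: 3
    #     multiple_hit_window: 300ms
    #     settle_time: 5s
    #     tilt_warnings_player_var: tilt_warnings
    #     reset_warnings_events: ball_ending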
def mode_start(self, **kwargs):
self._register_switch_handlers()
for event in self.tilt_config['reset_warnings_events']:
self.add_mode_event_handler(event, self.reset_warnings)
def mode_stop(self, **kwargs):
self._remove_switch_handlers()
self.reset_warnings_handlers = set()
def _register_switch_handlers(self):
for switch in self.machine.switches.items_tagged(
self.tilt_config['tilt_warning_switch_tag']):
self.machine.switch_controller.add_switch_handler(
switch_name=switch.name,
callback=self._tilt_warning_switch_handler)
for switch in self.machine.switches.items_tagged(
self.tilt_config['tilt_switch_tag']):
self.machine.switch_controller.add_switch_handler(
switch_name=switch.name,
callback=self.tilt)
for switch in self.machine.switches.items_tagged(
self.tilt_config['slam_tilt_switch_tag']):
self.machine.switch_controller.add_switch_handler(
switch_name=switch.name,
callback=self.slam_tilt)
def _remove_switch_handlers(self):
for switch in self.machine.switches.items_tagged(
self.tilt_config['tilt_warning_switch_tag']):
self.machine.switch_controller.remove_switch_handler(
switch_name=switch.name,
callback=self._tilt_warning_switch_handler)
for switch in self.machine.switches.items_tagged(
self.tilt_config['tilt_switch_tag']):
self.machine.switch_controller.remove_switch_handler(
switch_name=switch.name,
callback=self.tilt)
for switch in self.machine.switches.items_tagged(
self.tilt_config['slam_tilt_switch_tag']):
self.machine.switch_controller.remove_switch_handler(
switch_name=switch.name,
callback=self.slam_tilt)
def tilt_warning(self):
"""Processes a tilt warning. If the number of warnings is the number to
cause a tilt, a tilt will be processed.
"""
self.last_tilt_warning_switch_tick = self.machine.tick_num
if not self.player:
return
self.log.debug("Tilt Warning")
self._last_warning_tick = self.machine.tick_num
self.player[self.tilt_config['tilt_warnings_player_var']] += 1
warnings = self.player[self.tilt_config['tilt_warnings_player_var']]
if warnings >= self.tilt_config['warnings_to_tilt']:
self.tilt()
else:
self.machine.events.post('tilt_warning',
warnings=warnings,
warnings_remaining=(self.tilt_config['warnings_to_tilt'] -
warnings))
self.machine.events.post('tilt_warning_{}'.format(warnings))
def reset_warnings(self, **kwargs):
"""Resets the tilt warnings for the current player."""
try:
self.player[self.tilt_config['tilt_warnings_player_var']] = 0
except AttributeError:
pass
def tilt(self, **kwargs):
"""Causes the ball to tilt."""
if not self.machine.game:
return
self._balls_to_collect = self.machine.playfield.balls
# todo use collection
self.log.debug("Processing Tilt. Balls to collect: %s",
self._balls_to_collect)
self.machine.game.tilted = True
self.machine.events.post('tilt')
self._disable_autofires()
self._disable_flippers()
self.tilt_event_handlers.add(
self.machine.events.add_handler('ball_ending',
self._ball_ending_tilted))
for device in self.machine.ball_devices:
if 'drain' in device.tags:
self.tilt_event_handlers.add(
self.machine.events.add_handler(
'balldevice_{}_ball_enter'.format(device.name),
self._tilted_ball_drain))
else:
self.tilt_event_handlers.add(
self.machine.events.add_handler(
'balldevice_{}_ball_enter'.format(device.name),
self._tilted_ball_entered_non_drain_device))
self.machine.game.ball_ending()
def _disable_flippers(self):
for flipper in self.machine.flippers:
flipper.disable()
def _disable_autofires(self):
for autofire in self.machine.autofires:
autofire.disable()
def _tilted_ball_drain(self, new_balls, unclaimed_balls, device):
self._balls_to_collect -= unclaimed_balls
self.log.debug("Tilted ball drain. Balls to collect: %s",
self._balls_to_collect)
if self._balls_to_collect <= 0:
self._tilt_done()
return {'unclaimed_balls': 0}
def _tilted_ball_entered_non_drain_device(self, new_balls, unclaimed_balls,
device):
return {'unclaimed_balls': unclaimed_balls}
def _tilt_switch_handler(self):
self.tilt()
def _tilt_warning_switch_handler(self):
if (self._last_warning_tick + self.tilt_config['multiple_hit_window']
<= self.machine.tick_num):
self.tilt_warning()
def _ball_ending_tilted(self, queue):
self.ball_ending_tilted_queue = queue
queue.wait()
if not self._balls_to_collect:
self._tilt_done()
def _tilt_done(self):
if self.tilt_settle_ms_remaining():
self.delay.reset(ms=self.tilt_settle_ms_remaining(),
callback=self._tilt_done,
name='tilt')
else:
self.machine.game.tilted = False
self.machine.events.post('tilt_clear')
self.ball_ending_tilted_queue.clear()
self.machine.events.remove_handlers_by_keys(self.tilt_event_handlers)
self.tilt_event_handlers = set()
def tilt_settle_ms_remaining(self):
"""Returns the amount of milliseconds remaining until the tilt settle
time has cleared.
"""
ticks = (self.machine.tick_num - self.last_tilt_warning_switch_tick -
self.tilt_config['settle_time'])
if ticks >= 0:
return 0
else:
return abs(ticks * Timing.ms_per_tick)
def slam_tilt(self):
self.machine.events.post('slam_tilt')
self.game_ended() | mit |
jkleckner/ansible | docsite/conf.py | 3 | 6348 | # -*- coding: utf-8 -*-
#
# documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 27 13:23:22 2008-2009.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys
import os
# pip install sphinx_rtd_theme
#import sphinx_rtd_theme
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
#
sys.path.insert(0, os.path.join('ansible', 'lib'))
sys.path.append(os.path.abspath('_themes'))
VERSION='0.01'
AUTHOR='AnsibleWorks'
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings.
# They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Later on, add 'sphinx.ext.viewcode' to the list if you want to have
# colorized code generated too for references.
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Ansible Documentation'
copyright = "2013 AnsibleWorks"
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be
# searched for source files.
#exclude_dirs = []
# A list of glob-style patterns that should be excluded when looking
# for source files.
exclude_patterns = ['modules']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
html_theme_path = ['_themes']
html_theme = 'srtd'
html_short_title = 'Ansible Documentation'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'solar.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Ansible Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Poseidodoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
('index', 'ansible.tex', 'Ansible 1.2 Documentation',
AUTHOR, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
autoclass_content = 'both'
| gpl-3.0 |
wlanslovenija/django-tastypie | tastypie/throttle.py | 5 | 4861 | from __future__ import unicode_literals
import time
from django.core.cache import cache
class BaseThrottle(object):
"""
A simplified, swappable base class for throttling.
Does nothing save for simulating the throttling API and implementing
some common bits for the subclasses.
Accepts a number of optional kwargs::
* ``throttle_at`` - the number of requests at which the user should
be throttled. Default is 150 requests.
* ``timeframe`` - the length of time (in seconds) in which the user
make up to the ``throttle_at`` requests. Default is 3600 seconds (
1 hour).
* ``expiration`` - the length of time to retain the times the user
has accessed the api in the cache. Default is 604800 (1 week).
"""
def __init__(self, throttle_at=150, timeframe=3600, expiration=None):
self.throttle_at = throttle_at
# In seconds, please.
self.timeframe = timeframe
if expiration is None:
# Expire in a week.
expiration = 604800
self.expiration = int(expiration)
def convert_identifier_to_key(self, identifier):
"""
Takes an identifier (like a username or IP address) and converts it
into a key usable by the cache system.
"""
bits = []
for char in identifier:
if char.isalnum() or char in ['_', '.', '-']:
bits.append(char)
safe_string = ''.join(bits)
return "%s_accesses" % safe_string
def should_be_throttled(self, identifier, **kwargs):
"""
Returns whether or not the user has exceeded their throttle limit. If
        throttled, can return either True, an int specifying the number of
seconds to wait, or a datetime object specifying when to retry the request.
Always returns ``False``, as this implementation does not actually
throttle the user.
"""
return False
def accessed(self, identifier, **kwargs):
"""
Handles recording the user's access.
Does nothing in this implementation.
"""
pass
class CacheThrottle(BaseThrottle):
"""
A throttling mechanism that uses just the cache.
"""
def should_be_throttled(self, identifier, **kwargs):
"""
Returns whether or not the user has exceeded their throttle limit. If
throttled, can return either True, and int specifying the number of
seconds to wait, or a datetime object specifying when to retry the request.
Maintains a list of timestamps when the user accessed the api within
the cache.
        Returns ``False`` if the user should NOT be throttled, or the number of
        seconds to wait before retrying if the user should be throttled.
"""
key = self.convert_identifier_to_key(identifier)
# Weed out anything older than the timeframe.
now = int(time.time())
timeframe = int(self.timeframe)
throttle_at = int(self.throttle_at)
minimum_time = now - timeframe
times_accessed = [access for access in cache.get(key, []) if access >= minimum_time]
cache.set(key, times_accessed, self.expiration)
if len(times_accessed) >= throttle_at:
# Throttle them.
return timeframe - (now - times_accessed[-throttle_at])
# Let them through.
return False
def accessed(self, identifier, **kwargs):
"""
Handles recording the user's access.
Stores the current timestamp in the "accesses" list within the cache.
"""
key = self.convert_identifier_to_key(identifier)
times_accessed = cache.get(key, [])
times_accessed.append(int(time.time()))
cache.set(key, times_accessed, self.expiration)
class CacheDBThrottle(CacheThrottle):
"""
A throttling mechanism that uses the cache for actual throttling but
writes-through to the database.
This is useful for tracking/aggregating usage through time, to possibly
build a statistics interface or a billing mechanism.
"""
def accessed(self, identifier, **kwargs):
"""
Handles recording the user's access.
Does everything the ``CacheThrottle`` class does, plus logs the
access within the database using the ``ApiAccess`` model.
"""
# Do the import here, instead of top-level, so that the model is
# only required when using this throttling mechanism.
from tastypie.models import ApiAccess
super(CacheDBThrottle, self).accessed(identifier, **kwargs)
# Write out the access to the DB for logging purposes.
ApiAccess.objects.create(
identifier=identifier,
url=kwargs.get('url', ''),
request_method=kwargs.get('request_method', '')
)
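# Hedged usage sketch (not part of tastypie): how a caller inside a configured
# Django project might wire CacheThrottle into a request path. The identifier
# and URL below are hypothetical.
def _throttle_check_demo(identifier="user_42"):
    throttle = CacheThrottle(throttle_at=150, timeframe=3600)
    wait = throttle.should_be_throttled(identifier)
    if wait:
        # The caller would turn this into an HTTP 429, using ``wait`` (seconds
        # left in the timeframe) as a Retry-After hint.
        return wait
    throttle.accessed(identifier, url="/api/v1/note/", request_method="get")
    return 0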
| bsd-3-clause |
keyurpatel076/MissionPlannerGit | packages/IronPython.StdLib.2.7.4/content/Lib/timeit.py | 76 | 12059 | #! /usr/bin/env python
"""Tool for measuring execution time of small code snippets.
This module avoids a number of common traps for measuring execution
times. See also Tim Peters' introduction to the Algorithms chapter in
the Python Cookbook, published by O'Reilly.
Library usage: see the Timer class.
Command line usage:
python timeit.py [-n N] [-r N] [-s S] [-t] [-c] [-h] [--] [statement]
Options:
-n/--number N: how many times to execute 'statement' (default: see below)
-r/--repeat N: how many times to repeat the timer (default 3)
-s/--setup S: statement to be executed once initially (default 'pass')
-t/--time: use time.time() (default on Unix)
-c/--clock: use time.clock() (default on Windows)
-v/--verbose: print raw timing results; repeat for more digits precision
-h/--help: print this usage message and exit
--: separate options from statement, use when statement starts with -
statement: statement to be timed (default 'pass')
A multi-line statement may be given by specifying each line as a
separate argument; indented lines are possible by enclosing an
argument in quotes and using leading spaces. Multiple -s options are
treated similarly.
If -n is not given, a suitable number of loops is calculated by trying
successive powers of 10 until the total time is at least 0.2 seconds.
The difference in default timer function is because on Windows,
clock() has microsecond granularity but time()'s granularity is 1/60th
of a second; on Unix, clock() has 1/100th of a second granularity and
time() is much more precise. On either platform, the default timer
functions measure wall clock time, not the CPU time. This means that
other processes running on the same computer may interfere with the
timing. The best thing to do when accurate timing is necessary is to
repeat the timing a few times and use the best time. The -r option is
good for this; the default of 3 repetitions is probably enough in most
cases. On Unix, you can use clock() to measure CPU time.
Note: there is a certain baseline overhead associated with executing a
pass statement. The code here doesn't try to hide it, but you should
be aware of it. The baseline overhead can be measured by invoking the
program without arguments.
The baseline overhead differs between Python versions! Also, to
fairly compare older Python versions to Python 2.3, you may want to
use python -O for the older versions to avoid timing SET_LINENO
instructions.
"""
import gc
import sys
import time
try:
import itertools
except ImportError:
# Must be an older Python version (see timeit() below)
itertools = None
__all__ = ["Timer"]
dummy_src_name = "<timeit-src>"
default_number = 1000000
default_repeat = 3
if sys.platform == "win32":
# On Windows, the best timer is time.clock()
default_timer = time.clock
else:
# On most other platforms the best timer is time.time()
default_timer = time.time
# Don't change the indentation of the template; the reindent() calls
# in Timer.__init__() depend on setup being indented 4 spaces and stmt
# being indented 8 spaces.
template = """
def inner(_it, _timer):
%(setup)s
_t0 = _timer()
for _i in _it:
%(stmt)s
_t1 = _timer()
return _t1 - _t0
"""
def reindent(src, indent):
"""Helper to reindent a multi-line statement."""
return src.replace("\n", "\n" + " "*indent)
def _template_func(setup, func):
"""Create a timer function. Used if the "statement" is a callable."""
def inner(_it, _timer, _func=func):
setup()
_t0 = _timer()
for _i in _it:
_func()
_t1 = _timer()
return _t1 - _t0
return inner
class Timer:
"""Class for timing execution speed of small code snippets.
The constructor takes a statement to be timed, an additional
statement used for setup, and a timer function. Both statements
default to 'pass'; the timer function is platform-dependent (see
module doc string).
To measure the execution time of the first statement, use the
timeit() method. The repeat() method is a convenience to call
timeit() multiple times and return a list of results.
The statements may contain newlines, as long as they don't contain
multi-line string literals.
"""
def __init__(self, stmt="pass", setup="pass", timer=default_timer):
"""Constructor. See class doc string."""
self.timer = timer
ns = {}
if isinstance(stmt, basestring):
stmt = reindent(stmt, 8)
if isinstance(setup, basestring):
setup = reindent(setup, 4)
src = template % {'stmt': stmt, 'setup': setup}
elif hasattr(setup, '__call__'):
src = template % {'stmt': stmt, 'setup': '_setup()'}
ns['_setup'] = setup
else:
raise ValueError("setup is neither a string nor callable")
self.src = src # Save for traceback display
code = compile(src, dummy_src_name, "exec")
exec code in globals(), ns
self.inner = ns["inner"]
elif hasattr(stmt, '__call__'):
self.src = None
if isinstance(setup, basestring):
_setup = setup
def setup():
exec _setup in globals(), ns
elif not hasattr(setup, '__call__'):
raise ValueError("setup is neither a string nor callable")
self.inner = _template_func(setup, stmt)
else:
raise ValueError("stmt is neither a string nor callable")
def print_exc(self, file=None):
"""Helper to print a traceback from the timed code.
Typical use:
t = Timer(...) # outside the try/except
try:
t.timeit(...) # or t.repeat(...)
except:
t.print_exc()
The advantage over the standard traceback is that source lines
in the compiled template will be displayed.
The optional file argument directs where the traceback is
sent; it defaults to sys.stderr.
"""
import linecache, traceback
if self.src is not None:
linecache.cache[dummy_src_name] = (len(self.src),
None,
self.src.split("\n"),
dummy_src_name)
# else the source is already stored somewhere else
traceback.print_exc(file=file)
def timeit(self, number=default_number):
"""Time 'number' executions of the main statement.
To be precise, this executes the setup statement once, and
then returns the time it takes to execute the main statement
a number of times, as a float measured in seconds. The
argument is the number of times through the loop, defaulting
to one million. The main statement, the setup statement and
the timer function to be used are passed to the constructor.
"""
if itertools:
it = itertools.repeat(None, number)
else:
it = [None] * number
gcold = gc.isenabled()
gc.disable()
timing = self.inner(it, self.timer)
if gcold:
gc.enable()
return timing
def repeat(self, repeat=default_repeat, number=default_number):
"""Call timeit() a few times.
This is a convenience function that calls the timeit()
repeatedly, returning a list of results. The first argument
specifies how many times to call timeit(), defaulting to 3;
        the second argument specifies the number argument, defaulting
to one million.
Note: it's tempting to calculate mean and standard deviation
from the result vector and report these. However, this is not
very useful. In a typical case, the lowest value gives a
lower bound for how fast your machine can run the given code
snippet; higher values in the result vector are typically not
caused by variability in Python's speed, but by other
processes interfering with your timing accuracy. So the min()
of the result is probably the only number you should be
interested in. After that, you should look at the entire
vector and apply common sense rather than statistics.
"""
r = []
for i in range(repeat):
t = self.timeit(number)
r.append(t)
return r
def timeit(stmt="pass", setup="pass", timer=default_timer,
number=default_number):
"""Convenience function to create Timer object and call timeit method."""
return Timer(stmt, setup, timer).timeit(number)
def repeat(stmt="pass", setup="pass", timer=default_timer,
repeat=default_repeat, number=default_number):
"""Convenience function to create Timer object and call repeat method."""
return Timer(stmt, setup, timer).repeat(repeat, number)
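# Hedged sketch (not part of the stdlib module): typical library usage of the
# convenience functions above -- take the min() of repeat(), as the repeat()
# docstring recommends, and convert it to microseconds per loop.
def _best_usec_per_loop(stmt="'-'.join(map(str, range(100)))", number=10000):
    best = min(repeat(stmt, number=number))
    return best * 1e6 / number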
def main(args=None):
"""Main program, used when run as a script.
The optional argument specifies the command line to be parsed,
defaulting to sys.argv[1:].
The return value is an exit code to be passed to sys.exit(); it
may be None to indicate success.
When an exception happens during timing, a traceback is printed to
stderr and the return value is 1. Exceptions at other times
(including the template compilation) are not caught.
"""
if args is None:
args = sys.argv[1:]
import getopt
try:
opts, args = getopt.getopt(args, "n:s:r:tcvh",
["number=", "setup=", "repeat=",
"time", "clock", "verbose", "help"])
except getopt.error, err:
print err
print "use -h/--help for command line help"
return 2
timer = default_timer
stmt = "\n".join(args) or "pass"
number = 0 # auto-determine
setup = []
repeat = default_repeat
verbose = 0
precision = 3
for o, a in opts:
if o in ("-n", "--number"):
number = int(a)
if o in ("-s", "--setup"):
setup.append(a)
if o in ("-r", "--repeat"):
repeat = int(a)
if repeat <= 0:
repeat = 1
if o in ("-t", "--time"):
timer = time.time
if o in ("-c", "--clock"):
timer = time.clock
if o in ("-v", "--verbose"):
if verbose:
precision += 1
verbose += 1
if o in ("-h", "--help"):
print __doc__,
return 0
setup = "\n".join(setup) or "pass"
# Include the current directory, so that local imports work (sys.path
# contains the directory of this script, rather than the current
# directory)
import os
sys.path.insert(0, os.curdir)
t = Timer(stmt, setup, timer)
if number == 0:
# determine number so that 0.2 <= total time < 2.0
for i in range(1, 10):
number = 10**i
try:
x = t.timeit(number)
except:
t.print_exc()
return 1
if verbose:
print "%d loops -> %.*g secs" % (number, precision, x)
if x >= 0.2:
break
try:
r = t.repeat(repeat, number)
except:
t.print_exc()
return 1
best = min(r)
if verbose:
print "raw times:", " ".join(["%.*g" % (precision, x) for x in r])
print "%d loops," % number,
usec = best * 1e6 / number
if usec < 1000:
print "best of %d: %.*g usec per loop" % (repeat, precision, usec)
else:
msec = usec / 1000
if msec < 1000:
print "best of %d: %.*g msec per loop" % (repeat, precision, msec)
else:
sec = msec / 1000
print "best of %d: %.*g sec per loop" % (repeat, precision, sec)
return None
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
kuredatan/taxocluster | featuresVector.py | 2 | 2904 | import sys as s
from parsingMatch import parseAllFact
from parsingFasta import parseFasta
def sanitizeNode(node):
if not node or not (len(node) == 2):
#It means this node cannot appear in the taxonomic tree
return None
else:
return node
#@allMatches is a dictionary of (key=sample ID,value=list of sequences ID matching a read in this sample)
#@idSequences is a dictionary of (key=identifier of node,value=(name,rank of node))
#@filenames is the list of .match file names == list of samples ID /!\
#Returns a dictionary of (key=sample ID,value=list of nodes (name,rank) matching a read in this sample)
def getMatchingNodes(allMatches,idSequences,filenames):
matchingNodes = dict.fromkeys(filenames)
for sample in filenames:
matchingSequencesID = allMatches.get(sample)
matchingNodesInThisSample = []
if not (matchingSequencesID == None):
for sequenceID in matchingSequencesID:
node = idSequences.get(sequenceID)
cleanNode = sanitizeNode(node)
if cleanNode:
matchingNodesInThisSample.append(cleanNode)
matchingNodes[sample] = matchingNodesInThisSample
else:
print "The sample \'",sample,"\' could not be processed."
return matchingNodes
#Returns @matchingNodes, dictionary of (key=sample ID,value=list of nodes matched in this sample -i.e. at least in one read of this sample), and @idSequences, which is a dictionary of (key=identifier of sequence,value=(name,rank) of the node associated to this sequence)
#@filenames is the list of .match file names == list of samples ID /!\
#@fastaFileName is a string of the .fasta file name
#@sampleIDList is the list of samples ID
def featuresCreate(filenames,fastaFileName):
print "/!\ Parsing .match files"
print "[ You may have to wait a few seconds... ]"
#@allMatches is a dictionary of (key=sample ID,value=list of sequences ID matching a read in this sample)
import time
start = time.time()
allMatches = parseAllFact(filenames)
end = time.time()
print "TIME:",(end-start),"sec"
print "/!\ Parsing .fasta files"
print "[ You may have to wait a few seconds... ]"
try:
#@idSequences is a dictionary of (key=identifier,value=((name,rank))
#@paths is the list of paths from root to leaves
#@nodesListTree is the list of all nodes (internal nodes and leaves) in the tree
#We do not care for now of the OTU
idSequences,paths,nodesListTree,_ = parseFasta(fastaFileName)
except IOError:
print "\nERROR: Maybe the filename",fastaFileName,".fasta does not exist in \"meta\" folder\n"
s.exit(0)
matchingNodes = getMatchingNodes(allMatches,idSequences,filenames)
print "/!\ Matching nodes list done."
return matchingNodes,idSequences,paths,nodesListTree
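# Hedged sketch (not in the original module): expected call and result shapes.
# The .match and .fasta file names below are hypothetical; each value in
# matchingNodes is a list of (name,rank) tuples built by getMatchingNodes().
def _features_demo():
    filenames = ["sample1.match", "sample2.match"]
    matchingNodes, idSequences, paths, nodesListTree = featuresCreate(filenames, "taxoTree")
    for sample in filenames:
        print sample, len(matchingNodes.get(sample) or [])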
| mit |
opencloudinfra/orchestrator | venv/Lib/distutils/__init__.py | 1211 | 3983 | import os
import sys
import warnings
import imp
import opcode # opcode is not a virtualenv module, so we can use it to find the stdlib
# Important! To work on pypy, this must be a module that resides in the
# lib-python/modified-x.y.z directory
dirname = os.path.dirname
distutils_path = os.path.join(os.path.dirname(opcode.__file__), 'distutils')
if os.path.normpath(distutils_path) == os.path.dirname(os.path.normpath(__file__)):
warnings.warn(
"The virtualenv distutils package at %s appears to be in the same location as the system distutils?")
else:
__path__.insert(0, distutils_path)
real_distutils = imp.load_module("_virtualenv_distutils", None, distutils_path, ('', '', imp.PKG_DIRECTORY))
# Copy the relevant attributes
try:
__revision__ = real_distutils.__revision__
except AttributeError:
pass
__version__ = real_distutils.__version__
from distutils import dist, sysconfig
try:
basestring
except NameError:
basestring = str
## patch build_ext (distutils doesn't know how to get the libs directory
## path on windows - it hardcodes the paths around the patched sys.prefix)
if sys.platform == 'win32':
from distutils.command.build_ext import build_ext as old_build_ext
class build_ext(old_build_ext):
def finalize_options (self):
if self.library_dirs is None:
self.library_dirs = []
elif isinstance(self.library_dirs, basestring):
self.library_dirs = self.library_dirs.split(os.pathsep)
self.library_dirs.insert(0, os.path.join(sys.real_prefix, "Libs"))
old_build_ext.finalize_options(self)
from distutils.command import build_ext as build_ext_module
build_ext_module.build_ext = build_ext
## distutils.dist patches:
old_find_config_files = dist.Distribution.find_config_files
def find_config_files(self):
found = old_find_config_files(self)
system_distutils = os.path.join(distutils_path, 'distutils.cfg')
#if os.path.exists(system_distutils):
# found.insert(0, system_distutils)
# What to call the per-user config file
if os.name == 'posix':
user_filename = ".pydistutils.cfg"
else:
user_filename = "pydistutils.cfg"
user_filename = os.path.join(sys.prefix, user_filename)
if os.path.isfile(user_filename):
for item in list(found):
if item.endswith('pydistutils.cfg'):
found.remove(item)
found.append(user_filename)
return found
dist.Distribution.find_config_files = find_config_files
## distutils.sysconfig patches:
old_get_python_inc = sysconfig.get_python_inc
def sysconfig_get_python_inc(plat_specific=0, prefix=None):
if prefix is None:
prefix = sys.real_prefix
return old_get_python_inc(plat_specific, prefix)
sysconfig_get_python_inc.__doc__ = old_get_python_inc.__doc__
sysconfig.get_python_inc = sysconfig_get_python_inc
old_get_python_lib = sysconfig.get_python_lib
def sysconfig_get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
if standard_lib and prefix is None:
prefix = sys.real_prefix
return old_get_python_lib(plat_specific, standard_lib, prefix)
sysconfig_get_python_lib.__doc__ = old_get_python_lib.__doc__
sysconfig.get_python_lib = sysconfig_get_python_lib
old_get_config_vars = sysconfig.get_config_vars
def sysconfig_get_config_vars(*args):
real_vars = old_get_config_vars(*args)
if sys.platform == 'win32':
lib_dir = os.path.join(sys.real_prefix, "libs")
if isinstance(real_vars, dict) and 'LIBDIR' not in real_vars:
real_vars['LIBDIR'] = lib_dir # asked for all
elif isinstance(real_vars, list) and 'LIBDIR' in args:
real_vars = real_vars + [lib_dir] # asked for list
return real_vars
sysconfig_get_config_vars.__doc__ = old_get_config_vars.__doc__
sysconfig.get_config_vars = sysconfig_get_config_vars
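# Hedged sketch (not part of virtualenv): after the patches above, sysconfig
# answers with paths rooted at the real interpreter's prefix rather than the
# virtualenv's own prefix.
def _patched_sysconfig_demo():
    include_dir = sysconfig.get_python_inc()               # headers under sys.real_prefix
    stdlib_dir = sysconfig.get_python_lib(standard_lib=1)  # stdlib under sys.real_prefix
    return include_dir, stdlib_dir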
| gpl-3.0 |
vmarkovtsev/django | tests/modeladmin/models.py | 130 | 1603 | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Band(models.Model):
name = models.CharField(max_length=100)
bio = models.TextField()
sign_date = models.DateField()
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class Concert(models.Model):
main_band = models.ForeignKey(Band, models.CASCADE, related_name='main_concerts')
opening_band = models.ForeignKey(Band, models.CASCADE, related_name='opening_concerts',
blank=True)
day = models.CharField(max_length=3, choices=((1, 'Fri'), (2, 'Sat')))
transport = models.CharField(max_length=100, choices=(
(1, 'Plane'),
(2, 'Train'),
(3, 'Bus')
), blank=True)
class ValidationTestModel(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField()
users = models.ManyToManyField(User)
state = models.CharField(max_length=2, choices=(("CO", "Colorado"), ("WA", "Washington")))
is_active = models.BooleanField(default=False)
pub_date = models.DateTimeField()
band = models.ForeignKey(Band, models.CASCADE)
# This field is intentionally 2 characters long (#16080).
no = models.IntegerField(verbose_name="Number", blank=True, null=True)
def decade_published_in(self):
return self.pub_date.strftime('%Y')[:3] + "0's"
class ValidationTestInlineModel(models.Model):
parent = models.ForeignKey(ValidationTestModel, models.CASCADE)
| bsd-3-clause |
wreckJ/intellij-community | python/helpers/pydev/runfiles.py | 43 | 10205 | import os
def main():
import sys
#Separate the nose params and the pydev params.
pydev_params = []
other_test_framework_params = []
found_other_test_framework_param = None
NOSE_PARAMS = '--nose-params'
PY_TEST_PARAMS = '--py-test-params'
for arg in sys.argv[1:]:
if not found_other_test_framework_param and arg != NOSE_PARAMS and arg != PY_TEST_PARAMS:
pydev_params.append(arg)
else:
if not found_other_test_framework_param:
found_other_test_framework_param = arg
else:
other_test_framework_params.append(arg)
#Here we'll run either with nose or with the pydev_runfiles.
import pydev_runfiles
import pydev_runfiles_xml_rpc
import pydevd_constants
from pydevd_file_utils import _NormFile
DEBUG = 0
if DEBUG:
sys.stdout.write('Received parameters: %s\n' % (sys.argv,))
sys.stdout.write('Params for pydev: %s\n' % (pydev_params,))
if found_other_test_framework_param:
sys.stdout.write('Params for test framework: %s, %s\n' % (found_other_test_framework_param, other_test_framework_params))
try:
configuration = pydev_runfiles.parse_cmdline([sys.argv[0]] + pydev_params)
except:
sys.stderr.write('Command line received: %s\n' % (sys.argv,))
raise
pydev_runfiles_xml_rpc.InitializeServer(configuration.port) #Note that if the port is None, a Null server will be initialized.
NOSE_FRAMEWORK = 1
PY_TEST_FRAMEWORK = 2
try:
if found_other_test_framework_param:
test_framework = 0 #Default (pydev)
if found_other_test_framework_param == NOSE_PARAMS:
import nose
test_framework = NOSE_FRAMEWORK
elif found_other_test_framework_param == PY_TEST_PARAMS:
import pytest
test_framework = PY_TEST_FRAMEWORK
else:
raise ImportError()
else:
raise ImportError()
except ImportError:
if found_other_test_framework_param:
sys.stderr.write('Warning: Could not import the test runner: %s. Running with the default pydev unittest runner instead.\n' % (
found_other_test_framework_param,))
test_framework = 0
#Clear any exception that may be there so that clients don't see it.
#See: https://sourceforge.net/tracker/?func=detail&aid=3408057&group_id=85796&atid=577329
if hasattr(sys, 'exc_clear'):
sys.exc_clear()
if test_framework == 0:
return pydev_runfiles.main(configuration) #Note: still doesn't return a proper value.
else:
#We'll convert the parameters to what nose or py.test expects.
#The supported parameters are:
#runfiles.py --config-file|-t|--tests <Test.test1,Test2> dirs|files --nose-params xxx yyy zzz
#(all after --nose-params should be passed directly to nose)
#In java:
#--tests = Constants.ATTR_UNITTEST_TESTS
#--config-file = Constants.ATTR_UNITTEST_CONFIGURATION_FILE
#The only thing actually handled here are the tests that we want to run, which we'll
#handle and pass as what the test framework expects.
py_test_accept_filter = {}
files_to_tests = configuration.files_to_tests
if files_to_tests:
#Handling through the file contents (file where each line is a test)
files_or_dirs = []
for file, tests in files_to_tests.items():
if test_framework == NOSE_FRAMEWORK:
for test in tests:
files_or_dirs.append(file + ':' + test)
elif test_framework == PY_TEST_FRAMEWORK:
file = _NormFile(file)
py_test_accept_filter[file] = tests
files_or_dirs.append(file)
else:
raise AssertionError('Cannot handle test framework: %s at this point.' % (test_framework,))
else:
if configuration.tests:
#Tests passed (works together with the files_or_dirs)
files_or_dirs = []
for file in configuration.files_or_dirs:
if test_framework == NOSE_FRAMEWORK:
for t in configuration.tests:
files_or_dirs.append(file + ':' + t)
elif test_framework == PY_TEST_FRAMEWORK:
file = _NormFile(file)
py_test_accept_filter[file] = configuration.tests
files_or_dirs.append(file)
else:
raise AssertionError('Cannot handle test framework: %s at this point.' % (test_framework,))
else:
#Only files or dirs passed (let it do the test-loading based on those paths)
files_or_dirs = configuration.files_or_dirs
argv = other_test_framework_params + files_or_dirs
if test_framework == NOSE_FRAMEWORK:
#Nose usage: http://somethingaboutorange.com/mrl/projects/nose/0.11.2/usage.html
#show_stdout_option = ['-s']
#processes_option = ['--processes=2']
argv.insert(0, sys.argv[0])
if DEBUG:
sys.stdout.write('Final test framework args: %s\n' % (argv[1:],))
import pydev_runfiles_nose
PYDEV_NOSE_PLUGIN_SINGLETON = pydev_runfiles_nose.StartPydevNosePluginSingleton(configuration)
argv.append('--with-pydevplugin')
# Return 'not' because it will return 'success' (so, exit == 0 if success)
return not nose.run(argv=argv, addplugins=[PYDEV_NOSE_PLUGIN_SINGLETON])
elif test_framework == PY_TEST_FRAMEWORK:
if DEBUG:
sys.stdout.write('Final test framework args: %s\n' % (argv,))
sys.stdout.write('py_test_accept_filter: %s\n' % (py_test_accept_filter,))
import os
try:
xrange
except:
xrange = range
for i in xrange(len(argv)):
arg = argv[i]
#Workaround bug in py.test: if we pass the full path it ends up importing conftest
#more than once (so, always work with relative paths).
if os.path.isfile(arg) or os.path.isdir(arg):
from pydev_imports import relpath
arg = relpath(arg)
argv[i] = arg
d = os.path.dirname(__file__)
if d not in sys.path:
sys.path.insert(0, d)
import pickle, zlib, base64
# Update environment PYTHONPATH so that it finds our plugin if using xdist.
os.environ['PYTHONPATH'] = os.pathsep.join(sys.path)
# Set what should be skipped in the plugin through an environment variable
s = base64.b64encode(zlib.compress(pickle.dumps(py_test_accept_filter)))
if pydevd_constants.IS_PY3K:
s = s.decode('ascii') # Must be str in py3.
os.environ['PYDEV_PYTEST_SKIP'] = s
# Identifies the main pid (i.e.: if it's not the main pid it has to connect back to the
# main pid to give xml-rpc notifications).
os.environ['PYDEV_MAIN_PID'] = str(os.getpid())
os.environ['PYDEV_PYTEST_SERVER'] = str(configuration.port)
argv.append('-p')
argv.append('pydev_runfiles_pytest2')
if 'unittest' in sys.modules or 'unittest2' in sys.modules:
sys.stderr.write('pydev test runner error: imported unittest before running pytest.main\n')
return pytest.main(argv)
else:
raise AssertionError('Cannot handle test framework: %s at this point.' % (test_framework,))
if __name__ == '__main__':
try:
main()
finally:
try:
#The server is not a daemon thread, so, we have to ask for it to be killed!
import pydev_runfiles_xml_rpc
pydev_runfiles_xml_rpc.forceServerKill()
except:
pass #Ignore any errors here
import sys
import threading
if hasattr(sys, '_current_frames') and hasattr(threading, 'enumerate'):
import time
import traceback
class DumpThreads(threading.Thread):
def run(self):
time.sleep(10)
thread_id_to_name = {}
try:
for t in threading.enumerate():
thread_id_to_name[t.ident] = '%s (daemon: %s)' % (t.name, t.daemon)
except:
pass
stack_trace = [
'===============================================================================',
'pydev pyunit runner: Threads still found running after tests finished',
'================================= Thread Dump =================================']
for thread_id, stack in sys._current_frames().items():
stack_trace.append('\n-------------------------------------------------------------------------------')
stack_trace.append(" Thread %s" % thread_id_to_name.get(thread_id, thread_id))
stack_trace.append('')
if 'self' in stack.f_locals:
sys.stderr.write(str(stack.f_locals['self'])+'\n')
for filename, lineno, name, line in traceback.extract_stack(stack):
stack_trace.append(' File "%s", line %d, in %s' % (filename, lineno, name))
if line:
stack_trace.append(" %s" % (line.strip()))
stack_trace.append('\n=============================== END Thread Dump ===============================')
sys.stderr.write('\n'.join(stack_trace))
dump_current_frames_thread = DumpThreads()
dump_current_frames_thread.setDaemon(True) # Daemon so that this thread doesn't halt it!
dump_current_frames_thread.start()
| apache-2.0 |
jzbontar/orange-tree | Orange/canvas/gui/splashscreen.py | 16 | 3975 | """
A splash screen widget with support for positioning of the message text.
"""
from PyQt4.QtGui import (
QSplashScreen, QWidget, QPixmap, QPainter, QTextDocument,
QTextBlockFormat, QTextCursor, QApplication
)
from PyQt4.QtCore import Qt
from .utils import is_transparency_supported
class SplashScreen(QSplashScreen):
"""
Splash screen widget.
Parameters
----------
parent : :class:`QWidget`
Parent widget
pixmap : :class:`QPixmap`
Splash window pixmap.
textRect : :class:`QRect`
Bounding rectangle of the shown message on the widget.
"""
def __init__(self, parent=None, pixmap=None, textRect=None, **kwargs):
QSplashScreen.__init__(self, parent, **kwargs)
self.__textRect = textRect
self.__message = ""
self.__color = Qt.black
self.__alignment = Qt.AlignLeft
if pixmap is None:
pixmap = QPixmap()
self.setPixmap(pixmap)
self.setAutoFillBackground(False)
# Also set FramelessWindowHint (if not already set)
self.setWindowFlags(self.windowFlags() | Qt.FramelessWindowHint)
def setTextRect(self, rect):
"""
Set the rectangle (:class:`QRect`) in which to show the message text.
"""
if self.__textRect != rect:
self.__textRect = rect
self.update()
def textRect(self):
"""
Return the text message rectangle.
"""
return self.__textRect
def showEvent(self, event):
QSplashScreen.showEvent(self, event)
# Raise to top on show.
self.raise_()
def drawContents(self, painter):
"""
Reimplementation of drawContents to limit the drawing
inside `textRect`.
"""
painter.setPen(self.__color)
painter.setFont(self.font())
if self.__textRect:
rect = self.__textRect
else:
rect = self.rect().adjusted(5, 5, -5, -5)
if Qt.mightBeRichText(self.__message):
doc = QTextDocument()
doc.setHtml(self.__message)
doc.setTextWidth(rect.width())
cursor = QTextCursor(doc)
cursor.select(QTextCursor.Document)
fmt = QTextBlockFormat()
fmt.setAlignment(self.__alignment)
cursor.mergeBlockFormat(fmt)
painter.save()
painter.translate(rect.topLeft())
doc.drawContents(painter)
painter.restore()
else:
painter.drawText(rect, self.__alignment, self.__message)
def showMessage(self, message, alignment=Qt.AlignLeft, color=Qt.black):
"""
Show the `message` with `color` and `alignment`.
"""
# Need to store all these arguments for drawContents (there are no
# accessor methods)
self.__alignment = alignment
self.__color = color
self.__message = message
QSplashScreen.showMessage(self, message, alignment, color)
QApplication.instance().processEvents()
# Reimplemented to allow graceful fall back if the windowing system
# does not support transparency.
def setPixmap(self, pixmap):
self.setAttribute(Qt.WA_TranslucentBackground,
pixmap.hasAlpha() and \
is_transparency_supported())
self.__pixmap = pixmap
QSplashScreen.setPixmap(self, pixmap)
if pixmap.hasAlpha() and not is_transparency_supported():
self.setMask(pixmap.createHeuristicMask())
def repaint(self):
QWidget.repaint(self)
QApplication.flush()
def event(self, event):
if event.type() == event.Paint:
pixmap = self.__pixmap
painter = QPainter(self)
if not pixmap.isNull():
painter.drawPixmap(0, 0, pixmap)
self.drawContents(painter)
return True
return QSplashScreen.event(self, event)
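# Usage sketch (values are illustrative; QRect would come from PyQt4.QtCore):
#
#     splash = SplashScreen(pixmap=QPixmap("splash.png"),
#                           textRect=QRect(10, 350, 380, 20))
#     splash.show()
#     splash.showMessage("Loading ...", alignment=Qt.AlignLeft, color=Qt.black)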
| gpl-3.0 |
Andrew-McNab-UK/DIRAC | tests/Integration/DataManagementSystem/FC_scaling_test.py | 3 | 10751 | ########################################################################
# File : FC_Scaling_test
# Author : Andrei Tsaregorodtsev
########################################################################
"""
Test suite for a generic File Catalog scalability tests
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
from DIRAC import S_OK
import sys, pprint, os, numpy
Script.setUsageMessage( """
Test suite for a generic File Catalog scalability tests
""" )
testType = 'noTest'
def setTestType( value ):
global testType
testType = value
return S_OK()
testDir = ''
def setTestDirectory( value ):
global testDir
testDir = value
return S_OK()
nClients = 1
def setNumberOfClients( value ):
global nClients
nClients = int( value )
return S_OK()
nQueries = 100
def setNumberOfQueries( value ):
global nQueries
nQueries = int( value )
return S_OK()
lfnListFile = 'lfns_100.txt'
def setLFNListFile( value ):
global lfnListFile
lfnListFile = value
return S_OK()
outputFile = "output.txt"
def setOutputFile( value ):
global outputFile
outputFile = value
return S_OK()
catalog = 'AugerTestFileCatalog'
def setCatalog( value ):
global catalog
catalog = value
return S_OK()
fullTest = False
def setFullTest( value ):
global fullTest
fullTest = True
return S_OK()
shortRange = False
def setShortRange( value ):
global shortRange
shortRange = True
return S_OK()
verbosity = 0
def setVerbosity( value ):
global verbosity
verbosity += 1
return S_OK()
Script.registerSwitch( "t:", "type=", "test type", setTestType )
Script.registerSwitch( "D:", "directory=", "test directory", setTestDirectory )
Script.registerSwitch( "N:", "clients=", "number of parallel clients", setNumberOfClients )
Script.registerSwitch( "Q:", "queries=", "number of queries in one test", setNumberOfQueries )
Script.registerSwitch( "C:", "catalog=", "catalog to use", setCatalog )
Script.registerSwitch( "L:", "lfnList=", "file with a list of LFNs", setLFNListFile )
Script.registerSwitch( "F", "fullTest", "run the full test", setFullTest )
Script.registerSwitch( "O:", "output=", "file with output result", setOutputFile )
Script.registerSwitch( "v", "verbose", "increase output verbosity", setVerbosity )
Script.registerSwitch( "S", "shortRange", "run short parameter range", setShortRange )
Script.parseCommandLine( ignoreErrors = True )
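# Example invocations (file and directory names are illustrative):
#   python FC_scaling_test.py -t getBulkReplicas -N 10 -Q 100 -L lfns_1000.txt -O results.txt
#   python FC_scaling_test.py -t listDirectory -D /vo/some/test/directory -N 5 -Q 50 -O results.txt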
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.ProcessPool import ProcessPool
from DIRAC import S_OK
import time
fc = FileCatalog( catalogs=[catalog] )
resultTest = []
def listDirectory( n_queries ):
global testDir
start = time.time()
sCount = 0
fCount = 0
resultList = []
startTotal = time.time()
for i in xrange( n_queries ) :
start = time.time()
result = fc.listDirectory( testDir )
resultList.append( time.time() - start )
if result['OK']:
sCount += 1
else:
fCount += 1
total = time.time() - startTotal
average, error = doStats( resultList )
if verbosity >= 1:
print "listDirectory: Total time", total, 'Success', sCount, 'Failure', \
fCount, 'Average', average, 'Stdvar', error
result = S_OK( (resultList, sCount, fCount) )
return result
def getBulkReplicas( n_queries ):
global lfnListFile, verbosity
lFile = open(lfnListFile)
lfnList = [ l.strip().replace('//','/') for l in lFile.read().strip().split() ]
lFile.close()
start = time.time()
sCount = 0
fCount = 0
resultList = []
startTotal = time.time()
for i in xrange( n_queries ) :
start = time.time()
result = fc.getReplicas( lfnList )
resultList.append( time.time() - start )
if verbosity >= 2:
print "getReplicas: received lfns", len(result['Value']['Successful'])
for lfn in result['Value']['Successful']:
print result['Value']['Successful'][lfn]
if verbosity >= 3:
for lfn,res in result['Value']['Successful'].items():
print lfn
print res
break
if result['OK']:
sCount += 1
else:
fCount += 1
total = time.time() - startTotal
average, error = doStats( resultList )
if verbosity >= 1:
print "getReplicas: Total time", total, 'Success', sCount, 'Failure', \
fCount, 'Average', average, 'Stdvar', error
result = S_OK( (resultList, sCount, fCount) )
return result
def getDirectoryReplicas( n_queries ):
global testDir, verbosity
sCount = 0
fCount = 0
resultList = []
startTotal = time.time()
for i in xrange( n_queries ) :
start = time.time()
result = fc.getDirectoryReplicas( testDir )
resultList.append( time.time() - start )
if verbosity >= 2:
print "Returned values", len(result['Value']['Successful'][testDir])
for lfn,res in result['Value']['Successful'][testDir].items():
print lfn
print res
break
if result['OK']:
sCount += 1
else:
fCount += 1
total = time.time() - startTotal
average, error = doStats( resultList )
if verbosity >= 1:
print "getDirectoryReplicas: Total time", total, 'Success', sCount, 'Failure', \
fCount, '\nAverage', average, 'Stdvar', error
result = S_OK( (resultList, sCount, fCount) )
return result
def finalize(task,result):
global resultTest, verbosity
if verbosity >= 2:
if result['OK']:
print "Test time ", result['Value'], task.getTaskID()
else:
print "Error:", result['Message']
resultTest.append( result['Value'] )
def doException( expt ):
print "Exception", expt
def runTest( ):
global nClients, nQueries, testType, resultTest, testDir, lfnListFile
resultTest = []
pp = ProcessPool( nClients )
testFunction = eval( testType )
for c in xrange( nClients ):
pp.createAndQueueTask( testFunction, [nQueries],
callback=finalize,
exceptionCallback=doException )
pp.processAllResults(3600)
pp.finalize(0)
timeResult = []
for testTime,success,failure in resultTest:
#print testTime,success,failure
timeResult += testTime
averageTime, errorTime = doStats( timeResult )
rateResult = [ nClients/t for t in timeResult ]
averageRate, errorRate = doStats( rateResult )
if testDir:
print "\nTest results for clients %d, %s" % ( nClients, testDir )
else:
print "\nTest results for clients %d, %s" % ( nClients, lfnListFile )
print "Query time: %.2f +/- %.2f" % (averageTime, errorTime)
print "Query rate: %.2f +/- %.2f" % (averageRate, errorRate)
return( (averageTime, errorTime), (averageRate, errorRate) )
def doStats( testArray ):
array = list( testArray )
# Delete min and max value first
del array[ array.index(max(array)) ]
del array[ array.index(min(array)) ]
numArray = numpy.array( array )
average = numpy.mean( numArray )
stddev = numpy.std( numArray )
return (average, stddev)
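# Example: doStats([1.0, 2.0, 3.0, 4.0, 100.0]) drops the min (1.0) and the max
# (100.0) and returns the mean and standard deviation of [2.0, 3.0, 4.0],
# i.e. approximately (3.0, 0.816).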
numberOfFilesList = [ 10, 100, 500, 1000, 2000, 5000, 10000, 15000, 20000 ]
numberOfFilesList_short = [ 100, 1000, 5000, 10000, 20000 ]
numberOfClientsList = [1,2,3,5,7,10,12,15,20,30,50,75]
numberOfClientsList_short = [1,5,10,20]
directoriesList = [ (35455, "/auger/prod/QGSjetII_gr20_simADSTv2r5p1/en18.000/th0.65/2008/11/12"),
(24024, "/auger/prod/QGSjetII_gr20/2008/09/04/en17.500/th0.65"),
#(15205, "/auger/generated/2012-09-03"),
(18391,"/auger/prod/QGSjetII_gr20_simADSTv2r5p1/en17.500/th0.65/2008/11/11"),
(9907, "/auger/prod/QGSjetII_gr20/2008/09/03/en17.500/th0.65"),
(5157, "/auger/prod/QGSjetII_gr20/2008/09/04/en20.000/th0.65"),
(2538, "/auger/prod/QGSjetII_gr21/2009/01/12/en18.500/th0.65"),
(1500, "/auger/prod/epos_gr03_sim/en17.500/th26.000"),
(502, "/auger/prod/REPLICATED20081014/epos_gr08/en21.250/th26.000")
]
directoriesList_short = [ (35455, "/auger/prod/QGSjetII_gr20_simADSTv2r5p1/en18.000/th0.65/2008/11/12"),
(18391,"/auger/prod/QGSjetII_gr20_simADSTv2r5p1/en17.500/th0.65/2008/11/11"),
(5157, "/auger/prod/QGSjetII_gr20/2008/09/04/en20.000/th0.65"),
(1000, "/auger/prod/PhotonLib_gr22/2009/02/27/en17.500/th26.000")
]
directoriesList.reverse()
directoriesList_short.reverse()
def executeTest( nc, nf, queryDict, rateDict, queryDict_r, rateDict_r ):
global nClients
nClients = nc
t1,t2 = runTest()
query,querys = t1
rate, rates = t2
fileLabel = "%d files" % nf
queryDict.setdefault( fileLabel, {} )
queryDict[fileLabel][nc] = (query,querys)
rateDict.setdefault( fileLabel, {} )
rateDict[fileLabel][nc] = (rate,rates)
clientLabel = "%d clients" % nc
queryDict_r.setdefault( clientLabel, {} )
queryDict_r[clientLabel][nf] = (query,querys)
rateDict_r.setdefault( clientLabel, {} )
rateDict_r[clientLabel][nf] = (rate,rates)
def runFullTest():
global outputFile, nClients, testDir, lfnListFile, shortRange
queryDict = {}
rateDict = {}
queryDict_r = {}
rateDict_r = {}
ncList = numberOfClientsList
if shortRange:
ncList = numberOfClientsList_short
nfList = numberOfFilesList
if shortRange:
nfList = numberOfFilesList_short
ndList = directoriesList
if shortRange:
ndList = directoriesList_short
for nc in ncList:
if testType in ['getBulkReplicas']:
for nf in nfList:
lfnListFile = "lfns_%d.txt" % nf
executeTest( nc, nf, queryDict, rateDict, queryDict_r, rateDict_r )
elif testType in ['getDirectoryReplicas', "listDirectory"]:
for nf, directory in ndList:
testDir = directory
executeTest( nc, nf, queryDict, rateDict, queryDict_r, rateDict_r )
# Writing out result
outFile = open( outputFile, "w" )
outFile.write( "Test type %s \n" % testType )
outFile.write( "Number of queries per unit test %d \n" % nQueries )
outFile.write( "Results: \n\n\n" )
outFile.write( 'data_f = ' + str( queryDict ) + '\n\n\n' )
outFile.write( 'data_f_r = ' + str( rateDict ) + '\n\n\n' )
outFile.write( 'data_c = ' + str( queryDict_r ) + '\n\n\n' )
outFile.write( 'data_c_r = ' + str( rateDict_r ) + '\n\n\n' )
outFile.close()
pprint.pprint( queryDict )
pprint.pprint( rateDict )
pprint.pprint( queryDict_r )
pprint.pprint( rateDict_r )
#########################################################################
if os.path.exists( outputFile ):
print "Output file %s already exists, exiting ..." % outputFile
sys.exit(-1)
if fullTest:
runFullTest()
else:
runTest()
| gpl-3.0 |
trabacus-softapps/openerp-8.0-cc | openerp/addons/survey/wizard/survey_print_statistics.py | 4 | 1888 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class survey_print_statistics(osv.osv_memory):
_name = 'survey.print.statistics'
_columns = {
'survey_ids': fields.many2many('survey', string="Survey", required="1"),
}
def action_next(self, cr, uid, ids, context=None):
"""
Print Survey Statistics in pdf format.
"""
if context is None:
context = {}
datas = {'ids': context.get('active_ids', [])}
res = self.read(cr, uid, ids, ['survey_ids'], context=context)
res = res and res[0] or {}
datas['form'] = res
datas['model'] = 'survey.print.statistics'
return {
'type': 'ir.actions.report.xml',
'report_name': 'survey.analysis',
'datas': datas,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/test/test_int_literal.py | 21 | 7053 | """Test correct treatment of hex/oct constants.
This is complex because of changes due to PEP 237.
"""
import unittest
class TestHexOctBin(unittest.TestCase):
def test_hex_baseline(self):
# A few upper/lowercase tests
self.assertEqual(0x0, 0X0)
self.assertEqual(0x1, 0X1)
self.assertEqual(0x123456789abcdef, 0X123456789abcdef)
# Baseline tests
self.assertEqual(0x0, 0)
self.assertEqual(0x10, 16)
self.assertEqual(0x7fffffff, 2147483647)
self.assertEqual(0x7fffffffffffffff, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x0), 0)
self.assertEqual(-(0x10), -16)
self.assertEqual(-(0x7fffffff), -2147483647)
self.assertEqual(-(0x7fffffffffffffff), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0x0, 0)
self.assertEqual(-0x10, -16)
self.assertEqual(-0x7fffffff, -2147483647)
self.assertEqual(-0x7fffffffffffffff, -9223372036854775807)
def test_hex_unsigned(self):
# Positive constants
self.assertEqual(0x80000000, 2147483648)
self.assertEqual(0xffffffff, 4294967295)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x80000000), -2147483648)
self.assertEqual(-(0xffffffff), -4294967295)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0x80000000, -2147483648)
self.assertEqual(-0xffffffff, -4294967295)
# Positive constants
self.assertEqual(0x8000000000000000, 9223372036854775808)
self.assertEqual(0xffffffffffffffff, 18446744073709551615)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x8000000000000000), -9223372036854775808)
self.assertEqual(-(0xffffffffffffffff), -18446744073709551615)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0x8000000000000000, -9223372036854775808)
self.assertEqual(-0xffffffffffffffff, -18446744073709551615)
def test_oct_baseline(self):
# A few upper/lowercase tests
self.assertEqual(0o0, 0O0)
self.assertEqual(0o1, 0O1)
self.assertEqual(0o1234567, 0O1234567)
# Baseline tests
self.assertEqual(0o0, 0)
self.assertEqual(0o20, 16)
self.assertEqual(0o17777777777, 2147483647)
self.assertEqual(0o777777777777777777777, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o0), 0)
self.assertEqual(-(0o20), -16)
self.assertEqual(-(0o17777777777), -2147483647)
self.assertEqual(-(0o777777777777777777777), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0o0, 0)
self.assertEqual(-0o20, -16)
self.assertEqual(-0o17777777777, -2147483647)
self.assertEqual(-0o777777777777777777777, -9223372036854775807)
def test_oct_unsigned(self):
# Positive constants
self.assertEqual(0o20000000000, 2147483648)
self.assertEqual(0o37777777777, 4294967295)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o20000000000), -2147483648)
self.assertEqual(-(0o37777777777), -4294967295)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0o20000000000, -2147483648)
self.assertEqual(-0o37777777777, -4294967295)
# Positive constants
self.assertEqual(0o1000000000000000000000, 9223372036854775808)
self.assertEqual(0o1777777777777777777777, 18446744073709551615)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o1000000000000000000000), -9223372036854775808)
self.assertEqual(-(0o1777777777777777777777), -18446744073709551615)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0o1000000000000000000000, -9223372036854775808)
self.assertEqual(-0o1777777777777777777777, -18446744073709551615)
def test_bin_baseline(self):
# A few upper/lowercase tests
self.assertEqual(0b0, 0B0)
self.assertEqual(0b1, 0B1)
self.assertEqual(0b10101010101, 0B10101010101)
# Baseline tests
self.assertEqual(0b0, 0)
self.assertEqual(0b10000, 16)
self.assertEqual(0b1111111111111111111111111111111, 2147483647)
self.assertEqual(0b111111111111111111111111111111111111111111111111111111111111111, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0b0), 0)
self.assertEqual(-(0b10000), -16)
self.assertEqual(-(0b1111111111111111111111111111111), -2147483647)
self.assertEqual(-(0b111111111111111111111111111111111111111111111111111111111111111), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0b0, 0)
self.assertEqual(-0b10000, -16)
self.assertEqual(-0b1111111111111111111111111111111, -2147483647)
self.assertEqual(-0b111111111111111111111111111111111111111111111111111111111111111, -9223372036854775807)
def test_bin_unsigned(self):
# Positive constants
self.assertEqual(0b10000000000000000000000000000000, 2147483648)
self.assertEqual(0b11111111111111111111111111111111, 4294967295)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0b10000000000000000000000000000000), -2147483648)
self.assertEqual(-(0b11111111111111111111111111111111), -4294967295)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0b10000000000000000000000000000000, -2147483648)
self.assertEqual(-0b11111111111111111111111111111111, -4294967295)
# Positive constants
self.assertEqual(0b1000000000000000000000000000000000000000000000000000000000000000, 9223372036854775808)
self.assertEqual(0b1111111111111111111111111111111111111111111111111111111111111111, 18446744073709551615)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0b1000000000000000000000000000000000000000000000000000000000000000), -9223372036854775808)
self.assertEqual(-(0b1111111111111111111111111111111111111111111111111111111111111111), -18446744073709551615)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0b1000000000000000000000000000000000000000000000000000000000000000, -9223372036854775808)
self.assertEqual(-0b1111111111111111111111111111111111111111111111111111111111111111, -18446744073709551615)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
androomerrill/scikit-nano | sknano/generators/__init__.py | 2 | 1771 | # -*- coding: utf-8 -*-
"""
======================================================================
Structure generators (:mod:`sknano.generators`)
======================================================================
.. currentmodule:: sknano.generators
Contents
========
Nanostructure generators
------------------------
.. autosummary::
:toctree: generated/
GeneratorBase
FullereneGenerator
GrapheneGenerator
PrimitiveCellGrapheneGenerator
ConventionalCellGrapheneGenerator
BilayerGrapheneGenerator
MWNTGenerator
MWNTBundleGenerator
SWNTGenerator
SWNTBundleGenerator
UnrolledSWNTGenerator
Bulk structure generators
--------------------------
.. autosummary::
:toctree: generated/
BulkGeneratorBase
AlphaQuartzGenerator
GoldGenerator
CopperGenerator
MoS2Generator
CaesiumChlorideStructureGenerator
DiamondStructureGenerator
BCCStructureGenerator
FCCStructureGenerator
RocksaltStructureGenerator
ZincblendeStructureGenerator
Other
-----
.. autodata:: STRUCTURE_GENERATORS
:annotation: = tuple of recognized generator classes.
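
Examples
--------
Illustrative usage only; the ``(n, m)`` chirality keyword arguments shown
below are an assumption rather than taken from this module's documentation::

    >>> from sknano.generators import SWNTGenerator
    >>> swnt = SWNTGenerator(n=10, m=5)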
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
__docformat__ = 'restructuredtext en'
from ._base import *
from ._bulk_structure_generator import *
from ._mixins import *
from ._fullerene_generator import *
from ._graphene_generator import *
from ._bilayer_graphene_generator import *
from ._swnt_generator import *
from ._mwnt_generator import *
from ._nanotube_bundle_generator import *
from ._swnt_bundle_generator import *
from ._mwnt_bundle_generator import *
from ._unrolled_swnt_generator import *
# from ._defect_generators import *
__all__ = [s for s in dir() if not s.startswith('_')]
| bsd-2-clause |
v-iam/azure-sdk-for-python | azure-batch/azure/batch/models/node_file.py | 3 | 1414 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NodeFile(Model):
"""Information about a file or directory on a compute node.
:param name: The file path.
:type name: str
:param url: The URL of the file.
:type url: str
:param is_directory: Whether the object represents a directory.
:type is_directory: bool
:param properties: The file properties.
:type properties: :class:`FileProperties
<azure.batch.models.FileProperties>`
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'is_directory': {'key': 'isDirectory', 'type': 'bool'},
'properties': {'key': 'properties', 'type': 'FileProperties'},
}
def __init__(self, name=None, url=None, is_directory=None, properties=None):
self.name = name
self.url = url
self.is_directory = is_directory
self.properties = properties
| mit |
Zhongqilong/kbengine | kbe/src/lib/python/Lib/distutils/spawn.py | 81 | 7514 | """distutils.spawn
Provides the 'spawn()' function, a front-end to various platform-
specific functions for launching another program in a sub-process.
Also provides the 'find_executable()' to search the path for a given
executable name.
"""
import sys
import os
from distutils.errors import DistutilsPlatformError, DistutilsExecError
from distutils.debug import DEBUG
from distutils import log
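# Usage sketch (illustrative; 'gcc' is just an example program name):
#
#     from distutils.spawn import find_executable, spawn
#
#     gcc = find_executable('gcc')      # absolute path, or None if not on PATH
#     if gcc is not None:
#         spawn([gcc, '--version'])     # raises DistutilsExecError on failure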
def spawn(cmd, search_path=1, verbose=0, dry_run=0):
"""Run another program, specified as a command list 'cmd', in a new process.
'cmd' is just the argument list for the new process, ie.
cmd[0] is the program to run and cmd[1:] are the rest of its arguments.
There is no way to run a program with a name different from that of its
executable.
If 'search_path' is true (the default), the system's executable
search path will be used to find the program; otherwise, cmd[0]
must be the exact path to the executable. If 'dry_run' is true,
the command will not actually be run.
Raise DistutilsExecError if running the program fails in any way; just
return on success.
"""
# cmd is documented as a list, but just in case some code passes a tuple
# in, protect our %-formatting code against horrible death
cmd = list(cmd)
if os.name == 'posix':
_spawn_posix(cmd, search_path, dry_run=dry_run)
elif os.name == 'nt':
_spawn_nt(cmd, search_path, dry_run=dry_run)
else:
raise DistutilsPlatformError(
"don't know how to spawn programs on platform '%s'" % os.name)
def _nt_quote_args(args):
"""Quote command-line arguments for DOS/Windows conventions.
Just wraps every argument which contains blanks in double quotes, and
returns a new argument list.
"""
# XXX this doesn't seem very robust to me -- but if the Windows guys
# say it'll work, I guess I'll have to accept it. (What if an arg
# contains quotes? What other magic characters, other than spaces,
# have to be escaped? Is there an escaping mechanism other than
# quoting?)
for i, arg in enumerate(args):
if ' ' in arg:
args[i] = '"%s"' % arg
return args
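# For example, _nt_quote_args(['gcc', 'my file.c', '-o', 'out.o']) returns
# ['gcc', '"my file.c"', '-o', 'out.o']: only the argument containing a space
# is wrapped in double quotes.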
def _spawn_nt(cmd, search_path=1, verbose=0, dry_run=0):
executable = cmd[0]
cmd = _nt_quote_args(cmd)
if search_path:
# either we find one or it stays the same
executable = find_executable(executable) or executable
log.info(' '.join([executable] + cmd[1:]))
if not dry_run:
# spawn for NT requires a full path to the .exe
try:
rc = os.spawnv(os.P_WAIT, executable, cmd)
except OSError as exc:
# this seems to happen when the command isn't found
if not DEBUG:
cmd = executable
raise DistutilsExecError(
"command %r failed: %s" % (cmd, exc.args[-1]))
if rc != 0:
# and this reflects the command running but failing
if not DEBUG:
cmd = executable
raise DistutilsExecError(
"command %r failed with exit status %d" % (cmd, rc))
if sys.platform == 'darwin':
from distutils import sysconfig
_cfg_target = None
_cfg_target_split = None
def _spawn_posix(cmd, search_path=1, verbose=0, dry_run=0):
log.info(' '.join(cmd))
if dry_run:
return
executable = cmd[0]
exec_fn = search_path and os.execvp or os.execv
env = None
if sys.platform == 'darwin':
global _cfg_target, _cfg_target_split
if _cfg_target is None:
_cfg_target = sysconfig.get_config_var(
'MACOSX_DEPLOYMENT_TARGET') or ''
if _cfg_target:
_cfg_target_split = [int(x) for x in _cfg_target.split('.')]
if _cfg_target:
# ensure that the deployment target of build process is not less
# than that used when the interpreter was built. This ensures
# extension modules are built with correct compatibility values
cur_target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', _cfg_target)
if _cfg_target_split > [int(x) for x in cur_target.split('.')]:
my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: '
'now "%s" but "%s" during configure'
% (cur_target, _cfg_target))
raise DistutilsPlatformError(my_msg)
env = dict(os.environ,
MACOSX_DEPLOYMENT_TARGET=cur_target)
exec_fn = search_path and os.execvpe or os.execve
pid = os.fork()
if pid == 0: # in the child
try:
if env is None:
exec_fn(executable, cmd)
else:
exec_fn(executable, cmd, env)
except OSError as e:
if not DEBUG:
cmd = executable
sys.stderr.write("unable to execute %r: %s\n"
% (cmd, e.strerror))
os._exit(1)
if not DEBUG:
cmd = executable
sys.stderr.write("unable to execute %r for unknown reasons" % cmd)
os._exit(1)
else: # in the parent
# Loop until the child either exits or is terminated by a signal
# (ie. keep waiting if it's merely stopped)
while True:
try:
pid, status = os.waitpid(pid, 0)
except OSError as exc:
import errno
if exc.errno == errno.EINTR:
continue
if not DEBUG:
cmd = executable
raise DistutilsExecError(
"command %r failed: %s" % (cmd, exc.args[-1]))
if os.WIFSIGNALED(status):
if not DEBUG:
cmd = executable
raise DistutilsExecError(
"command %r terminated by signal %d"
% (cmd, os.WTERMSIG(status)))
elif os.WIFEXITED(status):
exit_status = os.WEXITSTATUS(status)
if exit_status == 0:
return # hey, it succeeded!
else:
if not DEBUG:
cmd = executable
raise DistutilsExecError(
"command %r failed with exit status %d"
% (cmd, exit_status))
elif os.WIFSTOPPED(status):
continue
else:
if not DEBUG:
cmd = executable
raise DistutilsExecError(
"unknown error executing %r: termination status %d"
% (cmd, status))
def find_executable(executable, path=None):
"""Tries to find 'executable' in the directories listed in 'path'.
A string listing directories separated by 'os.pathsep'; defaults to
os.environ['PATH']. Returns the complete filename or None if not found.
"""
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
base, ext = os.path.splitext(executable)
if (sys.platform == 'win32') and (ext != '.exe'):
executable = executable + '.exe'
if not os.path.isfile(executable):
for p in paths:
f = os.path.join(p, executable)
if os.path.isfile(f):
# the file exists, we have a shot at spawn working
return f
return None
else:
return executable
| lgpl-3.0 |
andela-ifageyinbo/django | tests/generic_relations_regress/tests.py | 300 | 11453 | from django.db.models import Q, Sum
from django.db.models.deletion import ProtectedError
from django.db.utils import IntegrityError
from django.forms.models import modelform_factory
from django.test import TestCase, skipIfDBFeature
from .models import (
A, B, C, D, Address, Board, CharLink, Company, Contact, Content, Developer,
Guild, HasLinkThing, Link, Node, Note, OddRelation1, OddRelation2,
Organization, Person, Place, Related, Restaurant, Tag, Team, TextLink,
)
class GenericRelationTests(TestCase):
def test_inherited_models_content_type(self):
"""
Test that GenericRelations on inherited classes use the correct content
type.
"""
p = Place.objects.create(name="South Park")
r = Restaurant.objects.create(name="Chubby's")
l1 = Link.objects.create(content_object=p)
l2 = Link.objects.create(content_object=r)
self.assertEqual(list(p.links.all()), [l1])
self.assertEqual(list(r.links.all()), [l2])
def test_reverse_relation_pk(self):
"""
Test that the correct column name is used for the primary key on the
originating model of a query. See #12664.
"""
p = Person.objects.create(account=23, name='Chef')
Address.objects.create(street='123 Anywhere Place',
city='Conifer', state='CO',
zipcode='80433', content_object=p)
qs = Person.objects.filter(addresses__zipcode='80433')
self.assertEqual(1, qs.count())
self.assertEqual('Chef', qs[0].name)
def test_charlink_delete(self):
oddrel = OddRelation1.objects.create(name='clink')
CharLink.objects.create(content_object=oddrel)
oddrel.delete()
def test_textlink_delete(self):
oddrel = OddRelation2.objects.create(name='tlink')
TextLink.objects.create(content_object=oddrel)
oddrel.delete()
def test_q_object_or(self):
"""
Tests that SQL query parameters for generic relations are properly
grouped when OR is used.
Test for bug http://code.djangoproject.com/ticket/11535
In this bug the first query (below) works while the second, with the
query parameters the same but in reverse order, does not.
The issue is that the generic relation conditions do not get properly
grouped in parentheses.
"""
note_contact = Contact.objects.create()
org_contact = Contact.objects.create()
Note.objects.create(note='note', content_object=note_contact)
org = Organization.objects.create(name='org name')
org.contacts.add(org_contact)
# search with a non-matching note and a matching org name
qs = Contact.objects.filter(Q(notes__note__icontains=r'other note') |
Q(organizations__name__icontains=r'org name'))
self.assertIn(org_contact, qs)
# search again, with the same query parameters, in reverse order
qs = Contact.objects.filter(
Q(organizations__name__icontains=r'org name') |
Q(notes__note__icontains=r'other note'))
self.assertIn(org_contact, qs)
def test_join_reuse(self):
qs = Person.objects.filter(
addresses__street='foo'
).filter(
addresses__street='bar'
)
self.assertEqual(str(qs.query).count('JOIN'), 2)
def test_generic_relation_ordering(self):
"""
Test that ordering over a generic relation does not include extraneous
duplicate results, nor excludes rows not participating in the relation.
"""
p1 = Place.objects.create(name="South Park")
p2 = Place.objects.create(name="The City")
c = Company.objects.create(name="Chubby's Intl.")
Link.objects.create(content_object=p1)
Link.objects.create(content_object=c)
places = list(Place.objects.order_by('links__id'))
def count_places(place):
return len([p for p in places if p.id == place.id])
self.assertEqual(len(places), 2)
self.assertEqual(count_places(p1), 1)
self.assertEqual(count_places(p2), 1)
def test_target_model_is_unsaved(self):
"""Test related to #13085"""
# Fails with another, ORM-level error
dev1 = Developer(name='Joe')
note = Note(note='Deserves promotion', content_object=dev1)
self.assertRaises(IntegrityError, note.save)
def test_target_model_len_zero(self):
"""Test for #13085 -- __len__() returns 0"""
team1 = Team.objects.create(name='Backend devs')
try:
note = Note(note='Deserve a bonus', content_object=team1)
except Exception as e:
if (issubclass(type(e), Exception) and
str(e) == 'Impossible arguments to GFK.get_content_type!'):
self.fail("Saving model with GenericForeignKey to model instance whose "
"__len__ method returns 0 shouldn't fail.")
raise e
note.save()
def test_target_model_nonzero_false(self):
"""Test related to #13085"""
# __nonzero__() returns False -- This actually doesn't currently fail.
# This test validates that
g1 = Guild.objects.create(name='First guild')
note = Note(note='Note for guild', content_object=g1)
note.save()
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_gfk_to_model_with_empty_pk(self):
"""Test related to #13085"""
# Saving model with GenericForeignKey to model instance with an
# empty CharField PK
b1 = Board.objects.create(name='')
tag = Tag(label='VP', content_object=b1)
tag.save()
def test_ticket_20378(self):
# Create a couple of extra HasLinkThing so that the autopk value
# isn't the same for Link and HasLinkThing.
hs1 = HasLinkThing.objects.create()
hs2 = HasLinkThing.objects.create()
hs3 = HasLinkThing.objects.create()
hs4 = HasLinkThing.objects.create()
l1 = Link.objects.create(content_object=hs3)
l2 = Link.objects.create(content_object=hs4)
self.assertQuerysetEqual(
HasLinkThing.objects.filter(links=l1),
[hs3], lambda x: x)
self.assertQuerysetEqual(
HasLinkThing.objects.filter(links=l2),
[hs4], lambda x: x)
self.assertQuerysetEqual(
HasLinkThing.objects.exclude(links=l2),
[hs1, hs2, hs3], lambda x: x, ordered=False)
self.assertQuerysetEqual(
HasLinkThing.objects.exclude(links=l1),
[hs1, hs2, hs4], lambda x: x, ordered=False)
def test_ticket_20564(self):
b1 = B.objects.create()
b2 = B.objects.create()
b3 = B.objects.create()
c1 = C.objects.create(b=b1)
c2 = C.objects.create(b=b2)
c3 = C.objects.create(b=b3)
A.objects.create(flag=None, content_object=b1)
A.objects.create(flag=True, content_object=b2)
self.assertQuerysetEqual(
C.objects.filter(b__a__flag=None),
[c1, c3], lambda x: x
)
self.assertQuerysetEqual(
C.objects.exclude(b__a__flag=None),
[c2], lambda x: x
)
def test_ticket_20564_nullable_fk(self):
b1 = B.objects.create()
b2 = B.objects.create()
b3 = B.objects.create()
d1 = D.objects.create(b=b1)
d2 = D.objects.create(b=b2)
d3 = D.objects.create(b=b3)
d4 = D.objects.create()
A.objects.create(flag=None, content_object=b1)
A.objects.create(flag=True, content_object=b1)
A.objects.create(flag=True, content_object=b2)
self.assertQuerysetEqual(
D.objects.exclude(b__a__flag=None),
[d2], lambda x: x
)
self.assertQuerysetEqual(
D.objects.filter(b__a__flag=None),
[d1, d3, d4], lambda x: x
)
self.assertQuerysetEqual(
B.objects.filter(a__flag=None),
[b1, b3], lambda x: x
)
self.assertQuerysetEqual(
B.objects.exclude(a__flag=None),
[b2], lambda x: x
)
def test_extra_join_condition(self):
# A crude check that content_type_id is taken in account in the
# join/subquery condition.
self.assertIn("content_type_id", str(B.objects.exclude(a__flag=None).query).lower())
# No need for any joins - the join from inner query can be trimmed in
# this case (but not in the above case as no a objects at all for given
# B would then fail).
self.assertNotIn(" join ", str(B.objects.exclude(a__flag=True).query).lower())
self.assertIn("content_type_id", str(B.objects.exclude(a__flag=True).query).lower())
def test_annotate(self):
hs1 = HasLinkThing.objects.create()
hs2 = HasLinkThing.objects.create()
HasLinkThing.objects.create()
b = Board.objects.create(name=str(hs1.pk))
Link.objects.create(content_object=hs2)
l = Link.objects.create(content_object=hs1)
Link.objects.create(content_object=b)
qs = HasLinkThing.objects.annotate(Sum('links')).filter(pk=hs1.pk)
# If content_type restriction isn't in the query's join condition,
# then wrong results are produced here as the link to b will also match
# (b and hs1 have equal pks).
self.assertEqual(qs.count(), 1)
self.assertEqual(qs[0].links__sum, l.id)
l.delete()
# Now if we don't have proper left join, we will not produce any
# results at all here.
# clear cached results
qs = qs.all()
self.assertEqual(qs.count(), 1)
# Note - 0 here would be a nicer result...
self.assertIs(qs[0].links__sum, None)
# Finally test that filtering works.
self.assertEqual(qs.filter(links__sum__isnull=True).count(), 1)
self.assertEqual(qs.filter(links__sum__isnull=False).count(), 0)
def test_filter_targets_related_pk(self):
HasLinkThing.objects.create()
hs2 = HasLinkThing.objects.create()
l = Link.objects.create(content_object=hs2)
self.assertNotEqual(l.object_id, l.pk)
self.assertQuerysetEqual(
HasLinkThing.objects.filter(links=l.pk),
[hs2], lambda x: x)
def test_editable_generic_rel(self):
GenericRelationForm = modelform_factory(HasLinkThing, fields='__all__')
form = GenericRelationForm()
self.assertIn('links', form.fields)
form = GenericRelationForm({'links': None})
self.assertTrue(form.is_valid())
form.save()
links = HasLinkThing._meta.get_field('links')
self.assertEqual(links.save_form_data_calls, 1)
def test_ticket_22998(self):
related = Related.objects.create()
content = Content.objects.create(related_obj=related)
Node.objects.create(content=content)
# deleting the Related cascades to the Content cascades to the Node,
# where the pre_delete signal should fire and prevent deletion.
with self.assertRaises(ProtectedError):
related.delete()
def test_ticket_22982(self):
place = Place.objects.create(name='My Place')
self.assertIn('GenericRelatedObjectManager', str(place.links))
| bsd-3-clause |
awkspace/ansible | lib/ansible/modules/network/onyx/onyx_mlag_ipl.py | 118 | 6779 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: onyx_mlag_ipl
version_added: "2.5"
author: "Samer Deeb (@samerd)"
short_description: Manage IPL (inter-peer link) on Mellanox ONYX network devices
description:
- This module provides declarative management of IPL (inter-peer link)
management on Mellanox ONYX network devices.
notes:
- Tested on ONYX 3.6.4000
options:
name:
description:
- Name of the interface (port-channel) IPL should be configured on.
required: true
vlan_interface:
description:
- Name of the IPL vlan interface.
state:
description:
- IPL state.
default: present
choices: ['present', 'absent']
peer_address:
description:
- IPL peer IP address.
"""
EXAMPLES = """
- name: run configure ipl
onyx_mlag_ipl:
name: Po1
vlan_interface: Vlan 322
state: present
peer_address: 192.168.7.1
- name: run remove ipl
onyx_mlag_ipl:
name: Po1
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always
type: list
sample:
- interface port-channel 1 ipl 1
- interface vlan 1024 ipl 1 peer-address 10.10.10.10
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.onyx.onyx import BaseOnyxModule
from ansible.module_utils.network.onyx.onyx import show_cmd
class OnyxMlagIplModule(BaseOnyxModule):
VLAN_IF_REGEX = re.compile(r'^Vlan \d+')
@classmethod
def _get_element_spec(cls):
return dict(
name=dict(required=True),
state=dict(default='present',
choices=['present', 'absent']),
peer_address=dict(),
vlan_interface=dict(),
)
def init_module(self):
""" module initialization
"""
element_spec = self._get_element_spec()
argument_spec = dict()
argument_spec.update(element_spec)
self._module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
def get_required_config(self):
module_params = self._module.params
self._required_config = dict(
name=module_params['name'],
state=module_params['state'],
peer_address=module_params['peer_address'],
vlan_interface=module_params['vlan_interface'])
self.validate_param_values(self._required_config)
def _update_mlag_data(self, mlag_data):
if not mlag_data:
return
mlag_summary = mlag_data.get("MLAG IPLs Summary", {})
ipl_id = "1"
ipl_list = mlag_summary.get(ipl_id)
if ipl_list:
ipl_data = ipl_list[0]
vlan_id = ipl_data.get("Vlan Interface")
vlan_interface = ""
if vlan_id != "N/A":
vlan_interface = "Vlan %s" % vlan_id
peer_address = ipl_data.get("Peer IP address")
name = ipl_data.get("Group Port-Channel")
self._current_config = dict(
name=name,
peer_address=peer_address,
vlan_interface=vlan_interface)
def _show_mlag_data(self):
cmd = "show mlag"
return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)
def load_current_config(self):
# called in base class in run function
self._current_config = dict()
mlag_data = self._show_mlag_data()
self._update_mlag_data(mlag_data)
def _get_interface_cmd_name(self, if_name):
if if_name.startswith('Po'):
return if_name.replace("Po", "port-channel ")
self._module.fail_json(
msg='invalid interface name: %s' % if_name)
def _generate_port_channel_command(self, if_name, enable):
if_cmd_name = self._get_interface_cmd_name(if_name)
if enable:
ipl_cmd = 'ipl 1'
else:
ipl_cmd = "no ipl 1"
cmd = "interface %s %s" % (if_cmd_name, ipl_cmd)
return cmd
def _generate_vlan_if_command(self, if_name, enable, peer_address):
if_cmd_name = if_name.lower()
if enable:
ipl_cmd = 'ipl 1 peer-address %s' % peer_address
else:
ipl_cmd = "no ipl 1"
cmd = "interface %s %s" % (if_cmd_name, ipl_cmd)
return cmd
def _generate_no_ipl_commands(self):
curr_interface = self._current_config.get('name')
req_interface = self._required_config.get('name')
if curr_interface == req_interface:
cmd = self._generate_port_channel_command(
req_interface, enable=False)
self._commands.append(cmd)
def _generate_ipl_commands(self):
curr_interface = self._current_config.get('name')
req_interface = self._required_config.get('name')
if curr_interface != req_interface:
if curr_interface and curr_interface != 'N/A':
cmd = self._generate_port_channel_command(
curr_interface, enable=False)
self._commands.append(cmd)
cmd = self._generate_port_channel_command(
req_interface, enable=True)
self._commands.append(cmd)
curr_vlan = self._current_config.get('vlan_interface')
req_vlan = self._required_config.get('vlan_interface')
add_peer = False
if curr_vlan != req_vlan:
add_peer = True
if curr_vlan:
cmd = self._generate_vlan_if_command(curr_vlan, enable=False,
peer_address=None)
self._commands.append(cmd)
curr_peer = self._current_config.get('peer_address')
req_peer = self._required_config.get('peer_address')
if req_peer != curr_peer:
add_peer = True
if add_peer and req_peer:
cmd = self._generate_vlan_if_command(req_vlan, enable=True,
peer_address=req_peer)
self._commands.append(cmd)
def generate_commands(self):
state = self._required_config['state']
if state == 'absent':
self._generate_no_ipl_commands()
else:
self._generate_ipl_commands()
def main():
""" main entry point for module execution
"""
OnyxMlagIplModule.main()
if __name__ == '__main__':
main()
| gpl-3.0 |
alexandrucoman/vbox-nova-driver | nova/tests/functional/v3/test_pci.py | 8 | 7468 | # Copyright 2013 Intel.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_serialization import jsonutils
import testtools
from nova import db
from nova import objects
from nova.objects import pci_device_pool
from nova.tests.functional.v3 import api_sample_base
from nova.tests.functional.v3 import test_servers
skip_msg = "Bug 1426241"
fake_db_dev_1 = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 1,
'compute_node_id': 1,
'address': '0000:04:10.0',
'vendor_id': '8086',
'numa_node': 0,
'product_id': '1520',
'dev_type': 'type-VF',
'status': 'available',
'dev_id': 'pci_0000_04_10_0',
'label': 'label_8086_1520',
'instance_uuid': '69ba1044-0766-4ec0-b60d-09595de034a1',
'request_id': None,
'extra_info': '{"key1": "value1", "key2": "value2"}'
}
fake_db_dev_2 = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 2,
'compute_node_id': 1,
'address': '0000:04:10.1',
'vendor_id': '8086',
'numa_node': 1,
'product_id': '1520',
'dev_type': 'type-VF',
'status': 'available',
'dev_id': 'pci_0000_04_10_1',
'label': 'label_8086_1520',
'instance_uuid': 'd5b446a6-a1b4-4d01-b4f0-eac37b3a62fc',
'request_id': None,
'extra_info': '{"key3": "value3", "key4": "value4"}'
}
class ExtendedServerPciSampleJsonTest(test_servers.ServersSampleBase):
extension_name = "os-pci"
def setUp(self):
raise testtools.TestCase.skipException(skip_msg)
def test_show(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('server-get-resp', subs, response, 200)
def test_detail(self):
self._post_server()
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('servers-detail-resp', subs, response, 200)
class ExtendedHyervisorPciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extra_extensions_to_load = ['os-hypervisors']
extension_name = 'os-pci'
def setUp(self):
raise testtools.TestCase.skipException(skip_msg)
super(ExtendedHyervisorPciSampleJsonTest, self).setUp()
cpu_info = collections.OrderedDict([
('arch', 'x86_64'),
('model', 'Nehalem'),
('vendor', 'Intel'),
('features', ['pge', 'clflush']),
('topology', {
'cores': 1,
'threads': 1,
'sockets': 4,
}),
])
self.fake_compute_node = objects.ComputeNode(
cpu_info=jsonutils.dumps(cpu_info),
current_workload=0,
disk_available_least=0,
host_ip="1.1.1.1",
state="up",
status="enabled",
free_disk_gb=1028,
free_ram_mb=7680,
hypervisor_hostname="fake-mini",
hypervisor_type="fake",
hypervisor_version=1000,
id=1,
local_gb=1028,
local_gb_used=0,
memory_mb=8192,
memory_mb_used=512,
running_vms=0,
vcpus=1,
vcpus_used=0,
service_id=2,
host='043b3cacf6f34c90a7245151fc8ebcda',
pci_device_pools=pci_device_pool.from_pci_stats(
{"count": 5,
"vendor_id": "8086",
"product_id": "1520",
"keya": "valuea",
"extra_info": {
"phys_function": '[["0x0000", '
'"0x04", "0x00",'
' "0x1"]]',
"key1": "value1"}}),)
self.fake_service = objects.Service(
id=2,
host='043b3cacf6f34c90a7245151fc8ebcda',
disabled=False,
disabled_reason=None)
@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
@mock.patch("nova.objects.Service.get_by_compute_host")
@mock.patch("nova.objects.ComputeNode.get_by_id")
def test_pci_show(self, mock_obj, mock_svc_get, mock_service):
mock_obj.return_value = self.fake_compute_node
mock_svc_get.return_value = self.fake_service
hypervisor_id = 1
response = self._do_get('os-hypervisors/%s' % hypervisor_id)
subs = {
'hypervisor_id': hypervisor_id,
}
subs.update(self._get_regexes())
self._verify_response('hypervisors-pci-show-resp',
subs, response, 200)
@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
@mock.patch("nova.objects.Service.get_by_compute_host")
@mock.patch("nova.objects.ComputeNodeList.get_all")
def test_pci_detail(self, mock_obj, mock_svc_get, mock_service):
mock_obj.return_value = [self.fake_compute_node]
mock_svc_get.return_value = self.fake_service
hypervisor_id = 1
subs = {
'hypervisor_id': hypervisor_id
}
response = self._do_get('os-hypervisors/detail')
subs.update(self._get_regexes())
self._verify_response('hypervisors-pci-detail-resp',
subs, response, 200)
class PciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extension_name = "os-pci"
def setUp(self):
raise testtools.TestCase.skipException(skip_msg)
def _fake_pci_device_get_by_id(self, context, id):
return fake_db_dev_1
def _fake_pci_device_get_all_by_node(self, context, id):
return [fake_db_dev_1, fake_db_dev_2]
def test_pci_show(self):
self.stubs.Set(db, 'pci_device_get_by_id',
self._fake_pci_device_get_by_id)
response = self._do_get('os-pci/1')
subs = self._get_regexes()
self._verify_response('pci-show-resp', subs, response, 200)
def test_pci_index(self):
self.stubs.Set(db, 'pci_device_get_all_by_node',
self._fake_pci_device_get_all_by_node)
response = self._do_get('os-pci')
subs = self._get_regexes()
self._verify_response('pci-index-resp', subs, response, 200)
def test_pci_detail(self):
self.stubs.Set(db, 'pci_device_get_all_by_node',
self._fake_pci_device_get_all_by_node)
response = self._do_get('os-pci/detail')
subs = self._get_regexes()
self._verify_response('pci-detail-resp', subs, response, 200)
| apache-2.0 |
hcasse/elfmake | elfmake/recipe.py | 1 | 6836 | """Classes used to represent recipes."""
import env
import action
import io
import os
import os.path
import sys
file_db = { } # file database
ext_db = { } # extension database
# base classes
class File(env.MapEnv):
"""Representation of files."""
path = None
recipe = None
is_goal = False
is_target = False
is_sticky = False
actual_path = None
def __init__(self, path):
env.MapEnv.__init__(self, path.get_file() , env.cenv.path, env.cenv)
self.path = path
file_db[str(path)] = self
def set_goal(self):
"""Mark a file as a goal."""
self.is_goal = True
def set_target(self):
"""Mark a file as a target."""
self.is_target = True
def set_sticky(self):
"""Mark a file as sticky, that is, a final target (not intermediate)."""
self.sticky = True
def actual(self):
"""Get the actual path of the file. For target file, this path
is relative to BPATH variable."""
if not self.actual_path:
if not self.is_target:
self.actual_path = self.path
else:
bpath = self["BPATH"]
if not bpath:
self.actual_path = self.path
else:
bpath = env.topenv.path / bpath
bpath = env.Path(bpath)
if self.path.prefixed_by(env.topenv.path):
self.actual_path = bpath / self.path.relative_to(env.topenv.path)
else:
self.actual_path = bpath / self.path
return self.actual_path
def __div__(self, arg):
return self.path / str(arg)
def time(self):
"""Get the last update time of the file."""
if self.is_goal:
return 0
else:
return self.actual().get_mod_time()
def younger_than(self, f):
"""Test if the current file is younger than the given one."""
if self.is_goal:
return True
else:
return self.time() < f.time()
def __str__(self):
path = self.actual()
if path.prefixed_by(env.topdir) or path.prefixed_by(env.curdir()):
return str(path.relative_to_cur())
else:
return str(path)
def get_file(path):
"""Get the file matching the given path in the DB. Apply
localisation rules relative to a particular make.py if the path
is not absolute."""
# apply localisation rule
if not os.path.isabs(str(path)):
path = env.cenv.path / path
else:
path = env.Path(path)
path = path.norm()
# find the file
if file_db.has_key(str(path)):
return file_db[str(path)]
else:
return File(path)
def get_files(paths):
"""Apply get_file on straight arguments of recipes."""
if not paths:
return []
if not isinstance(paths, list):
paths = [ paths ]
r = []
for p in paths:
if not isinstance(p, File):
p = get_file(p)
r.append(p)
return r
class Recipe:
"""A recipe to build files."""
ress = None
deps = None
env = None
cwd = None
def __init__(self, ress, deps = None):
ress = get_files(ress)
deps = get_files(deps)
self.ress = ress
self.deps = deps
for f in ress:
f.recipe = self
f.is_target = True
self.env = env.cenv
if hasattr(ress[0], 'cwd'):
self.cwd = ress[0].cwd
else:
self.cwd = self.env.path
def action(self, ctx):
"""Execute the receipe."""
pass
def display_action(self, out):
pass
def display(self, out):
out.write("%s: %s\n" % (" ".join([str(f) for f in self.ress]), " ".join([str(f) for f in self.deps])))
self.display_action(out)
out.write("\n")
class FunRecipe(Recipe):
"""A recipe that activates a function."""
fun = None
def __init__(self, fun, ress, deps):
Recipe.__init__(self, ress, deps)
self.fun = fun
def display_action(self, out):
out.write("\t<internal>\n")
def action(self, ctx):
self.fun(self.ress, self.deps, ctx)
class Ext:
"""Represent the support for a file extension."""
ext = None
gens = None
back = None
def __init__(self, ext):
self.ext = ext
self.gens = { }
self.backs = []
ext_db[ext] = self
def update(self, ext, gen):
"""Update extension for the given generator
and perform backward propagation."""
self.gens[ext] = gen
for back in self.backs:
back.dep.update(ext, back)
def get_ext(ext):
"""Obtain an extension."""
if ext_db.has_key(ext):
return ext_db[ext]
else:
return Ext(ext)
class Gen:
"""A generator of recipe."""
res = None
dep = None
def __init__(self, res, dep):
self.res = get_ext(res)
self.dep = get_ext(dep)
# update back link
self.res.backs.append(self)
# update current gens
self.dep.update(res, self)
for ext in self.dep.gens:
self.res.update(ext, self)
def gen(self, res, dep):
"""Generate a recipe to produce the given result
from the given dependency."""
pass
class FunGen(Gen):
"""A simple recipe generator from a function."""
fun = None
def __init__(self, res, dep, fun):
Gen.__init__(self, res, dep)
self.fun = fun
def gen(self, res, dep):
return FunRecipe(self.fun, [res], [dep])
def gen(dir, rext, dep):
"""Generate the chain of recipes needed to build, in directory dir, a file
with extension rext from the dependency dep. Each intermediate file is named
after the base name (kernel) of dep and placed in dir; the path of the last
generated file is returned."""
dir = env.Path(dir)
dep = env.Path(dep)
# prepare the kernel
b = dep.get_base()
dext = dep.get_ext()
#b, dext = os.path.splitext(dep)
#_, n = os.path.split(b)
n = b.get_file()
kern = dir / n #os.path.join(dir, n)
# initialize lookup process
if not ext_db.has_key(dext):
io.DEF.print_error("don't know how to build '%s' from '%s'" % (rext, dep))
exit(1)
ext = ext_db[dext]
prev = dep
# end when dep is found
	while ext.ext != rext:
gen = ext.gens[rext]
next = kern + gen.res.ext
gen.gen(next, prev)
prev = next
ext = gen.res
# return result
return prev
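# Illustrative sketch (not part of the original library): a make.py might register a
# generator and then request a build chain. The names below (compile_c, "build",
# "src/main.c") are made up for the example.
#
#   def compile_c(ress, deps, ctx):
#       # invoke a compiler turning deps[0] into ress[0]
#       pass
#
#   FunGen(".o", ".c", compile_c)            # teach how to derive a .o from a .c
#   obj = gen("build", ".o", "src/main.c")   # returns the generated .o file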
def fix(path):
"""Fix a path according to the current directory."""
if isinstance(path, list):
return [str(get_file(p)) for p in path]
else:
return str(get_file(path))
class ActionRecipe(Recipe):
"""A recipe that supports an action. object for generation."""
act = None
def __init__(self, ress, deps, actions):
Recipe.__init__(self, ress, deps)
self.act = action.make_actions(actions).instantiate(self)
def action(self, ctx):
if self.act:
self.act.execute(ctx)
def display_action(self, out):
self.act.display(out)
class ActionGen(Gen):
"""A recipe generator supporting simple actions."""
action = None
def __init__(self, res, dep, action):
Gen.__init__(self, res, dep)
self.action = action
def gen(self, res, dep):
return ActionRecipe([res], [dep], self.action)
def rule(ress, deps, *actions):
"""Build a rule with actions."""
	ActionRecipe(ress, deps, actions)
def goal(goal, deps, actions = action.Action()):
"""Build a goal with the following dependencies."""
path = env.Path(env.cenv.path) / goal
file = get_file(str(path))
if file.recipe:
raise env.ElfError("a goal already named '%s' already exist!" % goal)
else:
file.set_goal()
file.recipe = ActionRecipe(goal, deps, actions)
return
| gpl-3.0 |
wkschwartz/django | tests/gis_tests/test_spatialrefsys.py | 17 | 5332 | import re
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.utils.functional import cached_property
test_srs = ({
'srid': 4326,
'auth_name': ('EPSG', True),
'auth_srid': 4326,
# Only the beginning, because there are differences depending on installed libs
'srtext': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84"',
# +ellps=WGS84 has been removed in the 4326 proj string in proj-4.8
'proj_re': r'\+proj=longlat (\+ellps=WGS84 )?(\+datum=WGS84 |\+towgs84=0,0,0,0,0,0,0 )\+no_defs ?',
'spheroid': 'WGS 84', 'name': 'WGS 84',
'geographic': True, 'projected': False, 'spatialite': True,
# From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'ellipsoid': (6378137.0, 6356752.3, 298.257223563),
'eprec': (1, 1, 9),
'wkt': re.sub(r'[\s+]', '', """
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
""")
}, {
'srid': 32140,
'auth_name': ('EPSG', False),
'auth_srid': 32140,
'srtext': (
'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",'
'DATUM["North_American_Datum_1983",SPHEROID["GRS 1980"'
),
'proj_re': r'\+proj=lcc (\+lat_1=30.28333333333333? |\+lat_2=28.38333333333333? |\+lat_0=27.83333333333333? |'
r'\+lon_0=-99 ){4}\+x_0=600000 \+y_0=4000000 (\+ellps=GRS80 )?'
r'(\+datum=NAD83 |\+towgs84=0,0,0,0,0,0,0 )?\+units=m \+no_defs ?',
'spheroid': 'GRS 1980', 'name': 'NAD83 / Texas South Central',
'geographic': False, 'projected': True, 'spatialite': False,
# From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'ellipsoid': (6378137.0, 6356752.31414, 298.257222101),
'eprec': (1, 5, 10),
})
@skipUnlessDBFeature("has_spatialrefsys_table")
class SpatialRefSysTest(TestCase):
@cached_property
def SpatialRefSys(self):
return connection.ops.connection.ops.spatial_ref_sys()
def test_get_units(self):
epsg_4326 = next(f for f in test_srs if f['srid'] == 4326)
unit, unit_name = self.SpatialRefSys().get_units(epsg_4326['wkt'])
self.assertEqual(unit_name, 'degree')
self.assertAlmostEqual(unit, 0.01745329251994328)
def test_retrieve(self):
"""
Test retrieval of SpatialRefSys model objects.
"""
for sd in test_srs:
srs = self.SpatialRefSys.objects.get(srid=sd['srid'])
self.assertEqual(sd['srid'], srs.srid)
# Some of the authority names are borked on Oracle, e.g., SRID=32140.
            # Also, Oracle Spatial seems to add extraneous info to fields, hence
            # the testing with the 'startswith' flag.
auth_name, oracle_flag = sd['auth_name']
# Compare case-insensitively because srs.auth_name is lowercase
# ("epsg") on Spatialite.
if not connection.ops.oracle or oracle_flag:
self.assertIs(srs.auth_name.upper().startswith(auth_name), True)
self.assertEqual(sd['auth_srid'], srs.auth_srid)
# No PROJ and different srtext on Oracle.
if not connection.ops.oracle:
self.assertTrue(srs.wkt.startswith(sd['srtext']))
self.assertRegex(srs.proj4text, sd['proj_re'])
def test_osr(self):
"""
Test getting OSR objects from SpatialRefSys model objects.
"""
for sd in test_srs:
sr = self.SpatialRefSys.objects.get(srid=sd['srid'])
self.assertTrue(sr.spheroid.startswith(sd['spheroid']))
self.assertEqual(sd['geographic'], sr.geographic)
self.assertEqual(sd['projected'], sr.projected)
self.assertIs(sr.name.startswith(sd['name']), True)
# Testing the SpatialReference object directly.
if not connection.ops.oracle:
srs = sr.srs
self.assertRegex(srs.proj, sd['proj_re'])
self.assertTrue(srs.wkt.startswith(sd['srtext']))
def test_ellipsoid(self):
"""
Test the ellipsoid property.
"""
for sd in test_srs:
# Getting the ellipsoid and precision parameters.
ellps1 = sd['ellipsoid']
prec = sd['eprec']
# Getting our spatial reference and its ellipsoid
srs = self.SpatialRefSys.objects.get(srid=sd['srid'])
ellps2 = srs.ellipsoid
for i in range(3):
self.assertAlmostEqual(ellps1[i], ellps2[i], prec[i])
@skipUnlessDBFeature('supports_add_srs_entry')
def test_add_entry(self):
"""
Test adding a new entry in the SpatialRefSys model using the
add_srs_entry utility.
"""
from django.contrib.gis.utils import add_srs_entry
add_srs_entry(3857)
self.assertTrue(
self.SpatialRefSys.objects.filter(srid=3857).exists()
)
srs = self.SpatialRefSys.objects.get(srid=3857)
self.assertTrue(
self.SpatialRefSys.get_spheroid(srs.wkt).startswith('SPHEROID[')
)
| bsd-3-clause |
orezpraw/gensim | gensim/test/test_miislita.py | 83 | 3928 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
This module replicates the miislita vector spaces from
"A Linear Algebra Approach to the Vector Space Model -- A Fast Track Tutorial"
by Dr. E. Garcia, [email protected]
See http://www.miislita.com for further details.
"""
from __future__ import division # always use floats
from __future__ import with_statement
import logging
import tempfile
import unittest
import bz2
import os
from gensim import utils, corpora, models, similarities
# sample data files are located in the same folder
module_path = os.path.dirname(__file__)
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
logger = logging.getLogger('test_miislita')
def get_tmpfile(suffix):
return os.path.join(tempfile.gettempdir(), suffix)
class CorpusMiislita(corpora.TextCorpus):
stoplist = set('for a of the and to in on'.split())
def get_texts(self):
"""
Parse documents from the .cor file provided in the constructor. Lowercase
each document and ignore some stopwords.
.cor format: one document per line, words separated by whitespace.
"""
with self.getstream() as stream:
for doc in stream:
yield [word for word in utils.to_unicode(doc).lower().split()
if word not in CorpusMiislita.stoplist]
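        # For illustration: a .cor line such as "The Classic Vector Space Model"
        # yields the tokens ['classic', 'vector', 'space', 'model'] after
        # lowercasing and removal of the stopwords listed above ('the' is in the stoplist).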
def __len__(self):
"""Define this so we can use `len(corpus)`"""
if 'length' not in self.__dict__:
logger.info("caching corpus size (calculating number of documents)")
self.length = sum(1 for doc in self.get_texts())
return self.length
class TestMiislita(unittest.TestCase):
def test_textcorpus(self):
"""Make sure TextCorpus can be serialized to disk. """
# construct corpus from file
miislita = CorpusMiislita(datapath('head500.noblanks.cor.bz2'))
# make sure serializing works
ftmp = get_tmpfile('test_textcorpus.mm')
corpora.MmCorpus.save_corpus(ftmp, miislita)
self.assertTrue(os.path.exists(ftmp))
# make sure deserializing gives the same result
miislita2 = corpora.MmCorpus(ftmp)
self.assertEqual(list(miislita), list(miislita2))
def test_save_load_ability(self):
"""
Make sure we can save and load (un/pickle) TextCorpus objects (as long
as the underlying input isn't a file-like object; we cannot pickle those).
"""
# construct corpus from file
corpusname = datapath('miIslita.cor')
miislita = CorpusMiislita(corpusname)
# pickle to disk
tmpf = get_tmpfile('tc_test.cpickle')
miislita.save(tmpf)
miislita2 = CorpusMiislita.load(tmpf)
self.assertEqual(len(miislita), len(miislita2))
self.assertEqual(miislita.dictionary.token2id, miislita2.dictionary.token2id)
def test_miislita_high_level(self):
# construct corpus from file
miislita = CorpusMiislita(datapath('miIslita.cor'))
# initialize tfidf transformation and similarity index
tfidf = models.TfidfModel(miislita, miislita.dictionary, normalize=False)
index = similarities.SparseMatrixSimilarity(tfidf[miislita], num_features=len(miislita.dictionary))
# compare to query
query = 'latent semantic indexing'
vec_bow = miislita.dictionary.doc2bow(query.lower().split())
vec_tfidf = tfidf[vec_bow]
# perform a similarity query against the corpus
sims_tfidf = index[vec_tfidf]
# for the expected results see the article
expected = [0.0, 0.2560, 0.7022, 0.1524, 0.3334]
for i, value in enumerate(expected):
self.assertAlmostEqual(sims_tfidf[i], value, 2)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main()
| gpl-3.0 |
gangadharkadam/v4_erp | erpnext/setup/doctype/backup_manager/backup_dropbox.py | 41 | 4776 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# SETUP:
# install: pip install --upgrade dropbox
#
# Create new Dropbox App
#
# in conf.py, set oauth2 settings
# dropbox_access_key
# dropbox_secret_key
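#
# A minimal sketch of the corresponding site config entries (key names taken from
# the code below; the values are placeholders):
#   "dropbox_access_key": "<your app key>",
#   "dropbox_secret_key": "<your app secret>"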
from __future__ import unicode_literals
import os
import frappe
from frappe.utils import get_request_site_address, cstr
from frappe import _
@frappe.whitelist()
def get_dropbox_authorize_url():
sess = get_dropbox_session()
request_token = sess.obtain_request_token()
return_address = get_request_site_address(True) \
+ "?cmd=erpnext.setup.doctype.backup_manager.backup_dropbox.dropbox_callback"
url = sess.build_authorize_url(request_token, return_address)
return {
"url": url,
"key": request_token.key,
"secret": request_token.secret,
}
@frappe.whitelist(allow_guest=True)
def dropbox_callback(oauth_token=None, not_approved=False):
from dropbox import client
if not not_approved:
if frappe.db.get_value("Backup Manager", None, "dropbox_access_key")==oauth_token:
allowed = 1
message = "Dropbox access allowed."
sess = get_dropbox_session()
sess.set_request_token(frappe.db.get_value("Backup Manager", None, "dropbox_access_key"),
frappe.db.get_value("Backup Manager", None, "dropbox_access_secret"))
access_token = sess.obtain_access_token()
frappe.db.set_value("Backup Manager", "Backup Manager", "dropbox_access_key", access_token.key)
frappe.db.set_value("Backup Manager", "Backup Manager", "dropbox_access_secret", access_token.secret)
frappe.db.set_value("Backup Manager", "Backup Manager", "dropbox_access_allowed", allowed)
dropbox_client = client.DropboxClient(sess)
try:
dropbox_client.file_create_folder("files")
except:
pass
else:
allowed = 0
message = "Illegal Access Token Please try again."
else:
allowed = 0
message = "Dropbox Access not approved."
frappe.local.message_title = "Dropbox Approval"
frappe.local.message = "<h3>%s</h3><p>Please close this window.</p>" % message
if allowed:
frappe.local.message_success = True
frappe.db.commit()
frappe.response['type'] = 'page'
frappe.response['page_name'] = 'message.html'
def backup_to_dropbox():
from dropbox import client, session
from frappe.utils.backups import new_backup
from frappe.utils import get_files_path, get_backups_path
if not frappe.db:
frappe.connect()
sess = session.DropboxSession(frappe.conf.dropbox_access_key, frappe.conf.dropbox_secret_key, "app_folder")
sess.set_token(frappe.db.get_value("Backup Manager", None, "dropbox_access_key"),
frappe.db.get_value("Backup Manager", None, "dropbox_access_secret"))
dropbox_client = client.DropboxClient(sess)
# upload database
backup = new_backup()
filename = os.path.join(get_backups_path(), os.path.basename(backup.backup_path_db))
upload_file_to_dropbox(filename, "/database", dropbox_client)
frappe.db.close()
response = dropbox_client.metadata("/files")
# upload files to files folder
did_not_upload = []
error_log = []
path = get_files_path()
for filename in os.listdir(path):
filename = cstr(filename)
found = False
filepath = os.path.join(path, filename)
for file_metadata in response["contents"]:
if os.path.basename(filepath) == os.path.basename(file_metadata["path"]) and os.stat(filepath).st_size == int(file_metadata["bytes"]):
found = True
break
if not found:
try:
upload_file_to_dropbox(filepath, "/files", dropbox_client)
except Exception:
did_not_upload.append(filename)
error_log.append(frappe.get_traceback())
frappe.connect()
return did_not_upload, list(set(error_log))
def get_dropbox_session():
try:
from dropbox import session
except:
frappe.msgprint(_("Please install dropbox python module"), raise_exception=1)
if not (frappe.conf.dropbox_access_key or frappe.conf.dropbox_secret_key):
frappe.throw(_("Please set Dropbox access keys in your site config"))
sess = session.DropboxSession(frappe.conf.dropbox_access_key, frappe.conf.dropbox_secret_key, "app_folder")
return sess
def upload_file_to_dropbox(filename, folder, dropbox_client):
from dropbox import rest
size = os.stat(filename).st_size
with open(filename, 'r') as f:
# if max packet size reached, use chunked uploader
max_packet_size = 4194304
if size > max_packet_size:
uploader = dropbox_client.get_chunked_uploader(f, size)
while uploader.offset < size:
try:
uploader.upload_chunked()
uploader.finish(folder + "/" + os.path.basename(filename), overwrite=True)
except rest.ErrorResponse:
pass
else:
dropbox_client.put_file(folder + "/" + os.path.basename(filename), f, overwrite=True)
if __name__=="__main__":
backup_to_dropbox() | agpl-3.0 |
btnpushnmunky/cupcake | monsters.py | 1 | 1918 | import pygame
import os
from random import randint
UP = 3
DOWN = 7
RIGHT = 5
LEFT = 9
EXEC_DIR = os.path.dirname(__file__)
class Monster(pygame.sprite.Sprite):
""" This is our main monster class """
def __init__(self, initial_position, type, direction):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load(type + '.png')
self.rect = self.image.get_rect()
self.rect.topleft = initial_position
self.next_update_time = 0
self.bottom = self.rect.bottom
self.top = self.rect.top
self.right = self.rect.right
self.left = self.rect.left
self.direction = direction
self.type = type
self.speed = randint(1,5)
def update(self, plane, bounds):
self.top = self.rect.top
self.left = self.rect.left
self.right = self.rect.right
self.bottom = self.rect.bottom
if plane == 'horizontal':
if self.direction == RIGHT:
self.rect.left += 1 * self.speed
if self.right > bounds:
self.reverse()
elif self.direction == LEFT:
self.rect.left -= 1 * self.speed
if self.left < 0:
self.reverse()
elif plane == 'vertical':
if self.direction == UP:
self.rect.top -= 1 * self.speed
if self.top < 30:
self.reverse()
elif self.direction == DOWN:
self.rect.top += 1 * self.speed
if self.bottom > bounds:
self.reverse()
def reverse(self):
if self.direction == RIGHT:
self.direction = LEFT
elif self.direction == LEFT:
self.direction = RIGHT
elif self.direction == UP:
self.direction = DOWN
elif self.direction == DOWN:
self.direction = UP
| mit |
julien78910/CouchPotatoServer | libs/rtorrent/rpc/__init__.py | 158 | 10775 | # Copyright (c) 2013 Chris Lucas, <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import inspect
import rtorrent
import re
from rtorrent.common import bool_to_int, convert_version_tuple_to_str,\
safe_repr
from rtorrent.err import MethodError
from rtorrent.compat import xmlrpclib
def get_varname(rpc_call):
"""Transform rpc method into variable name.
@newfield example: Example
@example: if the name of the rpc method is 'p.get_down_rate', the variable
name will be 'down_rate'
"""
# extract variable name from xmlrpc func name
r = re.search(
"([ptdf]\.|system\.|get\_|is\_|set\_)+([^=]*)", rpc_call, re.I)
if r:
return(r.groups()[-1])
else:
return(None)
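# A few illustrative inputs/outputs for the regex above (not an exhaustive list):
#   get_varname("p.get_down_rate") -> "down_rate"
#   get_varname("d.get_custom1")   -> "custom1"
#   get_varname("no_match=")       -> None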
def _handle_unavailable_rpc_method(method, rt_obj):
msg = "Method isn't available."
if rt_obj._get_client_version_tuple() < method.min_version:
msg = "This method is only available in " \
"RTorrent version v{0} or later".format(
convert_version_tuple_to_str(method.min_version))
raise MethodError(msg)
class DummyClass:
def __init__(self):
pass
class Method:
"""Represents an individual RPC method"""
def __init__(self, _class, method_name,
rpc_call, docstring=None, varname=None, **kwargs):
self._class = _class # : Class this method is associated with
self.class_name = _class.__name__
self.method_name = method_name # : name of public-facing method
self.rpc_call = rpc_call # : name of rpc method
self.docstring = docstring # : docstring for rpc method (optional)
        self.varname = varname  # : variable where the result of the method call is stored (derived from the rpc call name when not given)
self.min_version = kwargs.get("min_version", (
0, 0, 0)) # : Minimum version of rTorrent required
self.boolean = kwargs.get("boolean", False) # : returns boolean value?
self.post_process_func = kwargs.get(
"post_process_func", None) # : custom post process function
self.aliases = kwargs.get(
"aliases", []) # : aliases for method (optional)
self.required_args = []
#: Arguments required when calling the method (not utilized)
self.method_type = self._get_method_type()
if self.varname is None:
self.varname = get_varname(self.rpc_call)
assert self.varname is not None, "Couldn't get variable name."
def __repr__(self):
return safe_repr("Method(method_name='{0}', rpc_call='{1}')",
self.method_name, self.rpc_call)
def _get_method_type(self):
"""Determine whether method is a modifier or a retriever"""
if self.method_name[:4] == "set_": return('m') # modifier
else:
return('r') # retriever
def is_modifier(self):
if self.method_type == 'm':
return(True)
else:
return(False)
def is_retriever(self):
if self.method_type == 'r':
return(True)
else:
return(False)
def is_available(self, rt_obj):
if rt_obj._get_client_version_tuple() < self.min_version or \
self.rpc_call not in rt_obj._get_rpc_methods():
return(False)
else:
return(True)
class Multicall:
def __init__(self, class_obj, **kwargs):
self.class_obj = class_obj
if class_obj.__class__.__name__ == "RTorrent":
self.rt_obj = class_obj
else:
self.rt_obj = class_obj._rt_obj
self.calls = []
def add(self, method, *args):
"""Add call to multicall
@param method: L{Method} instance or name of raw RPC method
@type method: Method or str
@param args: call arguments
"""
# if a raw rpc method was given instead of a Method instance,
# try and find the instance for it. And if all else fails, create a
# dummy Method instance
if isinstance(method, str):
result = find_method(method)
# if result not found
if result == -1:
method = Method(DummyClass, method, method)
else:
method = result
# ensure method is available before adding
if not method.is_available(self.rt_obj):
_handle_unavailable_rpc_method(method, self.rt_obj)
self.calls.append((method, args))
def list_calls(self):
for c in self.calls:
print(c)
def call(self):
"""Execute added multicall calls
@return: the results (post-processed), in the order they were added
@rtype: tuple
"""
m = xmlrpclib.MultiCall(self.rt_obj._get_conn())
for call in self.calls:
method, args = call
rpc_call = getattr(method, "rpc_call")
getattr(m, rpc_call)(*args)
results = m()
results = tuple(results)
results_processed = []
for r, c in zip(results, self.calls):
method = c[0] # Method instance
result = process_result(method, r)
results_processed.append(result)
# assign result to class_obj
exists = hasattr(self.class_obj, method.varname)
if not exists or not inspect.ismethod(getattr(self.class_obj, method.varname)):
setattr(self.class_obj, method.varname, result)
return(tuple(results_processed))
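# Rough usage sketch (the torrent object and rpc method names here are illustrative
# and depend on the connected rTorrent version):
#
#   m = Multicall(torrent)
#   m.add("d.get_down_rate", torrent.rpc_id)
#   m.add("d.get_up_rate", torrent.rpc_id)
#   down_rate, up_rate = m.call()   # results come back in the order they were added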
def call_method(class_obj, method, *args):
"""Handles single RPC calls
@param class_obj: Peer/File/Torrent/Tracker/RTorrent instance
@type class_obj: object
@param method: L{Method} instance or name of raw RPC method
@type method: Method or str
"""
if method.is_retriever():
args = args[:-1]
else:
assert args[-1] is not None, "No argument given."
if class_obj.__class__.__name__ == "RTorrent":
rt_obj = class_obj
else:
rt_obj = class_obj._rt_obj
# check if rpc method is even available
if not method.is_available(rt_obj):
_handle_unavailable_rpc_method(method, rt_obj)
m = Multicall(class_obj)
m.add(method, *args)
# only added one method, only getting one result back
ret_value = m.call()[0]
####### OBSOLETE ##########################################################
# if method.is_retriever():
# #value = process_result(method, ret_value)
# value = ret_value #MultiCall already processed the result
# else:
# # we're setting the user's input to method.varname
# # but we'll return the value that xmlrpc gives us
# value = process_result(method, args[-1])
##########################################################################
return(ret_value)
def find_method(rpc_call):
"""Return L{Method} instance associated with given RPC call"""
method_lists = [
rtorrent.methods,
rtorrent.file.methods,
rtorrent.tracker.methods,
rtorrent.peer.methods,
rtorrent.torrent.methods,
]
for l in method_lists:
for m in l:
if m.rpc_call.lower() == rpc_call.lower():
return(m)
return(-1)
def process_result(method, result):
"""Process given C{B{result}} based on flags set in C{B{method}}
@param method: L{Method} instance
@type method: Method
@param result: result to be processed (the result of given L{Method} instance)
@note: Supported Processing:
- boolean - convert ones and zeros returned by rTorrent and
convert to python boolean values
"""
# handle custom post processing function
if method.post_process_func is not None:
result = method.post_process_func(result)
# is boolean?
if method.boolean:
if result in [1, '1']:
result = True
elif result in [0, '0']:
result = False
return(result)
def _build_rpc_methods(class_, method_list):
"""Build glorified aliases to raw RPC methods"""
instance = None
if not inspect.isclass(class_):
instance = class_
class_ = instance.__class__
for m in method_list:
class_name = m.class_name
if class_name != class_.__name__:
continue
if class_name == "RTorrent":
caller = lambda self, arg = None, method = m:\
call_method(self, method, bool_to_int(arg))
elif class_name == "Torrent":
caller = lambda self, arg = None, method = m:\
call_method(self, method, self.rpc_id,
bool_to_int(arg))
elif class_name in ["Tracker", "File"]:
caller = lambda self, arg = None, method = m:\
call_method(self, method, self.rpc_id,
bool_to_int(arg))
elif class_name == "Peer":
caller = lambda self, arg = None, method = m:\
call_method(self, method, self.rpc_id,
bool_to_int(arg))
elif class_name == "Group":
caller = lambda arg = None, method = m: \
call_method(instance, method, bool_to_int(arg))
if m.docstring is None:
m.docstring = ""
# print(m)
docstring = """{0}
@note: Variable where the result for this method is stored: {1}.{2}""".format(
m.docstring,
class_name,
m.varname)
caller.__doc__ = docstring
for method_name in [m.method_name] + list(m.aliases):
if instance is None:
setattr(class_, method_name, caller)
else:
setattr(instance, method_name, caller)
| gpl-3.0 |
amir-qayyum-khan/edx-platform | common/djangoapps/request_cache/middleware.py | 9 | 3741 | """
An implementation of a RequestCache. This cache is reset at the beginning
and end of every request.
"""
import crum
import threading
class _RequestCache(threading.local):
"""
A thread-local for storing the per-request cache.
"""
def __init__(self):
super(_RequestCache, self).__init__()
self.data = {}
REQUEST_CACHE = _RequestCache()
class RequestCache(object):
@classmethod
def get_request_cache(cls, name=None):
"""
This method is deprecated. Please use :func:`request_cache.get_cache`.
"""
if name is None:
return REQUEST_CACHE
else:
return REQUEST_CACHE.data.setdefault(name, {})
@classmethod
def get_current_request(cls):
"""
This method is deprecated. Please use :func:`request_cache.get_request`.
"""
return crum.get_current_request()
@classmethod
def clear_request_cache(cls):
"""
Empty the request cache.
"""
REQUEST_CACHE.data = {}
def process_request(self, request):
self.clear_request_cache()
return None
def process_response(self, request, response):
self.clear_request_cache()
return response
def process_exception(self, request, exception): # pylint: disable=unused-argument
"""
Clear the RequestCache after a failed request.
"""
self.clear_request_cache()
return None
def request_cached(f):
"""
A decorator for wrapping a function and automatically handles caching its return value, as well as returning
that cached value for subsequent calls to the same function, with the same parameters, within a given request.
Notes:
- we convert arguments and keyword arguments to their string form to build the cache key, so if you have
args/kwargs that can't be converted to strings, you're gonna have a bad time (don't do it)
- cache key cardinality depends on the args/kwargs, so if you're caching a function that takes five arguments,
you might have deceptively low cache efficiency. prefer function with fewer arguments.
- we use the default request cache, not a named request cache (this shouldn't matter, but just mentioning it)
- benchmark, benchmark, benchmark! if you never measure, how will you know you've improved? or regressed?
Arguments:
f (func): the function to wrap
Returns:
func: a wrapper function which will call the wrapped function, passing in the same args/kwargs,
cache the value it returns, and return that cached value for subsequent calls with the
same args/kwargs within a single request
"""
def wrapper(*args, **kwargs):
"""
Wrapper function to decorate with.
"""
# Build our cache key based on the module the function belongs to, the functions name, and a stringified
# list of arguments and a query string-style stringified list of keyword arguments.
converted_args = map(str, args)
converted_kwargs = map(str, reduce(list.__add__, map(list, sorted(kwargs.iteritems())), []))
cache_keys = [f.__module__, f.func_name] + converted_args + converted_kwargs
cache_key = '.'.join(cache_keys)
# Check to see if we have a result in cache. If not, invoke our wrapped
# function. Cache and return the result to the caller.
rcache = RequestCache.get_request_cache()
if cache_key in rcache.data:
return rcache.data.get(cache_key)
else:
result = f(*args, **kwargs)
rcache.data[cache_key] = result
return result
return wrapper
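# Minimal usage sketch (the decorated function and model below are hypothetical,
# not part of this module):
#
#   @request_cached
#   def get_enrollment_count(course_id):
#       return CourseEnrollment.objects.filter(course_id=course_id).count()
#
# Within a single request the query runs once per distinct course_id; repeated calls
# return the cached value until the RequestCache middleware clears it at request end.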
| agpl-3.0 |
pikeBishop/OMP_gpxReport | examples/geopy/geocoders/opencage.py | 13 | 7091 | """
:class:`.OpenCage` is the Opencagedata geocoder.
"""
from geopy.compat import urlencode
from geopy.geocoders.base import Geocoder, DEFAULT_TIMEOUT, DEFAULT_SCHEME
from geopy.exc import (
GeocoderQueryError,
GeocoderQuotaExceeded,
)
from geopy.location import Location
from geopy.util import logger
__all__ = ("OpenCage", )
class OpenCage(Geocoder):
"""
Geocoder using the Open Cage Data API. Documentation at:
http://geocoder.opencagedata.com/api.html
    .. versionadded:: 1.1.0
"""
def __init__(
self,
api_key,
domain='api.opencagedata.com',
scheme=DEFAULT_SCHEME,
timeout=DEFAULT_TIMEOUT,
proxies=None,
): # pylint: disable=R0913
"""
Initialize a customized Open Cage Data geocoder.
:param string api_key: The API key required by Open Cage Data
to perform geocoding requests. You can get your key here:
https://developer.opencagedata.com/
:param string domain: Currently it is 'api.opencagedata.com', can
be changed for testing purposes.
:param string scheme: Use 'https' or 'http' as the API URL's scheme.
Default is https. Note that SSL connections' certificates are not
verified.
:param dict proxies: If specified, routes this geocoder's requests
through the specified proxy. E.g., {"https": "192.0.2.0"}. For
more information, see documentation on
:class:`urllib2.ProxyHandler`.
"""
super(OpenCage, self).__init__(
scheme=scheme, timeout=timeout, proxies=proxies
)
self.api_key = api_key
self.domain = domain.strip('/')
self.scheme = scheme
self.api = '%s://%s/geocode/v1/json' % (self.scheme, self.domain)
def geocode(
self,
query,
bounds=None,
country=None,
language=None,
exactly_one=True,
timeout=None,
): # pylint: disable=W0221,R0913
"""
Geocode a location query.
:param string query: The query string to be geocoded; this must
be URL encoded.
:param string language: an IETF format language code (such as `es`
for Spanish or pt-BR for Brazilian Portuguese); if this is
omitted a code of `en` (English) will be assumed by the remote
service.
:param string bounds: Provides the geocoder with a hint to the region
that the query resides in. This value will help the geocoder
but will not restrict the possible results to the supplied
region. The bounds parameter should be specified as 4
coordinate points forming the south-west and north-east
corners of a bounding box. For example,
`bounds=-0.563160,51.280430,0.278970,51.683979`.
:param string country: Provides the geocoder with a hint to the
country that the query resides in. This value will help the
geocoder but will not restrict the possible results to the
supplied country. The country code is a 3 character code as
defined by the ISO 3166-1 Alpha 3 standard.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
"""
params = {
'key': self.api_key,
'q': self.format_string % query,
}
if bounds:
params['bounds'] = bounds
        if language:
            params['language'] = language
        if country:
            params['country'] = country
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout), exactly_one
)
def reverse(
self,
query,
language=None,
exactly_one=False,
timeout=None,
): # pylint: disable=W0221,R0913
"""
Given a point, find an address.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of (latitude,
longitude), or string as "%(latitude)s, %(longitude)s"
:param string language: The language in which to return results.
:param boolean exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
"""
params = {
'key': self.api_key,
'q': self._coerce_point_to_string(query),
}
if language:
params['language'] = language
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout), exactly_one
)
def _parse_json(self, page, exactly_one=True):
'''Returns location, (latitude, longitude) from json feed.'''
places = page.get('results', [])
if not len(places):
self._check_status(page.get('status'))
return None
def parse_place(place):
'''Get the location, lat, lng from a single json place.'''
location = place.get('formatted')
latitude = place['geometry']['lat']
longitude = place['geometry']['lng']
return Location(location, (latitude, longitude), place)
if exactly_one:
return parse_place(places[0])
else:
return [parse_place(place) for place in places]
@staticmethod
def _check_status(status):
"""
Validates error statuses.
"""
status_code = status['code']
if status_code == 429:
# Rate limit exceeded
raise GeocoderQuotaExceeded(
'The given key has gone over the requests limit in the 24'
' hour period or has submitted too many requests in too'
' short a period of time.'
)
if status_code == 200:
# When there are no results, just return.
return
if status_code == 403:
raise GeocoderQueryError(
'Your request was denied.'
)
else:
raise GeocoderQueryError('Unknown error.')
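# Hedged usage sketch (the API key and query strings are placeholders; see the
# geocode()/reverse() docstrings above for the full parameter list):
#
#   geocoder = OpenCage(api_key="your-opencage-key")
#   location = geocoder.geocode("82 Clerkenwell Road, London")
#   print(location.latitude, location.longitude)
#   places = geocoder.reverse("51.5226, -0.1005")   # list of Location objects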
| gpl-2.0 |
atiqueahmedziad/addons-server | src/olympia/legacy_discovery/views.py | 2 | 2723 | from django.db.transaction import non_atomic_requests
from django.forms.models import modelformset_factory
from django.shortcuts import redirect
from olympia import amo
from olympia.amo.utils import render
from olympia.zadmin.decorators import admin_required
from .forms import DiscoveryModuleForm
from .models import DiscoveryModule
from .modules import registry as module_registry
@non_atomic_requests
def promos(request, context, version, platform, compat_mode='strict'):
if platform:
platform = platform.lower()
platform = amo.PLATFORM_DICT.get(platform, amo.PLATFORM_ALL)
modules = get_modules(request, platform.api_name, version)
return render(request, 'addons/impala/homepage_promos.html',
{'modules': modules})
def get_modules(request, platform, version):
lang = request.LANG
qs = DiscoveryModule.objects.filter(app=request.APP.id)
# Remove any modules without a registered backend or an ordering.
modules = [m for m in qs
if m.module in module_registry and m.ordering is not None]
# Remove modules that specify a locales string we're not part of.
modules = [m for m in modules
if not m.locales or lang in m.locales.split()]
modules = sorted(modules, key=lambda x: x.ordering)
return [module_registry[m.module](request, platform, version)
for m in modules]
@admin_required
@non_atomic_requests
def module_admin(request):
APP = request.APP
# Custom sorting to drop ordering=NULL objects to the bottom.
qs = DiscoveryModule.objects.raw("""
SELECT * from discovery_modules WHERE app_id = %s
ORDER BY ordering IS NULL, ordering""", [APP.id])
qs.ordered = True # The formset looks for this.
_sync_db_and_registry(qs, APP.id)
Form = modelformset_factory(DiscoveryModule, form=DiscoveryModuleForm,
can_delete=True, extra=0)
formset = Form(request.POST or None, queryset=qs)
if request.method == 'POST' and formset.is_valid():
formset.save()
return redirect('discovery.module_admin')
return render(
request, 'legacy_discovery/module_admin.html', {'formset': formset})
def _sync_db_and_registry(qs, app_id):
"""Match up the module registry and DiscoveryModule rows in the db."""
existing = dict((m.module, m) for m in qs)
to_add = [m for m in module_registry if m not in existing]
to_delete = [m for m in existing if m not in module_registry]
for m in to_add:
DiscoveryModule.objects.get_or_create(module=m, app=app_id)
DiscoveryModule.objects.filter(module__in=to_delete, app=app_id).delete()
if to_add or to_delete:
qs._result_cache = None
| bsd-3-clause |
TomBaxter/osf.io | osf/models/tag.py | 28 | 1187 | from django.db import models
from .base import BaseModel
class TagManager(models.Manager):
"""Manager that filters out system tags by default.
"""
def get_queryset(self):
return super(TagManager, self).get_queryset().filter(system=False)
class Tag(BaseModel):
name = models.CharField(db_index=True, max_length=1024)
system = models.BooleanField(default=False)
objects = TagManager()
all_tags = models.Manager()
def __unicode__(self):
if self.system:
return 'System Tag: {}'.format(self.name)
return u'{}'.format(self.name)
def _natural_key(self):
return hash(self.name + str(self.system))
@property
def _id(self):
return self.name.lower()
@classmethod
def load(cls, data, system=False):
"""For compatibility with v1: the tag name used to be the _id,
so we make Tag.load('tagname') work as if `name` were the primary key.
"""
try:
return cls.all_tags.get(system=system, name=data)
except cls.DoesNotExist:
return None
class Meta:
unique_together = ('name', 'system')
ordering = ('name', )
| apache-2.0 |
rprata/boost | tools/build/src/util/set.py | 49 | 1240 | # (C) Copyright David Abrahams 2001. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
from utility import to_seq
def difference (b, a):
""" Returns the elements of B that are not in A.
"""
result = []
for element in b:
if not element in a:
result.append (element)
return result
def intersection (set1, set2):
""" Removes from set1 any items which don't appear in set2 and returns the result.
"""
result = []
for v in set1:
if v in set2:
result.append (v)
return result
def contains (small, large):
""" Returns true iff all elements of 'small' exist in 'large'.
"""
small = to_seq (small)
large = to_seq (large)
for s in small:
if not s in large:
return False
return True
def equal (a, b):
""" Returns True iff 'a' contains the same elements as 'b', irrespective of their order.
# TODO: Python 2.4 has a proper set class.
"""
return contains (a, b) and contains (b, a)
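# Illustrative self-check (not part of the original utility); run the module
# directly to exercise the helpers above.
if __name__ == '__main__':
    print (difference ([1, 2, 3], [2]))     # -> [1, 3]
    print (intersection ([1, 2], [2, 3]))   # -> [2]
    print (contains ([2], [1, 2, 3]))       # -> True
    print (equal ([1, 2], [2, 1]))          # -> True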
| gpl-2.0 |
Lujeni/ansible | lib/ansible/modules/network/cloudengine/ce_interface_ospf.py | 8 | 30951 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_interface_ospf
version_added: "2.4"
short_description: Manages configuration of an OSPF interface instance on HUAWEI CloudEngine switches.
description:
    - Manages configuration of an OSPF interface instance on HUAWEI CloudEngine switches.
author: QijunPan (@QijunPan)
notes:
- This module requires the netconf system service be enabled on the remote device being managed.
- Recommended connection is C(netconf).
- This module also works with C(local) connections for legacy playbooks.
options:
interface:
description:
- Full name of interface, i.e. 40GE1/0/10.
required: true
process_id:
description:
- Specifies a process ID.
The value is an integer ranging from 1 to 4294967295.
required: true
area:
description:
- Ospf area associated with this ospf process.
Valid values are a string, formatted as an IP address
(i.e. "0.0.0.0") or as an integer between 1 and 4294967295.
required: true
cost:
description:
- The cost associated with this interface.
Valid values are an integer in the range from 1 to 65535.
hello_interval:
description:
- Time between sending successive hello packets.
Valid values are an integer in the range from 1 to 65535.
dead_interval:
description:
- Time interval an ospf neighbor waits for a hello
packet before tearing down adjacencies. Valid values are an
integer in the range from 1 to 235926000.
silent_interface:
description:
- Setting to true will prevent this interface from receiving
HELLO packets. Valid values are 'true' and 'false'.
type: bool
default: 'no'
auth_mode:
description:
- Specifies the authentication type.
choices: ['none', 'null', 'hmac-sha256', 'md5', 'hmac-md5', 'simple']
auth_text_simple:
description:
- Specifies a password for simple authentication.
The value is a string of 1 to 8 characters.
auth_key_id:
description:
            - Authentication key id when C(auth_mode) is 'hmac-sha256', 'md5' or 'hmac-md5'.
              Valid value is an integer in the range from 1 to 255.
auth_text_md5:
description:
- Specifies a password for MD5, HMAC-MD5, or HMAC-SHA256 authentication.
The value is a string of 1 to 255 case-sensitive characters, spaces not supported.
state:
description:
- Determines whether the config should be present or not
on the device.
default: present
choices: ['present','absent']
"""
EXAMPLES = '''
- name: eth_trunk module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Enables OSPF and sets the cost on an interface
ce_interface_ospf:
interface: 10GE1/0/30
process_id: 1
area: 100
cost: 100
provider: '{{ cli }}'
- name: Sets the dead interval of the OSPF neighbor
ce_interface_ospf:
interface: 10GE1/0/30
process_id: 1
area: 100
dead_interval: 100
provider: '{{ cli }}'
- name: Sets the interval for sending Hello packets on an interface
ce_interface_ospf:
interface: 10GE1/0/30
process_id: 1
area: 100
hello_interval: 2
provider: '{{ cli }}'
- name: Disables an interface from receiving and sending OSPF packets
ce_interface_ospf:
interface: 10GE1/0/30
process_id: 1
area: 100
silent_interface: true
provider: '{{ cli }}'
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"process_id": "1", "area": "0.0.0.100", "interface": "10GE1/0/30", "cost": "100"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"process_id": "1", "area": "0.0.0.100"}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"process_id": "1", "area": "0.0.0.100", "interface": "10GE1/0/30",
"cost": "100", "dead_interval": "40", "hello_interval": "10",
"silent_interface": "false", "auth_mode": "none"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["interface 10GE1/0/30",
"ospf enable 1 area 0.0.0.100",
"ospf cost 100"]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec
CE_NC_GET_OSPF = """
<filter type="subtree">
<ospfv2 xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ospfv2comm>
<ospfSites>
<ospfSite>
<processId>%s</processId>
<routerId></routerId>
<vrfName></vrfName>
<areas>
<area>
<areaId>%s</areaId>
<interfaces>
<interface>
<ifName>%s</ifName>
<networkType></networkType>
<helloInterval></helloInterval>
<deadInterval></deadInterval>
<silentEnable></silentEnable>
<configCost></configCost>
<authenticationMode></authenticationMode>
<authTextSimple></authTextSimple>
<keyId></keyId>
<authTextMd5></authTextMd5>
</interface>
</interfaces>
</area>
</areas>
</ospfSite>
</ospfSites>
</ospfv2comm>
</ospfv2>
</filter>
"""
CE_NC_XML_BUILD_PROCESS = """
<config>
<ospfv2 xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ospfv2comm>
<ospfSites>
<ospfSite>
<processId>%s</processId>
<areas>
<area>
<areaId>%s</areaId>
%s
</area>
</areas>
</ospfSite>
</ospfSites>
</ospfv2comm>
</ospfv2>
</config>
"""
CE_NC_XML_BUILD_MERGE_INTF = """
<interfaces>
<interface operation="merge">
%s
</interface>
</interfaces>
"""
CE_NC_XML_BUILD_DELETE_INTF = """
<interfaces>
<interface operation="delete">
%s
</interface>
</interfaces>
"""
CE_NC_XML_SET_IF_NAME = """
<ifName>%s</ifName>
"""
CE_NC_XML_SET_HELLO = """
<helloInterval>%s</helloInterval>
"""
CE_NC_XML_SET_DEAD = """
<deadInterval>%s</deadInterval>
"""
CE_NC_XML_SET_SILENT = """
<silentEnable>%s</silentEnable>
"""
CE_NC_XML_SET_COST = """
<configCost>%s</configCost>
"""
CE_NC_XML_SET_AUTH_MODE = """
<authenticationMode>%s</authenticationMode>
"""
CE_NC_XML_SET_AUTH_TEXT_SIMPLE = """
<authTextSimple>%s</authTextSimple>
"""
CE_NC_XML_SET_AUTH_MD5 = """
<keyId>%s</keyId>
<authTextMd5>%s</authTextMd5>
"""
def get_interface_type(interface):
"""Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
if interface is None:
return None
if interface.upper().startswith('GE'):
return 'ge'
elif interface.upper().startswith('10GE'):
return '10ge'
elif interface.upper().startswith('25GE'):
return '25ge'
elif interface.upper().startswith('4X10GE'):
return '4x10ge'
elif interface.upper().startswith('40GE'):
return '40ge'
elif interface.upper().startswith('100GE'):
return '100ge'
elif interface.upper().startswith('VLANIF'):
return 'vlanif'
elif interface.upper().startswith('LOOPBACK'):
return 'loopback'
elif interface.upper().startswith('METH'):
return 'meth'
elif interface.upper().startswith('ETH-TRUNK'):
return 'eth-trunk'
elif interface.upper().startswith('VBDIF'):
return 'vbdif'
elif interface.upper().startswith('NVE'):
return 'nve'
elif interface.upper().startswith('TUNNEL'):
return 'tunnel'
elif interface.upper().startswith('ETHERNET'):
return 'ethernet'
elif interface.upper().startswith('FCOE-PORT'):
return 'fcoe-port'
elif interface.upper().startswith('FABRIC-PORT'):
return 'fabric-port'
elif interface.upper().startswith('STACK-PORT'):
return 'stack-port'
elif interface.upper().startswith('NULL'):
return 'null'
else:
return None
def is_valid_v4addr(addr):
"""check is ipv4 addr is valid"""
if not addr:
return False
if addr.find('.') != -1:
addr_list = addr.split('.')
if len(addr_list) != 4:
return False
for each_num in addr_list:
if not each_num.isdigit():
return False
if int(each_num) > 255:
return False
return True
return False
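# A few example evaluations of the helper above:
#   is_valid_v4addr("10.1.1.1")  -> True
#   is_valid_v4addr("300.1.1.1") -> False   (octet out of range)
#   is_valid_v4addr("abc")       -> False   (no dotted structure)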
class InterfaceOSPF(object):
"""
Manages configuration of an OSPF interface instance.
"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# module input info
self.interface = self.module.params['interface']
self.process_id = self.module.params['process_id']
self.area = self.module.params['area']
self.cost = self.module.params['cost']
self.hello_interval = self.module.params['hello_interval']
self.dead_interval = self.module.params['dead_interval']
self.silent_interface = self.module.params['silent_interface']
self.auth_mode = self.module.params['auth_mode']
self.auth_text_simple = self.module.params['auth_text_simple']
self.auth_key_id = self.module.params['auth_key_id']
self.auth_text_md5 = self.module.params['auth_text_md5']
self.state = self.module.params['state']
# ospf info
self.ospf_info = dict()
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def init_module(self):
"""init module"""
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=True)
def netconf_set_config(self, xml_str, xml_name):
"""netconf set config"""
rcv_xml = set_nc_config(self.module, xml_str)
if "<ok/>" not in rcv_xml:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def get_area_ip(self):
"""convert integer to ip address"""
if not self.area.isdigit():
return self.area
addr_int = ['0'] * 4
addr_int[0] = str(((int(self.area) & 0xFF000000) >> 24) & 0xFF)
addr_int[1] = str(((int(self.area) & 0x00FF0000) >> 16) & 0xFF)
addr_int[2] = str(((int(self.area) & 0x0000FF00) >> 8) & 0XFF)
addr_int[3] = str(int(self.area) & 0xFF)
return '.'.join(addr_int)
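    # Worked example: with self.area = "100" the bit shifts above produce
    # ['0', '0', '0', '100'], i.e. the dotted area id "0.0.0.100"; a value that is
    # already dotted (non-digit) is returned unchanged by the early return.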
def get_ospf_dict(self):
""" get one ospf attributes dict."""
ospf_info = dict()
conf_str = CE_NC_GET_OSPF % (
self.process_id, self.get_area_ip(), self.interface)
rcv_xml = get_nc_config(self.module, conf_str)
if "<data/>" in rcv_xml:
return ospf_info
xml_str = rcv_xml.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
# get process base info
root = ElementTree.fromstring(xml_str)
ospfsite = root.find("ospfv2/ospfv2comm/ospfSites/ospfSite")
if not ospfsite:
self.module.fail_json(msg="Error: ospf process does not exist.")
for site in ospfsite:
if site.tag in ["processId", "routerId", "vrfName"]:
ospf_info[site.tag] = site.text
# get areas info
ospf_info["areaId"] = ""
areas = root.find(
"ospfv2/ospfv2comm/ospfSites/ospfSite/areas/area")
if areas:
for area in areas:
if area.tag == "areaId":
ospf_info["areaId"] = area.text
break
# get interface info
ospf_info["interface"] = dict()
intf = root.find(
"ospfv2/ospfv2comm/ospfSites/ospfSite/areas/area/interfaces/interface")
if intf:
for attr in intf:
if attr.tag in ["ifName", "networkType",
"helloInterval", "deadInterval",
"silentEnable", "configCost",
"authenticationMode", "authTextSimple",
"keyId", "authTextMd5"]:
ospf_info["interface"][attr.tag] = attr.text
return ospf_info
def set_ospf_interface(self):
"""set interface ospf enable, and set its ospf attributes"""
xml_intf = CE_NC_XML_SET_IF_NAME % self.interface
# ospf view
self.updates_cmd.append("ospf %s" % self.process_id)
self.updates_cmd.append("area %s" % self.get_area_ip())
if self.silent_interface:
xml_intf += CE_NC_XML_SET_SILENT % str(self.silent_interface).lower()
if self.silent_interface:
self.updates_cmd.append("silent-interface %s" % self.interface)
else:
self.updates_cmd.append("undo silent-interface %s" % self.interface)
# interface view
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append("ospf enable %s area %s" % (
self.process_id, self.get_area_ip()))
if self.cost:
xml_intf += CE_NC_XML_SET_COST % self.cost
self.updates_cmd.append("ospf cost %s" % self.cost)
if self.hello_interval:
xml_intf += CE_NC_XML_SET_HELLO % self.hello_interval
self.updates_cmd.append("ospf timer hello %s" %
self.hello_interval)
if self.dead_interval:
xml_intf += CE_NC_XML_SET_DEAD % self.dead_interval
self.updates_cmd.append("ospf timer dead %s" % self.dead_interval)
if self.auth_mode:
xml_intf += CE_NC_XML_SET_AUTH_MODE % self.auth_mode
if self.auth_mode == "none":
self.updates_cmd.append("undo ospf authentication-mode")
else:
self.updates_cmd.append("ospf authentication-mode %s" % self.auth_mode)
if self.auth_mode == "simple" and self.auth_text_simple:
xml_intf += CE_NC_XML_SET_AUTH_TEXT_SIMPLE % self.auth_text_simple
self.updates_cmd.pop()
self.updates_cmd.append("ospf authentication-mode %s %s"
% (self.auth_mode, self.auth_text_simple))
elif self.auth_mode in ["hmac-sha256", "md5", "hmac-md5"] and self.auth_key_id:
xml_intf += CE_NC_XML_SET_AUTH_MD5 % (
self.auth_key_id, self.auth_text_md5)
self.updates_cmd.pop()
self.updates_cmd.append("ospf authentication-mode %s %s %s"
% (self.auth_mode, self.auth_key_id, self.auth_text_md5))
else:
pass
xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id,
self.get_area_ip(),
(CE_NC_XML_BUILD_MERGE_INTF % xml_intf))
self.netconf_set_config(xml_str, "SET_INTERFACE_OSPF")
self.changed = True
def merge_ospf_interface(self):
"""merge interface ospf attributes"""
intf_dict = self.ospf_info["interface"]
# ospf view
xml_ospf = ""
if intf_dict.get("silentEnable") != str(self.silent_interface).lower():
xml_ospf += CE_NC_XML_SET_SILENT % str(self.silent_interface).lower()
self.updates_cmd.append("ospf %s" % self.process_id)
self.updates_cmd.append("area %s" % self.get_area_ip())
if self.silent_interface:
self.updates_cmd.append("silent-interface %s" % self.interface)
else:
self.updates_cmd.append("undo silent-interface %s" % self.interface)
# interface view
xml_intf = ""
self.updates_cmd.append("interface %s" % self.interface)
if self.cost and intf_dict.get("configCost") != self.cost:
xml_intf += CE_NC_XML_SET_COST % self.cost
self.updates_cmd.append("ospf cost %s" % self.cost)
if self.hello_interval and intf_dict.get("helloInterval") != self.hello_interval:
xml_intf += CE_NC_XML_SET_HELLO % self.hello_interval
self.updates_cmd.append("ospf timer hello %s" %
self.hello_interval)
if self.dead_interval and intf_dict.get("deadInterval") != self.dead_interval:
xml_intf += CE_NC_XML_SET_DEAD % self.dead_interval
self.updates_cmd.append("ospf timer dead %s" % self.dead_interval)
if self.auth_mode:
# NOTE: for security, authentication config will always be update
xml_intf += CE_NC_XML_SET_AUTH_MODE % self.auth_mode
if self.auth_mode == "none":
self.updates_cmd.append("undo ospf authentication-mode")
else:
self.updates_cmd.append("ospf authentication-mode %s" % self.auth_mode)
if self.auth_mode == "simple" and self.auth_text_simple:
xml_intf += CE_NC_XML_SET_AUTH_TEXT_SIMPLE % self.auth_text_simple
self.updates_cmd.pop()
self.updates_cmd.append("ospf authentication-mode %s %s"
% (self.auth_mode, self.auth_text_simple))
elif self.auth_mode in ["hmac-sha256", "md5", "hmac-md5"] and self.auth_key_id:
xml_intf += CE_NC_XML_SET_AUTH_MD5 % (
self.auth_key_id, self.auth_text_md5)
self.updates_cmd.pop()
self.updates_cmd.append("ospf authentication-mode %s %s %s"
% (self.auth_mode, self.auth_key_id, self.auth_text_md5))
else:
pass
if not xml_intf:
self.updates_cmd.pop() # remove command: interface
if not xml_ospf and not xml_intf:
return
xml_sum = CE_NC_XML_SET_IF_NAME % self.interface
xml_sum += xml_ospf + xml_intf
xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id,
self.get_area_ip(),
(CE_NC_XML_BUILD_MERGE_INTF % xml_sum))
self.netconf_set_config(xml_str, "MERGE_INTERFACE_OSPF")
self.changed = True
def unset_ospf_interface(self):
"""set interface ospf disable, and all its ospf attributes will be removed"""
intf_dict = self.ospf_info["interface"]
xml_sum = ""
xml_intf = CE_NC_XML_SET_IF_NAME % self.interface
if intf_dict.get("silentEnable") == "true":
xml_sum += CE_NC_XML_BUILD_MERGE_INTF % (
xml_intf + (CE_NC_XML_SET_SILENT % "false"))
self.updates_cmd.append("ospf %s" % self.process_id)
self.updates_cmd.append("area %s" % self.get_area_ip())
self.updates_cmd.append(
"undo silent-interface %s" % self.interface)
xml_sum += CE_NC_XML_BUILD_DELETE_INTF % xml_intf
xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id,
self.get_area_ip(),
xml_sum)
self.netconf_set_config(xml_str, "DELETE_INTERFACE_OSPF")
self.updates_cmd.append("undo ospf cost")
self.updates_cmd.append("undo ospf timer hello")
self.updates_cmd.append("undo ospf timer dead")
self.updates_cmd.append("undo ospf authentication-mode")
self.updates_cmd.append("undo ospf enable %s area %s" % (
self.process_id, self.get_area_ip()))
self.changed = True
def check_params(self):
"""Check all input params"""
self.interface = self.interface.replace(" ", "").upper()
# interface check
if not get_interface_type(self.interface):
self.module.fail_json(msg="Error: interface is invalid.")
# process_id check
if not self.process_id.isdigit():
self.module.fail_json(msg="Error: process_id is not digit.")
if int(self.process_id) < 1 or int(self.process_id) > 4294967295:
self.module.fail_json(msg="Error: process_id must be an integer between 1 and 4294967295.")
# area check
if self.area.isdigit():
if int(self.area) < 0 or int(self.area) > 4294967295:
self.module.fail_json(msg="Error: area id (Integer) must be between 0 and 4294967295.")
else:
if not is_valid_v4addr(self.area):
self.module.fail_json(msg="Error: area id is invalid.")
# area authentication check
if self.state == "present":
if self.auth_mode:
if self.auth_mode == "simple":
if self.auth_text_simple and len(self.auth_text_simple) > 8:
self.module.fail_json(
msg="Error: auth_text_simple is not in the range from 1 to 8.")
if self.auth_mode in ["hmac-sha256", "hmac-sha256", "md5"]:
if self.auth_key_id and not self.auth_text_md5:
self.module.fail_json(
msg='Error: auth_key_id and auth_text_md5 should be set at the same time.')
if not self.auth_key_id and self.auth_text_md5:
self.module.fail_json(
msg='Error: auth_key_id and auth_text_md5 should be set at the same time.')
if self.auth_key_id:
if not self.auth_key_id.isdigit():
self.module.fail_json(
msg="Error: auth_key_id is not digit.")
if int(self.auth_key_id) < 1 or int(self.auth_key_id) > 255:
self.module.fail_json(
msg="Error: auth_key_id is not in the range from 1 to 255.")
if self.auth_text_md5 and len(self.auth_text_md5) > 255:
self.module.fail_json(
msg="Error: auth_text_md5 is not in the range from 1 to 255.")
# cost check
if self.cost:
if not self.cost.isdigit():
self.module.fail_json(msg="Error: cost is not digit.")
if int(self.cost) < 1 or int(self.cost) > 65535:
self.module.fail_json(
msg="Error: cost is not in the range from 1 to 65535")
# hello_interval check
if self.hello_interval:
if not self.hello_interval.isdigit():
self.module.fail_json(
msg="Error: hello_interval is not digit.")
if int(self.hello_interval) < 1 or int(self.hello_interval) > 65535:
self.module.fail_json(
msg="Error: hello_interval is not in the range from 1 to 65535")
# dead_interval check
if self.dead_interval:
if not self.dead_interval.isdigit():
self.module.fail_json(msg="Error: dead_interval is not digit.")
if int(self.dead_interval) < 1 or int(self.dead_interval) > 235926000:
self.module.fail_json(
msg="Error: dead_interval is not in the range from 1 to 235926000")
def get_proposed(self):
"""get proposed info"""
self.proposed["interface"] = self.interface
self.proposed["process_id"] = self.process_id
self.proposed["area"] = self.get_area_ip()
self.proposed["cost"] = self.cost
self.proposed["hello_interval"] = self.hello_interval
self.proposed["dead_interval"] = self.dead_interval
self.proposed["silent_interface"] = self.silent_interface
if self.auth_mode:
self.proposed["auth_mode"] = self.auth_mode
if self.auth_mode == "simple":
self.proposed["auth_text_simple"] = self.auth_text_simple
if self.auth_mode in ["hmac-sha256", "hmac-sha256", "md5"]:
self.proposed["auth_key_id"] = self.auth_key_id
self.proposed["auth_text_md5"] = self.auth_text_md5
self.proposed["state"] = self.state
def get_existing(self):
"""get existing info"""
if not self.ospf_info:
return
if self.ospf_info["interface"]:
self.existing["interface"] = self.interface
self.existing["cost"] = self.ospf_info["interface"].get("configCost")
self.existing["hello_interval"] = self.ospf_info["interface"].get("helloInterval")
self.existing["dead_interval"] = self.ospf_info["interface"].get("deadInterval")
self.existing["silent_interface"] = self.ospf_info["interface"].get("silentEnable")
self.existing["auth_mode"] = self.ospf_info["interface"].get("authenticationMode")
self.existing["auth_text_simple"] = self.ospf_info["interface"].get("authTextSimple")
self.existing["auth_key_id"] = self.ospf_info["interface"].get("keyId")
self.existing["auth_text_md5"] = self.ospf_info["interface"].get("authTextMd5")
self.existing["process_id"] = self.ospf_info["processId"]
self.existing["area"] = self.ospf_info["areaId"]
def get_end_state(self):
"""get end state info"""
ospf_info = self.get_ospf_dict()
if not ospf_info:
return
if ospf_info["interface"]:
self.end_state["interface"] = self.interface
self.end_state["cost"] = ospf_info["interface"].get("configCost")
self.end_state["hello_interval"] = ospf_info["interface"].get("helloInterval")
self.end_state["dead_interval"] = ospf_info["interface"].get("deadInterval")
self.end_state["silent_interface"] = ospf_info["interface"].get("silentEnable")
self.end_state["auth_mode"] = ospf_info["interface"].get("authenticationMode")
self.end_state["auth_text_simple"] = ospf_info["interface"].get("authTextSimple")
self.end_state["auth_key_id"] = ospf_info["interface"].get("keyId")
self.end_state["auth_text_md5"] = ospf_info["interface"].get("authTextMd5")
self.end_state["process_id"] = ospf_info["processId"]
self.end_state["area"] = ospf_info["areaId"]
def work(self):
"""worker"""
self.check_params()
self.ospf_info = self.get_ospf_dict()
self.get_existing()
self.get_proposed()
# deal present or absent
if self.state == "present":
if not self.ospf_info or not self.ospf_info["interface"]:
# create ospf area and set interface config
self.set_ospf_interface()
else:
# merge interface ospf area config
self.merge_ospf_interface()
else:
if self.ospf_info and self.ospf_info["interface"]:
# delete interface ospf area config
self.unset_ospf_interface()
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
"""Module main"""
argument_spec = dict(
interface=dict(required=True, type='str'),
process_id=dict(required=True, type='str'),
area=dict(required=True, type='str'),
cost=dict(required=False, type='str'),
hello_interval=dict(required=False, type='str'),
dead_interval=dict(required=False, type='str'),
silent_interface=dict(required=False, default=False, type='bool'),
auth_mode=dict(required=False,
choices=['none', 'null', 'hmac-sha256', 'md5', 'hmac-md5', 'simple'], type='str'),
auth_text_simple=dict(required=False, type='str', no_log=True),
auth_key_id=dict(required=False, type='str'),
auth_text_md5=dict(required=False, type='str', no_log=True),
state=dict(required=False, default='present',
choices=['present', 'absent'])
)
argument_spec.update(ce_argument_spec)
module = InterfaceOSPF(argument_spec)
module.work()
if __name__ == '__main__':
main()
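# Illustrative usage only (not part of the module); assumes the module is
# published as ce_interface_ospf and that connection details are supplied via
# the standard CloudEngine provider arguments:
#
#   - name: Enable OSPF with MD5 authentication on 10GE1/0/30
#     ce_interface_ospf:
#       interface: 10GE1/0/30
#       process_id: "1"
#       area: "100"
#       auth_mode: md5
#       auth_key_id: "16"
#       auth_text_md5: change_me
#       state: present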
| gpl-3.0 |
christoph-buente/phantomjs | src/qt/qtwebkit/Source/ThirdParty/gtest/test/gtest_test_utils.py | 227 | 10685 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = '[email protected] (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'gtest_source_dir': os.path.dirname(sys.argv[0]),
'gtest_build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
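# Illustrative example (comments only): given GTEST_BUILD_DIR=/tmp/out in the
# environment and --gtest_build_dir=/tmp/override on the command line,
# _ParseAndStripGTestFlags(sys.argv) leaves _flag_map['gtest_build_dir'] set
# to '/tmp/override' and removes the flag from sys.argv, so that
# unittest.main() never sees it.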
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('gtest_source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('gtest_build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary. Please make sure to provide path\n'
'to the binary via the --gtest_build_dir flag or the GTEST_BUILD_DIR\n'
'environment variable. For convenient use, invoke this script via\n'
'mk_test.py.\n'
# TODO([email protected]): change mk_test.py to test.py after renaming
# the file.
'Please run mk_test.py -h for help.')
print >> sys.stderr, message
sys.exit(1)
return path
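# Example (illustrative): on Windows/Cygwin the '.exe' suffix is appended
# automatically, so both of the following resolve to the same binary:
#   GetTestExecutablePath('gtest_unittest')
#   GetTestExecutablePath('gtest_unittest.exe')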
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
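# Example (illustrative): on Unix, os.system('exit 3') typically returns the
# wait status 3 << 8 == 768, and GetExitStatus(768) recovers 3 via
# os.WEXITSTATUS(); on Windows the value is already the exit code, so it is
# returned unchanged.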
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
      An object that represents the outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
        signal                 Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
    # The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
      # communicate() returns a (stdout, stderr) tuple; the first element
      # holds the child's output, with stderr merged in when capture_stderr
      # is True.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest:
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
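# Illustrative usage of Subprocess (comments only, paths are examples):
#   p = Subprocess(['/bin/echo', 'hello'])
#   if p.exited and p.exit_code == 0:
#     print p.output                    # 'hello\n' (stderr merged by default)
#   elif p.terminated_by_signal:
#     print 'killed by signal', p.signal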
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO([email protected]): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
| bsd-3-clause |
signed/intellij-community | python/helpers/py3only/docutils/languages/da.py | 50 | 1872 | # -*- coding: utf-8 -*-
# $Id: da.py 7678 2013-07-03 09:57:36Z milde $
# Author: E D
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Danish-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
# fixed: language-dependent
'author': 'Forfatter',
'authors': 'Forfattere',
'organization': 'Organisation',
'address': 'Adresse',
'contact': 'Kontakt',
'version': 'Version',
'revision': 'Revision',
'status': 'Status',
'date': 'Dato',
'copyright': 'Copyright',
'dedication': 'Dedikation',
'abstract': 'Resumé',
'attention': 'Giv agt!',
'caution': 'Pas på!',
'danger': '!FARE!',
'error': 'Fejl',
'hint': 'Vink',
'important': 'Vigtigt',
'note': 'Bemærk',
'tip': 'Tips',
'warning': 'Advarsel',
'contents': 'Indhold'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
# language-dependent: fixed
'forfatter': 'author',
'forfattere': 'authors',
'organisation': 'organization',
'adresse': 'address',
'kontakt': 'contact',
'version': 'version',
'revision': 'revision',
'status': 'status',
'dato': 'date',
'copyright': 'copyright',
'dedikation': 'dedication',
'resume': 'abstract',
'resumé': 'abstract'}
"""Danish (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| apache-2.0 |
elit3ge/SickRage | lib/sqlalchemy/orm/mapper.py | 75 | 108686 | # orm/mapper.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Logic to map Python classes to and from selectables.
Defines the :class:`~sqlalchemy.orm.mapper.Mapper` class, the central
configurational unit which associates a class with a database table.
This is a semi-private module; the main configurational API of the ORM is
available in :class:`~sqlalchemy.orm.`.
"""
from __future__ import absolute_import
import types
import weakref
from itertools import chain
from collections import deque
from .. import sql, util, log, exc as sa_exc, event, schema, inspection
from ..sql import expression, visitors, operators, util as sql_util
from . import instrumentation, attributes, exc as orm_exc, loading
from . import properties
from .interfaces import MapperProperty, _InspectionAttr, _MappedAttribute
from .base import _class_to_mapper, _state_mapper, class_mapper, \
state_str, _INSTRUMENTOR
from .path_registry import PathRegistry
import sys
_mapper_registry = weakref.WeakKeyDictionary()
_already_compiling = False
_memoized_configured_property = util.group_expirable_memoized_property()
# a constant returned by _get_attr_by_column to indicate
# this mapper is not handling an attribute for a particular
# column
NO_ATTRIBUTE = util.symbol('NO_ATTRIBUTE')
# lock used to synchronize the "mapper configure" step
_CONFIGURE_MUTEX = util.threading.RLock()
@inspection._self_inspects
@log.class_logger
class Mapper(_InspectionAttr):
"""Define the correlation of class attributes to database table
columns.
The :class:`.Mapper` object is instantiated using the
:func:`~sqlalchemy.orm.mapper` function. For information
about instantiating new :class:`.Mapper` objects, see
that function's documentation.
When :func:`.mapper` is used
explicitly to link a user defined class with table
metadata, this is referred to as *classical mapping*.
Modern SQLAlchemy usage tends to favor the
:mod:`sqlalchemy.ext.declarative` extension for class
configuration, which
makes usage of :func:`.mapper` behind the scenes.
Given a particular class known to be mapped by the ORM,
the :class:`.Mapper` which maintains it can be acquired
using the :func:`.inspect` function::
from sqlalchemy import inspect
mapper = inspect(MyClass)
A class which was mapped by the :mod:`sqlalchemy.ext.declarative`
extension will also have its mapper available via the ``__mapper__``
attribute.
"""
_new_mappers = False
def __init__(self,
class_,
local_table=None,
properties=None,
primary_key=None,
non_primary=False,
inherits=None,
inherit_condition=None,
inherit_foreign_keys=None,
extension=None,
order_by=False,
always_refresh=False,
version_id_col=None,
version_id_generator=None,
polymorphic_on=None,
_polymorphic_map=None,
polymorphic_identity=None,
concrete=False,
with_polymorphic=None,
allow_partial_pks=True,
batch=True,
column_prefix=None,
include_properties=None,
exclude_properties=None,
passive_updates=True,
confirm_deleted_rows=True,
eager_defaults=False,
legacy_is_orphan=False,
_compiled_cache_size=100,
):
"""Return a new :class:`~.Mapper` object.
This function is typically used behind the scenes
via the Declarative extension. When using Declarative,
many of the usual :func:`.mapper` arguments are handled
by the Declarative extension itself, including ``class_``,
``local_table``, ``properties``, and ``inherits``.
Other options are passed to :func:`.mapper` using
the ``__mapper_args__`` class variable::
class MyClass(Base):
__tablename__ = 'my_table'
id = Column(Integer, primary_key=True)
type = Column(String(50))
alt = Column("some_alt", Integer)
__mapper_args__ = {
'polymorphic_on' : type
}
Explicit use of :func:`.mapper`
is often referred to as *classical mapping*. The above
declarative example is equivalent in classical form to::
my_table = Table("my_table", metadata,
Column('id', Integer, primary_key=True),
Column('type', String(50)),
Column("some_alt", Integer)
)
class MyClass(object):
pass
mapper(MyClass, my_table,
polymorphic_on=my_table.c.type,
properties={
'alt':my_table.c.some_alt
})
.. seealso::
:ref:`classical_mapping` - discussion of direct usage of
:func:`.mapper`
:param class\_: The class to be mapped. When using Declarative,
this argument is automatically passed as the declared class
itself.
:param local_table: The :class:`.Table` or other selectable
to which the class is mapped. May be ``None`` if
this mapper inherits from another mapper using single-table
inheritance. When using Declarative, this argument is
automatically passed by the extension, based on what
is configured via the ``__table__`` argument or via the
:class:`.Table` produced as a result of the ``__tablename__``
and :class:`.Column` arguments present.
:param always_refresh: If True, all query operations for this mapped
class will overwrite all data within object instances that already
exist within the session, erasing any in-memory changes with
whatever information was loaded from the database. Usage of this
flag is highly discouraged; as an alternative, see the method
:meth:`.Query.populate_existing`.
:param allow_partial_pks: Defaults to True. Indicates that a
composite primary key with some NULL values should be considered as
possibly existing within the database. This affects whether a
mapper will assign an incoming row to an existing identity, as well
as if :meth:`.Session.merge` will check the database first for a
particular primary key value. A "partial primary key" can occur if
one has mapped to an OUTER JOIN, for example.
:param batch: Defaults to ``True``, indicating that save operations
of multiple entities can be batched together for efficiency.
Setting to False indicates
that an instance will be fully saved before saving the next
instance. This is used in the extremely rare case that a
:class:`.MapperEvents` listener requires being called
in between individual row persistence operations.
:param column_prefix: A string which will be prepended
to the mapped attribute name when :class:`.Column`
objects are automatically assigned as attributes to the
mapped class. Does not affect explicitly specified
column-based properties.
See the section :ref:`column_prefix` for an example.
:param concrete: If True, indicates this mapper should use concrete
table inheritance with its parent mapper.
See the section :ref:`concrete_inheritance` for an example.
:param confirm_deleted_rows: defaults to True; when a DELETE occurs
          of one or more rows based on specific primary keys, a warning is
emitted when the number of rows matched does not equal the number
of rows expected. This parameter may be set to False to handle the case
where database ON DELETE CASCADE rules may be deleting some of those
rows automatically. The warning may be changed to an exception
in a future release.
.. versionadded:: 0.9.4 - added :paramref:`.mapper.confirm_deleted_rows`
as well as conditional matched row checking on delete.
:param eager_defaults: if True, the ORM will immediately fetch the
value of server-generated default values after an INSERT or UPDATE,
rather than leaving them as expired to be fetched on next access.
This can be used for event schemes where the server-generated values
are needed immediately before the flush completes. By default,
this scheme will emit an individual ``SELECT`` statement per row
          inserted or updated, which can add significant performance
overhead. However, if the
target database supports :term:`RETURNING`, the default values will be
returned inline with the INSERT or UPDATE statement, which can
greatly enhance performance for an application that needs frequent
access to just-generated server defaults.
.. versionchanged:: 0.9.0 The ``eager_defaults`` option can now
make use of :term:`RETURNING` for backends which support it.
:param exclude_properties: A list or set of string column names to
be excluded from mapping.
See :ref:`include_exclude_cols` for an example.
:param extension: A :class:`.MapperExtension` instance or
list of :class:`.MapperExtension` instances which will be applied
to all operations by this :class:`.Mapper`. **Deprecated.**
Please see :class:`.MapperEvents`.
:param include_properties: An inclusive list or set of string column
names to map.
See :ref:`include_exclude_cols` for an example.
:param inherits: A mapped class or the corresponding :class:`.Mapper`
of one indicating a superclass to which this :class:`.Mapper`
should *inherit* from. The mapped class here must be a subclass
of the other mapper's class. When using Declarative, this argument
is passed automatically as a result of the natural class
hierarchy of the declared classes.
.. seealso::
:ref:`inheritance_toplevel`
:param inherit_condition: For joined table inheritance, a SQL
expression which will
define how the two tables are joined; defaults to a natural join
between the two tables.
:param inherit_foreign_keys: When ``inherit_condition`` is used and the
columns present are missing a :class:`.ForeignKey` configuration,
this parameter can be used to specify which columns are "foreign".
In most cases can be left as ``None``.
:param legacy_is_orphan: Boolean, defaults to ``False``.
When ``True``, specifies that "legacy" orphan consideration
is to be applied to objects mapped by this mapper, which means
that a pending (that is, not persistent) object is auto-expunged
from an owning :class:`.Session` only when it is de-associated
from *all* parents that specify a ``delete-orphan`` cascade towards
this mapper. The new default behavior is that the object is auto-expunged
when it is de-associated with *any* of its parents that specify
``delete-orphan`` cascade. This behavior is more consistent with
that of a persistent object, and allows behavior to be consistent
in more scenarios independently of whether or not an orphanable
object has been flushed yet or not.
See the change note and example at :ref:`legacy_is_orphan_addition`
for more detail on this change.
.. versionadded:: 0.8 - the consideration of a pending object as
an "orphan" has been modified to more closely match the
behavior as that of persistent objects, which is that the object
is expunged from the :class:`.Session` as soon as it is
de-associated from any of its orphan-enabled parents. Previously,
the pending object would be expunged only if de-associated
from all of its orphan-enabled parents. The new flag ``legacy_is_orphan``
is added to :func:`.orm.mapper` which re-establishes the
legacy behavior.
:param non_primary: Specify that this :class:`.Mapper` is in addition
to the "primary" mapper, that is, the one used for persistence.
The :class:`.Mapper` created here may be used for ad-hoc
mapping of the class to an alternate selectable, for loading
only.
:paramref:`.Mapper.non_primary` is not an often used option, but
is useful in some specific :func:`.relationship` cases.
.. seealso::
:ref:`relationship_non_primary_mapper`
:param order_by: A single :class:`.Column` or list of :class:`.Column`
objects for which selection operations should use as the default
ordering for entities. By default mappers have no pre-defined
ordering.
:param passive_updates: Indicates UPDATE behavior of foreign key
columns when a primary key column changes on a joined-table
inheritance mapping. Defaults to ``True``.
When True, it is assumed that ON UPDATE CASCADE is configured on
the foreign key in the database, and that the database will handle
propagation of an UPDATE from a source column to dependent columns
on joined-table rows.
When False, it is assumed that the database does not enforce
referential integrity and will not be issuing its own CASCADE
operation for an update. The unit of work process will
emit an UPDATE statement for the dependent columns during a
primary key change.
.. seealso::
:ref:`passive_updates` - description of a similar feature as
used with :func:`.relationship`
:param polymorphic_on: Specifies the column, attribute, or
SQL expression used to determine the target class for an
incoming row, when inheriting classes are present.
This value is commonly a :class:`.Column` object that's
present in the mapped :class:`.Table`::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
__mapper_args__ = {
"polymorphic_on":discriminator,
"polymorphic_identity":"employee"
}
It may also be specified
as a SQL expression, as in this example where we
use the :func:`.case` construct to provide a conditional
approach::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
__mapper_args__ = {
"polymorphic_on":case([
(discriminator == "EN", "engineer"),
(discriminator == "MA", "manager"),
], else_="employee"),
"polymorphic_identity":"employee"
}
It may also refer to any attribute
configured with :func:`.column_property`, or to the
string name of one::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
employee_type = column_property(
case([
(discriminator == "EN", "engineer"),
(discriminator == "MA", "manager"),
], else_="employee")
)
__mapper_args__ = {
"polymorphic_on":employee_type,
"polymorphic_identity":"employee"
}
.. versionchanged:: 0.7.4
``polymorphic_on`` may be specified as a SQL expression,
or refer to any attribute configured with
:func:`.column_property`, or to the string name of one.
When setting ``polymorphic_on`` to reference an
attribute or expression that's not present in the
locally mapped :class:`.Table`, yet the value
of the discriminator should be persisted to the database,
the value of the
discriminator is not automatically set on new
instances; this must be handled by the user,
either through manual means or via event listeners.
A typical approach to establishing such a listener
looks like::
from sqlalchemy import event
from sqlalchemy.orm import object_mapper
@event.listens_for(Employee, "init", propagate=True)
def set_identity(instance, *arg, **kw):
mapper = object_mapper(instance)
instance.discriminator = mapper.polymorphic_identity
Where above, we assign the value of ``polymorphic_identity``
for the mapped class to the ``discriminator`` attribute,
thus persisting the value to the ``discriminator`` column
in the database.
.. seealso::
:ref:`inheritance_toplevel`
:param polymorphic_identity: Specifies the value which
identifies this particular class as returned by the
column expression referred to by the ``polymorphic_on``
setting. As rows are received, the value corresponding
to the ``polymorphic_on`` column expression is compared
to this value, indicating which subclass should
be used for the newly reconstructed object.
:param properties: A dictionary mapping the string names of object
attributes to :class:`.MapperProperty` instances, which define the
persistence behavior of that attribute. Note that :class:`.Column`
objects present in
the mapped :class:`.Table` are automatically placed into
``ColumnProperty`` instances upon mapping, unless overridden.
When using Declarative, this argument is passed automatically,
based on all those :class:`.MapperProperty` instances declared
in the declared class body.
:param primary_key: A list of :class:`.Column` objects which define the
primary key to be used against this mapper's selectable unit.
This is normally simply the primary key of the ``local_table``, but
can be overridden here.
:param version_id_col: A :class:`.Column`
that will be used to keep a running version id of rows
in the table. This is used to detect concurrent updates or
the presence of stale data in a flush. The methodology is to
detect if an UPDATE statement does not match the last known
version id, a
:class:`~sqlalchemy.orm.exc.StaleDataError` exception is
thrown.
By default, the column must be of :class:`.Integer` type,
unless ``version_id_generator`` specifies an alternative version
generator.
.. seealso::
:ref:`mapper_version_counter` - discussion of version counting
and rationale.
:param version_id_generator: Define how new version ids should
be generated. Defaults to ``None``, which indicates that
a simple integer counting scheme be employed. To provide a custom
versioning scheme, provide a callable function of the form::
def generate_version(version):
return next_version
Alternatively, server-side versioning functions such as triggers,
or programmatic versioning schemes outside of the version id generator
may be used, by specifying the value ``False``.
Please see :ref:`server_side_version_counter` for a discussion
of important points when using this option.
.. versionadded:: 0.9.0 ``version_id_generator`` supports server-side
version number generation.
.. seealso::
:ref:`custom_version_counter`
:ref:`server_side_version_counter`
:param with_polymorphic: A tuple in the form ``(<classes>,
<selectable>)`` indicating the default style of "polymorphic"
loading, that is, which tables are queried at once. <classes> is
any single or list of mappers and/or classes indicating the
inherited classes that should be loaded at once. The special value
``'*'`` may be used to indicate all descending classes should be
loaded immediately. The second tuple argument <selectable>
indicates a selectable that will be used to query for multiple
classes.
.. seealso::
:ref:`with_polymorphic` - discussion of polymorphic querying techniques.
"""
self.class_ = util.assert_arg_type(class_, type, 'class_')
self.class_manager = None
self._primary_key_argument = util.to_list(primary_key)
self.non_primary = non_primary
if order_by is not False:
self.order_by = util.to_list(order_by)
else:
self.order_by = order_by
self.always_refresh = always_refresh
if isinstance(version_id_col, MapperProperty):
self.version_id_prop = version_id_col
self.version_id_col = None
else:
self.version_id_col = version_id_col
if version_id_generator is False:
self.version_id_generator = False
elif version_id_generator is None:
self.version_id_generator = lambda x: (x or 0) + 1
else:
self.version_id_generator = version_id_generator
self.concrete = concrete
self.single = False
self.inherits = inherits
self.local_table = local_table
self.inherit_condition = inherit_condition
self.inherit_foreign_keys = inherit_foreign_keys
self._init_properties = properties or {}
self._delete_orphans = []
self.batch = batch
self.eager_defaults = eager_defaults
self.column_prefix = column_prefix
self.polymorphic_on = expression._clause_element_as_expr(
polymorphic_on)
self._dependency_processors = []
self.validators = util.immutabledict()
self.passive_updates = passive_updates
self.legacy_is_orphan = legacy_is_orphan
self._clause_adapter = None
self._requires_row_aliasing = False
self._inherits_equated_pairs = None
self._memoized_values = {}
self._compiled_cache_size = _compiled_cache_size
self._reconstructor = None
self._deprecated_extensions = util.to_list(extension or [])
self.allow_partial_pks = allow_partial_pks
if self.inherits and not self.concrete:
self.confirm_deleted_rows = False
else:
self.confirm_deleted_rows = confirm_deleted_rows
self._set_with_polymorphic(with_polymorphic)
if isinstance(self.local_table, expression.SelectBase):
raise sa_exc.InvalidRequestError(
"When mapping against a select() construct, map against "
"an alias() of the construct instead."
"This because several databases don't allow a "
"SELECT from a subquery that does not have an alias."
)
if self.with_polymorphic and \
isinstance(self.with_polymorphic[1],
expression.SelectBase):
self.with_polymorphic = (self.with_polymorphic[0],
self.with_polymorphic[1].alias())
# our 'polymorphic identity', a string name that when located in a
# result set row indicates this Mapper should be used to construct
# the object instance for that row.
self.polymorphic_identity = polymorphic_identity
# a dictionary of 'polymorphic identity' names, associating those
# names with Mappers that will be used to construct object instances
# upon a select operation.
if _polymorphic_map is None:
self.polymorphic_map = {}
else:
self.polymorphic_map = _polymorphic_map
if include_properties is not None:
self.include_properties = util.to_set(include_properties)
else:
self.include_properties = None
if exclude_properties:
self.exclude_properties = util.to_set(exclude_properties)
else:
self.exclude_properties = None
self.configured = False
# prevent this mapper from being constructed
# while a configure_mappers() is occurring (and defer a
# configure_mappers() until construction succeeds)
_CONFIGURE_MUTEX.acquire()
try:
self.dispatch._events._new_mapper_instance(class_, self)
self._configure_inheritance()
self._configure_legacy_instrument_class()
self._configure_class_instrumentation()
self._configure_listeners()
self._configure_properties()
self._configure_polymorphic_setter()
self._configure_pks()
Mapper._new_mappers = True
self._log("constructed")
self._expire_memoizations()
finally:
_CONFIGURE_MUTEX.release()
# major attributes initialized at the classlevel so that
# they can be Sphinx-documented.
is_mapper = True
"""Part of the inspection API."""
@property
def mapper(self):
"""Part of the inspection API.
Returns self.
"""
return self
@property
def entity(self):
"""Part of the inspection API.
Returns self.class\_.
"""
return self.class_
local_table = None
"""The :class:`.Selectable` which this :class:`.Mapper` manages.
Typically is an instance of :class:`.Table` or :class:`.Alias`.
May also be ``None``.
The "local" table is the
selectable that the :class:`.Mapper` is directly responsible for
managing from an attribute access and flush perspective. For
non-inheriting mappers, the local table is the same as the
"mapped" table. For joined-table inheritance mappers, local_table
will be the particular sub-table of the overall "join" which
this :class:`.Mapper` represents. If this mapper is a
single-table inheriting mapper, local_table will be ``None``.
.. seealso::
:attr:`~.Mapper.mapped_table`.
"""
mapped_table = None
"""The :class:`.Selectable` to which this :class:`.Mapper` is mapped.
Typically an instance of :class:`.Table`, :class:`.Join`, or
:class:`.Alias`.
The "mapped" table is the selectable that
the mapper selects from during queries. For non-inheriting
mappers, the mapped table is the same as the "local" table.
For joined-table inheritance mappers, mapped_table references the
full :class:`.Join` representing full rows for this particular
subclass. For single-table inheritance mappers, mapped_table
references the base table.
.. seealso::
:attr:`~.Mapper.local_table`.
"""
inherits = None
"""References the :class:`.Mapper` which this :class:`.Mapper`
inherits from, if any.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
configured = None
"""Represent ``True`` if this :class:`.Mapper` has been configured.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
.. seealso::
:func:`.configure_mappers`.
"""
concrete = None
"""Represent ``True`` if this :class:`.Mapper` is a concrete
inheritance mapper.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
tables = None
"""An iterable containing the collection of :class:`.Table` objects
which this :class:`.Mapper` is aware of.
If the mapper is mapped to a :class:`.Join`, or an :class:`.Alias`
representing a :class:`.Select`, the individual :class:`.Table`
objects that comprise the full construct will be represented here.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
primary_key = None
"""An iterable containing the collection of :class:`.Column` objects
which comprise the 'primary key' of the mapped table, from the
perspective of this :class:`.Mapper`.
This list is against the selectable in :attr:`~.Mapper.mapped_table`. In
the case of inheriting mappers, some columns may be managed by a
superclass mapper. For example, in the case of a :class:`.Join`, the
primary key is determined by all of the primary key columns across all
tables referenced by the :class:`.Join`.
The list is also not necessarily the same as the primary key column
collection associated with the underlying tables; the :class:`.Mapper`
features a ``primary_key`` argument that can override what the
:class:`.Mapper` considers as primary key columns.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
class_ = None
"""The Python class which this :class:`.Mapper` maps.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
class_manager = None
"""The :class:`.ClassManager` which maintains event listeners
and class-bound descriptors for this :class:`.Mapper`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
single = None
"""Represent ``True`` if this :class:`.Mapper` is a single table
inheritance mapper.
:attr:`~.Mapper.local_table` will be ``None`` if this flag is set.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
non_primary = None
"""Represent ``True`` if this :class:`.Mapper` is a "non-primary"
    mapper, e.g. a mapper that is used only to select rows but not for
persistence management.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_on = None
"""The :class:`.Column` or SQL expression specified as the
``polymorphic_on`` argument
for this :class:`.Mapper`, within an inheritance scenario.
This attribute is normally a :class:`.Column` instance but
may also be an expression, such as one derived from
:func:`.cast`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_map = None
"""A mapping of "polymorphic identity" identifiers mapped to
:class:`.Mapper` instances, within an inheritance scenario.
The identifiers can be of any type which is comparable to the
type of column represented by :attr:`~.Mapper.polymorphic_on`.
An inheritance chain of mappers will all reference the same
polymorphic map object. The object is used to correlate incoming
result rows to target mappers.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_identity = None
"""Represent an identifier which is matched against the
:attr:`~.Mapper.polymorphic_on` column during result row loading.
Used only with inheritance, this object can be of any type which is
comparable to the type of column represented by
:attr:`~.Mapper.polymorphic_on`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
base_mapper = None
"""The base-most :class:`.Mapper` in an inheritance chain.
In a non-inheriting scenario, this attribute will always be this
:class:`.Mapper`. In an inheritance scenario, it references
the :class:`.Mapper` which is parent to all other :class:`.Mapper`
objects in the inheritance chain.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
columns = None
"""A collection of :class:`.Column` or other scalar expression
objects maintained by this :class:`.Mapper`.
The collection behaves the same as that of the ``c`` attribute on
any :class:`.Table` object, except that only those columns included in
this mapping are present, and are keyed based on the attribute name
defined in the mapping, not necessarily the ``key`` attribute of the
:class:`.Column` itself. Additionally, scalar expressions mapped
by :func:`.column_property` are also present here.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
validators = None
"""An immutable dictionary of attributes which have been decorated
using the :func:`~.orm.validates` decorator.
The dictionary contains string attribute names as keys
mapped to the actual validation method.
"""
c = None
"""A synonym for :attr:`~.Mapper.columns`."""
@util.memoized_property
def _path_registry(self):
return PathRegistry.per_mapper(self)
def _configure_inheritance(self):
"""Configure settings related to inherting and/or inherited mappers
being present."""
# a set of all mappers which inherit from this one.
self._inheriting_mappers = util.WeakSequence()
if self.inherits:
if isinstance(self.inherits, type):
self.inherits = class_mapper(self.inherits, configure=False)
if not issubclass(self.class_, self.inherits.class_):
raise sa_exc.ArgumentError(
"Class '%s' does not inherit from '%s'" %
(self.class_.__name__, self.inherits.class_.__name__))
if self.non_primary != self.inherits.non_primary:
np = not self.non_primary and "primary" or "non-primary"
raise sa_exc.ArgumentError(
"Inheritance of %s mapper for class '%s' is "
"only allowed from a %s mapper" %
(np, self.class_.__name__, np))
# inherit_condition is optional.
if self.local_table is None:
self.local_table = self.inherits.local_table
self.mapped_table = self.inherits.mapped_table
self.single = True
elif not self.local_table is self.inherits.local_table:
if self.concrete:
self.mapped_table = self.local_table
for mapper in self.iterate_to_root():
if mapper.polymorphic_on is not None:
mapper._requires_row_aliasing = True
else:
if self.inherit_condition is None:
# figure out inherit condition from our table to the
# immediate table of the inherited mapper, not its
                    # full table, which could pull in other stuff we don't
# want (allows test/inheritance.InheritTest4 to pass)
self.inherit_condition = sql_util.join_condition(
self.inherits.local_table,
self.local_table)
self.mapped_table = sql.join(
self.inherits.mapped_table,
self.local_table,
self.inherit_condition)
fks = util.to_set(self.inherit_foreign_keys)
self._inherits_equated_pairs = sql_util.criterion_as_pairs(
self.mapped_table.onclause,
consider_as_foreign_keys=fks)
else:
self.mapped_table = self.local_table
if self.polymorphic_identity is not None and not self.concrete:
self._identity_class = self.inherits._identity_class
else:
self._identity_class = self.class_
if self.version_id_col is None:
self.version_id_col = self.inherits.version_id_col
self.version_id_generator = self.inherits.version_id_generator
elif self.inherits.version_id_col is not None and \
self.version_id_col is not self.inherits.version_id_col:
util.warn(
"Inheriting version_id_col '%s' does not match inherited "
"version_id_col '%s' and will not automatically populate "
"the inherited versioning column. "
"version_id_col should only be specified on "
"the base-most mapper that includes versioning." %
(self.version_id_col.description,
self.inherits.version_id_col.description)
)
if self.order_by is False and \
not self.concrete and \
self.inherits.order_by is not False:
self.order_by = self.inherits.order_by
self.polymorphic_map = self.inherits.polymorphic_map
self.batch = self.inherits.batch
self.inherits._inheriting_mappers.append(self)
self.base_mapper = self.inherits.base_mapper
self.passive_updates = self.inherits.passive_updates
self._all_tables = self.inherits._all_tables
if self.polymorphic_identity is not None:
self.polymorphic_map[self.polymorphic_identity] = self
else:
self._all_tables = set()
self.base_mapper = self
self.mapped_table = self.local_table
if self.polymorphic_identity is not None:
self.polymorphic_map[self.polymorphic_identity] = self
self._identity_class = self.class_
if self.mapped_table is None:
raise sa_exc.ArgumentError(
"Mapper '%s' does not have a mapped_table specified."
% self)
def _set_with_polymorphic(self, with_polymorphic):
if with_polymorphic == '*':
self.with_polymorphic = ('*', None)
elif isinstance(with_polymorphic, (tuple, list)):
if isinstance(with_polymorphic[0], util.string_types + (tuple, list)):
self.with_polymorphic = with_polymorphic
else:
self.with_polymorphic = (with_polymorphic, None)
elif with_polymorphic is not None:
raise sa_exc.ArgumentError("Invalid setting for with_polymorphic")
else:
self.with_polymorphic = None
if isinstance(self.local_table, expression.SelectBase):
raise sa_exc.InvalidRequestError(
"When mapping against a select() construct, map against "
"an alias() of the construct instead."
"This because several databases don't allow a "
"SELECT from a subquery that does not have an alias."
)
if self.with_polymorphic and \
isinstance(self.with_polymorphic[1],
expression.SelectBase):
self.with_polymorphic = (self.with_polymorphic[0],
self.with_polymorphic[1].alias())
if self.configured:
self._expire_memoizations()
def _set_concrete_base(self, mapper):
"""Set the given :class:`.Mapper` as the 'inherits' for this
:class:`.Mapper`, assuming this :class:`.Mapper` is concrete
and does not already have an inherits."""
assert self.concrete
assert not self.inherits
assert isinstance(mapper, Mapper)
self.inherits = mapper
self.inherits.polymorphic_map.update(self.polymorphic_map)
self.polymorphic_map = self.inherits.polymorphic_map
for mapper in self.iterate_to_root():
if mapper.polymorphic_on is not None:
mapper._requires_row_aliasing = True
self.batch = self.inherits.batch
for mp in self.self_and_descendants:
mp.base_mapper = self.inherits.base_mapper
self.inherits._inheriting_mappers.append(self)
self.passive_updates = self.inherits.passive_updates
self._all_tables = self.inherits._all_tables
for key, prop in mapper._props.items():
if key not in self._props and \
not self._should_exclude(key, key, local=False,
column=None):
self._adapt_inherited_property(key, prop, False)
def _set_polymorphic_on(self, polymorphic_on):
self.polymorphic_on = polymorphic_on
self._configure_polymorphic_setter(True)
def _configure_legacy_instrument_class(self):
if self.inherits:
self.dispatch._update(self.inherits.dispatch)
super_extensions = set(
chain(*[m._deprecated_extensions
for m in self.inherits.iterate_to_root()]))
else:
super_extensions = set()
for ext in self._deprecated_extensions:
if ext not in super_extensions:
ext._adapt_instrument_class(self, ext)
def _configure_listeners(self):
if self.inherits:
super_extensions = set(
chain(*[m._deprecated_extensions
for m in self.inherits.iterate_to_root()]))
else:
super_extensions = set()
for ext in self._deprecated_extensions:
if ext not in super_extensions:
ext._adapt_listener(self, ext)
def _configure_class_instrumentation(self):
"""If this mapper is to be a primary mapper (i.e. the
non_primary flag is not set), associate this Mapper with the
given class_ and entity name.
Subsequent calls to ``class_mapper()`` for the class_/entity
name combination will return this mapper. Also decorate the
`__init__` method on the mapped class to include optional
auto-session attachment logic.
"""
manager = attributes.manager_of_class(self.class_)
if self.non_primary:
if not manager or not manager.is_mapped:
raise sa_exc.InvalidRequestError(
"Class %s has no primary mapper configured. Configure "
"a primary mapper first before setting up a non primary "
"Mapper." % self.class_)
self.class_manager = manager
self._identity_class = manager.mapper._identity_class
_mapper_registry[self] = True
return
if manager is not None:
assert manager.class_ is self.class_
if manager.is_mapped:
raise sa_exc.ArgumentError(
"Class '%s' already has a primary mapper defined. "
"Use non_primary=True to "
"create a non primary Mapper. clear_mappers() will "
"remove *all* current mappers from all classes." %
self.class_)
#else:
# a ClassManager may already exist as
# ClassManager.instrument_attribute() creates
# new managers for each subclass if they don't yet exist.
_mapper_registry[self] = True
self.dispatch.instrument_class(self, self.class_)
if manager is None:
manager = instrumentation.register_class(self.class_)
self.class_manager = manager
manager.mapper = self
manager.deferred_scalar_loader = util.partial(
loading.load_scalar_attributes, self)
# The remaining members can be added by any mapper,
# e_name None or not.
if manager.info.get(_INSTRUMENTOR, False):
return
event.listen(manager, 'first_init', _event_on_first_init, raw=True)
event.listen(manager, 'init', _event_on_init, raw=True)
event.listen(manager, 'resurrect', _event_on_resurrect, raw=True)
for key, method in util.iterate_attributes(self.class_):
if isinstance(method, types.FunctionType):
if hasattr(method, '__sa_reconstructor__'):
self._reconstructor = method
event.listen(manager, 'load', _event_on_load, raw=True)
elif hasattr(method, '__sa_validators__'):
validation_opts = method.__sa_validation_opts__
for name in method.__sa_validators__:
self.validators = self.validators.union(
{name: (method, validation_opts)}
)
manager.info[_INSTRUMENTOR] = self
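    # Illustrative sketch (comments only): the loop above is what picks up
    # decorated methods on the mapped class, e.g.
    #
    #   class User(object):
    #       @orm.reconstructor
    #       def init_on_load(self):              # stored as self._reconstructor
    #           self.cache = {}
    #
    #       @orm.validates('email')
    #       def check_email(self, key, value):   # recorded in self.validators
    #           assert '@' in value
    #           return value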
@classmethod
def _configure_all(cls):
"""Class-level path to the :func:`.configure_mappers` call.
"""
configure_mappers()
def dispose(self):
# Disable any attribute-based compilation.
self.configured = True
if hasattr(self, '_configure_failed'):
del self._configure_failed
if not self.non_primary and \
self.class_manager is not None and \
self.class_manager.is_mapped and \
self.class_manager.mapper is self:
instrumentation.unregister_class(self.class_)
def _configure_pks(self):
self.tables = sql_util.find_tables(self.mapped_table)
self._pks_by_table = {}
self._cols_by_table = {}
all_cols = util.column_set(chain(*[
col.proxy_set for col in
self._columntoproperty]))
pk_cols = util.column_set(c for c in all_cols if c.primary_key)
# identify primary key columns which are also mapped by this mapper.
tables = set(self.tables + [self.mapped_table])
self._all_tables.update(tables)
for t in tables:
if t.primary_key and pk_cols.issuperset(t.primary_key):
# ordering is important since it determines the ordering of
# mapper.primary_key (and therefore query.get())
self._pks_by_table[t] = \
util.ordered_column_set(t.primary_key).\
intersection(pk_cols)
self._cols_by_table[t] = \
util.ordered_column_set(t.c).\
intersection(all_cols)
# determine cols that aren't expressed within our tables; mark these
# as "read only" properties which are refreshed upon INSERT/UPDATE
self._readonly_props = set(
self._columntoproperty[col]
for col in self._columntoproperty
if not hasattr(col, 'table') or
col.table not in self._cols_by_table)
# if explicit PK argument sent, add those columns to the
# primary key mappings
if self._primary_key_argument:
for k in self._primary_key_argument:
if k.table not in self._pks_by_table:
self._pks_by_table[k.table] = util.OrderedSet()
self._pks_by_table[k.table].add(k)
# otherwise, see that we got a full PK for the mapped table
elif self.mapped_table not in self._pks_by_table or \
len(self._pks_by_table[self.mapped_table]) == 0:
raise sa_exc.ArgumentError(
"Mapper %s could not assemble any primary "
"key columns for mapped table '%s'" %
(self, self.mapped_table.description))
elif self.local_table not in self._pks_by_table and \
isinstance(self.local_table, schema.Table):
util.warn("Could not assemble any primary "
"keys for locally mapped table '%s' - "
"no rows will be persisted in this Table."
% self.local_table.description)
if self.inherits and \
not self.concrete and \
not self._primary_key_argument:
# if inheriting, the "primary key" for this mapper is
# that of the inheriting (unless concrete or explicit)
self.primary_key = self.inherits.primary_key
else:
# determine primary key from argument or mapped_table pks -
# reduce to the minimal set of columns
if self._primary_key_argument:
primary_key = sql_util.reduce_columns(
[self.mapped_table.corresponding_column(c) for c in
self._primary_key_argument],
ignore_nonexistent_tables=True)
else:
primary_key = sql_util.reduce_columns(
self._pks_by_table[self.mapped_table],
ignore_nonexistent_tables=True)
if len(primary_key) == 0:
raise sa_exc.ArgumentError(
"Mapper %s could not assemble any primary "
"key columns for mapped table '%s'" %
(self, self.mapped_table.description))
self.primary_key = tuple(primary_key)
self._log("Identified primary key columns: %s", primary_key)
def _configure_properties(self):
# Column and other ClauseElement objects which are mapped
self.columns = self.c = util.OrderedProperties()
# object attribute names mapped to MapperProperty objects
self._props = util.OrderedDict()
# table columns mapped to lists of MapperProperty objects
# using a list allows a single column to be defined as
# populating multiple object attributes
self._columntoproperty = _ColumnMapping(self)
# load custom properties
if self._init_properties:
for key, prop in self._init_properties.items():
self._configure_property(key, prop, False)
# pull properties from the inherited mapper if any.
if self.inherits:
for key, prop in self.inherits._props.items():
if key not in self._props and \
not self._should_exclude(key, key, local=False,
column=None):
self._adapt_inherited_property(key, prop, False)
# create properties for each column in the mapped table,
# for those columns which don't already map to a property
for column in self.mapped_table.columns:
if column in self._columntoproperty:
continue
column_key = (self.column_prefix or '') + column.key
if self._should_exclude(
column.key, column_key,
local=self.local_table.c.contains_column(column),
column=column
):
continue
# adjust the "key" used for this column to that
# of the inheriting mapper
for mapper in self.iterate_to_root():
if column in mapper._columntoproperty:
column_key = mapper._columntoproperty[column].key
self._configure_property(column_key,
column,
init=False,
setparent=True)
def _configure_polymorphic_setter(self, init=False):
"""Configure an attribute on the mapper representing the
'polymorphic_on' column, if applicable, and not
already generated by _configure_properties (which is typical).
Also create a setter function which will assign this
attribute to the value of the 'polymorphic_identity'
upon instance construction, also if applicable. This
routine will run when an instance is created.
"""
setter = False
if self.polymorphic_on is not None:
setter = True
if isinstance(self.polymorphic_on, util.string_types):
                # polymorphic_on specified as a string - link
# it to mapped ColumnProperty
try:
self.polymorphic_on = self._props[self.polymorphic_on]
except KeyError:
raise sa_exc.ArgumentError(
"Can't determine polymorphic_on "
"value '%s' - no attribute is "
"mapped to this name." % self.polymorphic_on)
if self.polymorphic_on in self._columntoproperty:
# polymorphic_on is a column that is already mapped
# to a ColumnProperty
prop = self._columntoproperty[self.polymorphic_on]
polymorphic_key = prop.key
self.polymorphic_on = prop.columns[0]
elif isinstance(self.polymorphic_on, MapperProperty):
# polymorphic_on is directly a MapperProperty,
# ensure it's a ColumnProperty
if not isinstance(self.polymorphic_on,
properties.ColumnProperty):
raise sa_exc.ArgumentError(
"Only direct column-mapped "
"property or SQL expression "
"can be passed for polymorphic_on")
prop = self.polymorphic_on
self.polymorphic_on = prop.columns[0]
polymorphic_key = prop.key
elif not expression._is_column(self.polymorphic_on):
# polymorphic_on is not a Column and not a ColumnProperty;
# not supported right now.
raise sa_exc.ArgumentError(
"Only direct column-mapped "
"property or SQL expression "
"can be passed for polymorphic_on"
)
else:
# polymorphic_on is a Column or SQL expression and
# doesn't appear to be mapped. this means it can be 1.
# only present in the with_polymorphic selectable or
# 2. a totally standalone SQL expression which we'd
# hope is compatible with this mapper's mapped_table
col = self.mapped_table.corresponding_column(
self.polymorphic_on)
if col is None:
# polymorphic_on doesn't derive from any
# column/expression isn't present in the mapped
# table. we will make a "hidden" ColumnProperty
# for it. Just check that if it's directly a
# schema.Column and we have with_polymorphic, it's
# likely a user error if the schema.Column isn't
# represented somehow in either mapped_table or
# with_polymorphic. Otherwise as of 0.7.4 we
# just go with it and assume the user wants it
# that way (i.e. a CASE statement)
setter = False
instrument = False
col = self.polymorphic_on
if isinstance(col, schema.Column) and (
self.with_polymorphic is None or \
self.with_polymorphic[1].\
corresponding_column(col) is None
):
raise sa_exc.InvalidRequestError(
"Could not map polymorphic_on column "
"'%s' to the mapped table - polymorphic "
"loads will not function properly"
% col.description)
else:
# column/expression that polymorphic_on derives from
# is present in our mapped table
# and is probably mapped, but polymorphic_on itself
# is not. This happens when
# the polymorphic_on is only directly present in the
                    # with_polymorphic selectable, as when using
# polymorphic_union.
# we'll make a separate ColumnProperty for it.
instrument = True
key = getattr(col, 'key', None)
if key:
if self._should_exclude(col.key, col.key, False, col):
raise sa_exc.InvalidRequestError(
"Cannot exclude or override the "
"discriminator column %r" %
col.key)
else:
self.polymorphic_on = col = \
col.label("_sa_polymorphic_on")
key = col.key
self._configure_property(
key,
properties.ColumnProperty(col,
_instrument=instrument),
init=init, setparent=True)
polymorphic_key = key
else:
# no polymorphic_on was set.
# check inheriting mappers for one.
for mapper in self.iterate_to_root():
# determine if polymorphic_on of the parent
# should be propagated here. If the col
# is present in our mapped table, or if our mapped
# table is the same as the parent (i.e. single table
# inheritance), we can use it
if mapper.polymorphic_on is not None:
if self.mapped_table is mapper.mapped_table:
self.polymorphic_on = mapper.polymorphic_on
else:
self.polymorphic_on = \
self.mapped_table.corresponding_column(
mapper.polymorphic_on)
# we can use the parent mapper's _set_polymorphic_identity
# directly; it ensures the polymorphic_identity of the
                    # instance's mapper is used, so it is portable to subclasses.
if self.polymorphic_on is not None:
self._set_polymorphic_identity = \
mapper._set_polymorphic_identity
self._validate_polymorphic_identity = \
mapper._validate_polymorphic_identity
else:
self._set_polymorphic_identity = None
return
if setter:
def _set_polymorphic_identity(state):
dict_ = state.dict
state.get_impl(polymorphic_key).set(state, dict_,
state.manager.mapper.polymorphic_identity, None)
def _validate_polymorphic_identity(mapper, state, dict_):
if polymorphic_key in dict_ and \
dict_[polymorphic_key] not in \
mapper._acceptable_polymorphic_identities:
util.warn(
"Flushing object %s with "
"incompatible polymorphic identity %r; the "
"object may not refresh and/or load correctly" % (
state_str(state),
dict_[polymorphic_key]
)
)
self._set_polymorphic_identity = _set_polymorphic_identity
self._validate_polymorphic_identity = _validate_polymorphic_identity
else:
self._set_polymorphic_identity = None
            self._validate_polymorphic_identity = None
@_memoized_configured_property
def _version_id_prop(self):
if self.version_id_col is not None:
return self._columntoproperty[self.version_id_col]
else:
return None
@_memoized_configured_property
def _acceptable_polymorphic_identities(self):
identities = set()
stack = deque([self])
while stack:
item = stack.popleft()
if item.mapped_table is self.mapped_table:
identities.add(item.polymorphic_identity)
stack.extend(item._inheriting_mappers)
return identities
def _adapt_inherited_property(self, key, prop, init):
if not self.concrete:
self._configure_property(key, prop, init=False, setparent=False)
elif key not in self._props:
self._configure_property(
key,
properties.ConcreteInheritedProperty(),
init=init, setparent=True)
def _configure_property(self, key, prop, init=True, setparent=True):
self._log("_configure_property(%s, %s)", key, prop.__class__.__name__)
if not isinstance(prop, MapperProperty):
prop = self._property_from_column(key, prop)
if isinstance(prop, properties.ColumnProperty):
col = self.mapped_table.corresponding_column(prop.columns[0])
# if the column is not present in the mapped table,
# test if a column has been added after the fact to the
# parent table (or their parent, etc.) [ticket:1570]
if col is None and self.inherits:
path = [self]
for m in self.inherits.iterate_to_root():
col = m.local_table.corresponding_column(prop.columns[0])
if col is not None:
for m2 in path:
m2.mapped_table._reset_exported()
col = self.mapped_table.corresponding_column(
prop.columns[0])
break
path.append(m)
# subquery expression, column not present in the mapped
# selectable.
if col is None:
col = prop.columns[0]
# column is coming in after _readonly_props was
# initialized; check for 'readonly'
if hasattr(self, '_readonly_props') and \
(not hasattr(col, 'table') or
col.table not in self._cols_by_table):
self._readonly_props.add(prop)
else:
# if column is coming in after _cols_by_table was
# initialized, ensure the col is in the right set
if hasattr(self, '_cols_by_table') and \
col.table in self._cols_by_table and \
col not in self._cols_by_table[col.table]:
self._cols_by_table[col.table].add(col)
# if this properties.ColumnProperty represents the "polymorphic
# discriminator" column, mark it. We'll need this when rendering
# columns in SELECT statements.
if not hasattr(prop, '_is_polymorphic_discriminator'):
prop._is_polymorphic_discriminator = \
(col is self.polymorphic_on or
prop.columns[0] is self.polymorphic_on)
self.columns[key] = col
for col in prop.columns + prop._orig_columns:
for col in col.proxy_set:
self._columntoproperty[col] = prop
prop.key = key
if setparent:
prop.set_parent(self, init)
if key in self._props and \
getattr(self._props[key], '_mapped_by_synonym', False):
syn = self._props[key]._mapped_by_synonym
raise sa_exc.ArgumentError(
"Can't call map_column=True for synonym %r=%r, "
"a ColumnProperty already exists keyed to the name "
"%r for column %r" % (syn, key, key, syn)
)
if key in self._props and \
not isinstance(prop, properties.ColumnProperty) and \
not isinstance(self._props[key], properties.ColumnProperty):
util.warn("Property %s on %s being replaced with new "
"property %s; the old property will be discarded" % (
self._props[key],
self,
prop,
))
self._props[key] = prop
if not self.non_primary:
prop.instrument_class(self)
for mapper in self._inheriting_mappers:
mapper._adapt_inherited_property(key, prop, init)
if init:
prop.init()
prop.post_instrument_class(self)
if self.configured:
self._expire_memoizations()
def _property_from_column(self, key, prop):
"""generate/update a :class:`.ColumnProprerty` given a
:class:`.Column` object. """
# we were passed a Column or a list of Columns;
# generate a properties.ColumnProperty
columns = util.to_list(prop)
column = columns[0]
if not expression._is_column(column):
raise sa_exc.ArgumentError(
"%s=%r is not an instance of MapperProperty or Column"
% (key, prop))
prop = self._props.get(key, None)
if isinstance(prop, properties.ColumnProperty):
if prop.parent is self:
raise sa_exc.InvalidRequestError(
"Implicitly combining column %s with column "
"%s under attribute '%s'. Please configure one "
"or more attributes for these same-named columns "
"explicitly."
% (prop.columns[-1], column, key))
# existing properties.ColumnProperty from an inheriting
# mapper. make a copy and append our column to it
prop = prop.copy()
prop.columns.insert(0, column)
self._log("inserting column to existing list "
"in properties.ColumnProperty %s" % (key))
return prop
elif prop is None or isinstance(prop,
properties.ConcreteInheritedProperty):
mapped_column = []
for c in columns:
mc = self.mapped_table.corresponding_column(c)
if mc is None:
mc = self.local_table.corresponding_column(c)
if mc is not None:
# if the column is in the local table but not the
# mapped table, this corresponds to adding a
# column after the fact to the local table.
# [ticket:1523]
self.mapped_table._reset_exported()
mc = self.mapped_table.corresponding_column(c)
if mc is None:
raise sa_exc.ArgumentError(
"When configuring property '%s' on %s, "
"column '%s' is not represented in the mapper's "
"table. Use the `column_property()` function to "
"force this column to be mapped as a read-only "
"attribute." % (key, self, c))
mapped_column.append(mc)
return properties.ColumnProperty(*mapped_column)
else:
raise sa_exc.ArgumentError(
"WARNING: when configuring property '%s' on %s, "
"column '%s' conflicts with property '%r'. "
"To resolve this, map the column to the class under a "
"different name in the 'properties' dictionary. Or, "
"to remove all awareness of the column entirely "
"(including its availability as a foreign key), "
"use the 'include_properties' or 'exclude_properties' "
"mapper arguments to control specifically which table "
"columns get mapped." %
(key, self, column.key, prop))
def _post_configure_properties(self):
"""Call the ``init()`` method on all ``MapperProperties``
attached to this mapper.
This is a deferred configuration step which is intended
to execute once all mappers have been constructed.
"""
self._log("_post_configure_properties() started")
l = [(key, prop) for key, prop in self._props.items()]
for key, prop in l:
self._log("initialize prop %s", key)
if prop.parent is self and not prop._configure_started:
prop.init()
if prop._configure_finished:
prop.post_instrument_class(self)
self._log("_post_configure_properties() complete")
self.configured = True
def add_properties(self, dict_of_properties):
"""Add the given dictionary of properties to this mapper,
using `add_property`.
"""
for key, value in dict_of_properties.items():
self.add_property(key, value)
def add_property(self, key, prop):
"""Add an individual MapperProperty to this mapper.
If the mapper has not been configured yet, just adds the
property to the initial properties dictionary sent to the
constructor. If this Mapper has already been configured, then
the given MapperProperty is configured immediately.
"""
self._init_properties[key] = prop
self._configure_property(key, prop, init=self.configured)
def _expire_memoizations(self):
for mapper in self.iterate_to_root():
_memoized_configured_property.expire_instance(mapper)
@property
def _log_desc(self):
return "(" + self.class_.__name__ + \
"|" + \
(self.local_table is not None and
self.local_table.description or
str(self.local_table)) +\
(self.non_primary and
"|non-primary" or "") + ")"
def _log(self, msg, *args):
self.logger.info(
"%s " + msg, *((self._log_desc,) + args)
)
def _log_debug(self, msg, *args):
self.logger.debug(
"%s " + msg, *((self._log_desc,) + args)
)
def __repr__(self):
return '<Mapper at 0x%x; %s>' % (
id(self), self.class_.__name__)
def __str__(self):
return "Mapper|%s|%s%s" % (
self.class_.__name__,
self.local_table is not None and
self.local_table.description or None,
self.non_primary and "|non-primary" or ""
)
def _is_orphan(self, state):
orphan_possible = False
for mapper in self.iterate_to_root():
for (key, cls) in mapper._delete_orphans:
orphan_possible = True
has_parent = attributes.manager_of_class(cls).has_parent(
state, key, optimistic=state.has_identity)
if self.legacy_is_orphan and has_parent:
return False
elif not self.legacy_is_orphan and not has_parent:
return True
if self.legacy_is_orphan:
return orphan_possible
else:
return False
def has_property(self, key):
return key in self._props
def get_property(self, key, _configure_mappers=True):
"""return a MapperProperty associated with the given key.
"""
if _configure_mappers and Mapper._new_mappers:
configure_mappers()
try:
return self._props[key]
except KeyError:
raise sa_exc.InvalidRequestError(
"Mapper '%s' has no property '%s'" % (self, key))
def get_property_by_column(self, column):
"""Given a :class:`.Column` object, return the
:class:`.MapperProperty` which maps this column."""
return self._columntoproperty[column]
@property
def iterate_properties(self):
"""return an iterator of all MapperProperty objects."""
if Mapper._new_mappers:
configure_mappers()
return iter(self._props.values())
def _mappers_from_spec(self, spec, selectable):
"""given a with_polymorphic() argument, return the set of mappers it
represents.
Trims the list of mappers to just those represented within the given
selectable, if present. This helps some more legacy-ish mappings.
"""
if spec == '*':
mappers = list(self.self_and_descendants)
elif spec:
mappers = set()
for m in util.to_list(spec):
m = _class_to_mapper(m)
if not m.isa(self):
raise sa_exc.InvalidRequestError(
"%r does not inherit from %r" %
(m, self))
if selectable is None:
mappers.update(m.iterate_to_root())
else:
mappers.add(m)
mappers = [m for m in self.self_and_descendants if m in mappers]
else:
mappers = []
if selectable is not None:
tables = set(sql_util.find_tables(selectable,
include_aliases=True))
mappers = [m for m in mappers if m.local_table in tables]
return mappers
def _selectable_from_mappers(self, mappers, innerjoin):
"""given a list of mappers (assumed to be within this mapper's
inheritance hierarchy), construct an outerjoin amongst those mapper's
mapped tables.
"""
from_obj = self.mapped_table
for m in mappers:
if m is self:
continue
if m.concrete:
raise sa_exc.InvalidRequestError(
"'with_polymorphic()' requires 'selectable' argument "
"when concrete-inheriting mappers are used.")
elif not m.single:
if innerjoin:
from_obj = from_obj.join(m.local_table,
m.inherit_condition)
else:
from_obj = from_obj.outerjoin(m.local_table,
m.inherit_condition)
return from_obj
@_memoized_configured_property
def _single_table_criterion(self):
if self.single and \
self.inherits and \
self.polymorphic_on is not None:
return self.polymorphic_on.in_(
m.polymorphic_identity
for m in self.self_and_descendants)
else:
return None
@_memoized_configured_property
def _with_polymorphic_mappers(self):
if Mapper._new_mappers:
configure_mappers()
if not self.with_polymorphic:
return []
return self._mappers_from_spec(*self.with_polymorphic)
@_memoized_configured_property
def _with_polymorphic_selectable(self):
if not self.with_polymorphic:
return self.mapped_table
spec, selectable = self.with_polymorphic
if selectable is not None:
return selectable
else:
return self._selectable_from_mappers(
self._mappers_from_spec(spec, selectable),
False)
with_polymorphic_mappers = _with_polymorphic_mappers
"""The list of :class:`.Mapper` objects included in the
default "polymorphic" query.
"""
@property
def selectable(self):
"""The :func:`.select` construct this :class:`.Mapper` selects from
by default.
Normally, this is equivalent to :attr:`.mapped_table`, unless
the ``with_polymorphic`` feature is in use, in which case the
full "polymorphic" selectable is returned.
"""
return self._with_polymorphic_selectable
def _with_polymorphic_args(self, spec=None, selectable=False,
innerjoin=False):
if self.with_polymorphic:
if not spec:
spec = self.with_polymorphic[0]
if selectable is False:
selectable = self.with_polymorphic[1]
elif selectable is False:
selectable = None
mappers = self._mappers_from_spec(spec, selectable)
if selectable is not None:
return mappers, selectable
else:
return mappers, self._selectable_from_mappers(mappers,
innerjoin)
@_memoized_configured_property
def _polymorphic_properties(self):
return list(self._iterate_polymorphic_properties(
self._with_polymorphic_mappers))
def _iterate_polymorphic_properties(self, mappers=None):
"""Return an iterator of MapperProperty objects which will render into
a SELECT."""
if mappers is None:
mappers = self._with_polymorphic_mappers
if not mappers:
for c in self.iterate_properties:
yield c
else:
# in the polymorphic case, filter out discriminator columns
# from other mappers, as these are sometimes dependent on that
# mapper's polymorphic selectable (which we don't want rendered)
for c in util.unique_list(
chain(*[
list(mapper.iterate_properties) for mapper in
[self] + mappers
])
):
if getattr(c, '_is_polymorphic_discriminator', False) and \
(self.polymorphic_on is None or
c.columns[0] is not self.polymorphic_on):
continue
yield c
@util.memoized_property
def attrs(self):
"""A namespace of all :class:`.MapperProperty` objects
associated this mapper.
This is an object that provides each property based on
its key name. For instance, the mapper for a
``User`` class which has ``User.name`` attribute would
provide ``mapper.attrs.name``, which would be the
:class:`.ColumnProperty` representing the ``name``
column. The namespace object can also be iterated,
which would yield each :class:`.MapperProperty`.
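        For example, given a hypothetical mapped ``User`` class with a
        ``name`` column (the class name is illustrative only, not part
        of this module)::

            from sqlalchemy import inspect

            user_mapper = inspect(User)
            name_prop = user_mapper.attrs.name   # ColumnProperty for "name"
            for prop in user_mapper.attrs:       # iterate every property
                print(prop.key)
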
:class:`.Mapper` has several pre-filtered views
of this attribute which limit the types of properties
        returned, including :attr:`.synonyms`, :attr:`.column_attrs`,
:attr:`.relationships`, and :attr:`.composites`.
.. seealso::
:attr:`.Mapper.all_orm_descriptors`
"""
if Mapper._new_mappers:
configure_mappers()
return util.ImmutableProperties(self._props)
@util.memoized_property
def all_orm_descriptors(self):
"""A namespace of all :class:`._InspectionAttr` attributes associated
with the mapped class.
These attributes are in all cases Python :term:`descriptors` associated
with the mapped class or its superclasses.
This namespace includes attributes that are mapped to the class
as well as attributes declared by extension modules.
It includes any Python descriptor type that inherits from
:class:`._InspectionAttr`. This includes :class:`.QueryableAttribute`,
as well as extension types such as :class:`.hybrid_property`,
:class:`.hybrid_method` and :class:`.AssociationProxy`.
To distinguish between mapped attributes and extension attributes,
the attribute :attr:`._InspectionAttr.extension_type` will refer
to a constant that distinguishes between different extension types.
When dealing with a :class:`.QueryableAttribute`, the
:attr:`.QueryableAttribute.property` attribute refers to the
:class:`.MapperProperty` property, which is what you get when referring
to the collection of mapped properties via :attr:`.Mapper.attrs`.
.. versionadded:: 0.8.0
.. seealso::
:attr:`.Mapper.attrs`
"""
return util.ImmutableProperties(
dict(self.class_manager._all_sqla_attributes()))
@_memoized_configured_property
def synonyms(self):
"""Return a namespace of all :class:`.SynonymProperty`
properties maintained by this :class:`.Mapper`.
.. seealso::
:attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.SynonymProperty)
@_memoized_configured_property
def column_attrs(self):
"""Return a namespace of all :class:`.ColumnProperty`
properties maintained by this :class:`.Mapper`.
.. seealso::
:attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.ColumnProperty)
@_memoized_configured_property
def relationships(self):
"""Return a namespace of all :class:`.RelationshipProperty`
properties maintained by this :class:`.Mapper`.
.. seealso::
:attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.RelationshipProperty)
@_memoized_configured_property
def composites(self):
"""Return a namespace of all :class:`.CompositeProperty`
properties maintained by this :class:`.Mapper`.
.. seealso::
:attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.CompositeProperty)
def _filter_properties(self, type_):
if Mapper._new_mappers:
configure_mappers()
return util.ImmutableProperties(util.OrderedDict(
(k, v) for k, v in self._props.items()
if isinstance(v, type_)
))
@_memoized_configured_property
def _get_clause(self):
"""create a "get clause" based on the primary key. this is used
by query.get() and many-to-one lazyloads to load this item
by primary key.
"""
params = [(primary_key, sql.bindparam(None, type_=primary_key.type))
for primary_key in self.primary_key]
return sql.and_(*[k == v for (k, v) in params]), \
util.column_dict(params)
@_memoized_configured_property
def _equivalent_columns(self):
"""Create a map of all *equivalent* columns, based on
the determination of column pairs that are equated to
one another based on inherit condition. This is designed
to work with the queries that util.polymorphic_union
comes up with, which often don't include the columns from
the base table directly (including the subclass table columns
only).
The resulting structure is a dictionary of columns mapped
to lists of equivalent columns, i.e.
{
tablea.col1:
set([tableb.col1, tablec.col1]),
tablea.col2:
set([tabled.col2])
}
"""
result = util.column_dict()
def visit_binary(binary):
if binary.operator == operators.eq:
if binary.left in result:
result[binary.left].add(binary.right)
else:
result[binary.left] = util.column_set((binary.right,))
if binary.right in result:
result[binary.right].add(binary.left)
else:
result[binary.right] = util.column_set((binary.left,))
for mapper in self.base_mapper.self_and_descendants:
if mapper.inherit_condition is not None:
visitors.traverse(
mapper.inherit_condition, {},
{'binary': visit_binary})
return result
def _is_userland_descriptor(self, obj):
if isinstance(obj, (_MappedAttribute,
instrumentation.ClassManager,
expression.ColumnElement)):
return False
else:
return True
def _should_exclude(self, name, assigned_name, local, column):
"""determine whether a particular property should be implicitly
present on the class.
This occurs when properties are propagated from an inherited class, or
are applied from the columns present in the mapped table.
"""
# check for class-bound attributes and/or descriptors,
# either local or from an inherited class
if local:
if self.class_.__dict__.get(assigned_name, None) is not None \
and self._is_userland_descriptor(
self.class_.__dict__[assigned_name]):
return True
else:
if getattr(self.class_, assigned_name, None) is not None \
and self._is_userland_descriptor(
getattr(self.class_, assigned_name)):
return True
if self.include_properties is not None and \
name not in self.include_properties and \
(column is None or column not in self.include_properties):
self._log("not including property %s" % (name))
return True
if self.exclude_properties is not None and \
(
name in self.exclude_properties or \
(column is not None and column in self.exclude_properties)
):
self._log("excluding property %s" % (name))
return True
return False
def common_parent(self, other):
"""Return true if the given mapper shares a
common inherited parent as this mapper."""
return self.base_mapper is other.base_mapper
def _canload(self, state, allow_subtypes):
s = self.primary_mapper()
if self.polymorphic_on is not None or allow_subtypes:
return _state_mapper(state).isa(s)
else:
return _state_mapper(state) is s
def isa(self, other):
"""Return True if the this mapper inherits from the given mapper."""
m = self
while m and m is not other:
m = m.inherits
return bool(m)
def iterate_to_root(self):
m = self
while m:
yield m
m = m.inherits
@_memoized_configured_property
def self_and_descendants(self):
"""The collection including this mapper and all descendant mappers.
This includes not just the immediately inheriting mappers but
all their inheriting mappers as well.
"""
descendants = []
stack = deque([self])
while stack:
item = stack.popleft()
descendants.append(item)
stack.extend(item._inheriting_mappers)
return util.WeakSequence(descendants)
def polymorphic_iterator(self):
"""Iterate through the collection including this mapper and
all descendant mappers.
This includes not just the immediately inheriting mappers but
all their inheriting mappers as well.
To iterate through an entire hierarchy, use
``mapper.base_mapper.polymorphic_iterator()``.
"""
return iter(self.self_and_descendants)
def primary_mapper(self):
"""Return the primary mapper corresponding to this mapper's class key
(class)."""
return self.class_manager.mapper
@property
def primary_base_mapper(self):
return self.class_manager.mapper.base_mapper
def identity_key_from_row(self, row, adapter=None):
"""Return an identity-map key for use in storing/retrieving an
item from the identity map.
:param row: A :class:`.RowProxy` instance. The columns which are mapped
by this :class:`.Mapper` should be locatable in the row, preferably
via the :class:`.Column` object directly (as is the case when a
:func:`.select` construct is executed), or via string names of the form
``<tablename>_<colname>``.
"""
pk_cols = self.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
return self._identity_class, \
tuple(row[column] for column in pk_cols)
def identity_key_from_primary_key(self, primary_key):
"""Return an identity-map key for use in storing/retrieving an
item from an identity map.
:param primary_key: A list of values indicating the identifier.
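        For example, for a hypothetical mapped ``User`` class with a
        single integer primary key, something like the following would
        be expected::

            from sqlalchemy import inspect

            user_mapper = inspect(User)
            user_mapper.identity_key_from_primary_key([5])
            # roughly -> (User, (5,))
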
"""
return self._identity_class, tuple(primary_key)
def identity_key_from_instance(self, instance):
"""Return the identity key for the given instance, based on
its primary key attributes.
If the instance's state is expired, calling this method
will result in a database check to see if the object has been deleted.
If the row no longer exists,
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
This value is typically also found on the instance state under the
attribute name `key`.
"""
return self.identity_key_from_primary_key(
self.primary_key_from_instance(instance))
def _identity_key_from_state(self, state):
dict_ = state.dict
manager = state.manager
return self._identity_class, tuple([
manager[self._columntoproperty[col].key].\
impl.get(state, dict_, attributes.PASSIVE_OFF)
for col in self.primary_key
])
def primary_key_from_instance(self, instance):
"""Return the list of primary key values for the given
instance.
If the instance's state is expired, calling this method
will result in a database check to see if the object has been deleted.
If the row no longer exists,
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
"""
state = attributes.instance_state(instance)
return self._primary_key_from_state(state)
def _primary_key_from_state(self, state):
dict_ = state.dict
manager = state.manager
return [
manager[self._columntoproperty[col].key].\
impl.get(state, dict_, attributes.PASSIVE_OFF)
for col in self.primary_key
]
def _get_state_attr_by_column(self, state, dict_, column,
passive=attributes.PASSIVE_OFF):
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.get(state, dict_, passive=passive)
def _set_state_attr_by_column(self, state, dict_, column, value):
prop = self._columntoproperty[column]
state.manager[prop.key].impl.set(state, dict_, value, None)
def _get_committed_attr_by_column(self, obj, column):
state = attributes.instance_state(obj)
dict_ = attributes.instance_dict(obj)
return self._get_committed_state_attr_by_column(state, dict_, column)
def _get_committed_state_attr_by_column(self, state, dict_,
column, passive=attributes.PASSIVE_OFF):
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.\
get_committed_value(state, dict_, passive=passive)
def _optimized_get_statement(self, state, attribute_names):
"""assemble a WHERE clause which retrieves a given state by primary
key, using a minimized set of tables.
Applies to a joined-table inheritance mapper where the
requested attribute names are only present on joined tables,
not the base table. The WHERE clause attempts to include
only those tables to minimize joins.
"""
props = self._props
tables = set(chain(
*[sql_util.find_tables(c, check_columns=True)
for key in attribute_names
for c in props[key].columns]
))
if self.base_mapper.local_table in tables:
return None
class ColumnsNotAvailable(Exception):
pass
def visit_binary(binary):
leftcol = binary.left
rightcol = binary.right
if leftcol is None or rightcol is None:
return
if leftcol.table not in tables:
leftval = self._get_committed_state_attr_by_column(
state, state.dict,
leftcol,
passive=attributes.PASSIVE_NO_INITIALIZE)
if leftval is attributes.PASSIVE_NO_RESULT or leftval is None:
raise ColumnsNotAvailable()
binary.left = sql.bindparam(None, leftval,
type_=binary.right.type)
elif rightcol.table not in tables:
rightval = self._get_committed_state_attr_by_column(
state, state.dict,
rightcol,
passive=attributes.PASSIVE_NO_INITIALIZE)
if rightval is attributes.PASSIVE_NO_RESULT or \
rightval is None:
raise ColumnsNotAvailable()
binary.right = sql.bindparam(None, rightval,
type_=binary.right.type)
allconds = []
try:
start = False
for mapper in reversed(list(self.iterate_to_root())):
if mapper.local_table in tables:
start = True
elif not isinstance(mapper.local_table, expression.TableClause):
return None
if start and not mapper.single:
allconds.append(visitors.cloned_traverse(
mapper.inherit_condition,
{},
{'binary': visit_binary}
)
)
except ColumnsNotAvailable:
return None
cond = sql.and_(*allconds)
cols = []
for key in attribute_names:
cols.extend(props[key].columns)
return sql.select(cols, cond, use_labels=True)
def cascade_iterator(self, type_, state, halt_on=None):
"""Iterate each element and its mapper in an object graph,
for all relationships that meet the given cascade rule.
:param type_:
The name of the cascade rule (i.e. save-update, delete,
etc.)
:param state:
The lead InstanceState. child items will be processed per
the relationships defined for this object's mapper.
        The return values are object instances; this provides a strong
reference so that they don't fall out of scope immediately.
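        A rough sketch of external usage, assuming ``obj`` is an
        already-mapped instance (the names here are illustrative)::

            from sqlalchemy import inspect

            state = inspect(obj)          # InstanceState for obj
            it = state.mapper.cascade_iterator('save-update', state)
            for instance, instance_mapper, _, _ in it:
                print(instance_mapper.class_.__name__, instance)
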
"""
visited_states = set()
prp, mpp = object(), object()
visitables = deque([(deque(self._props.values()), prp,
state, state.dict)])
while visitables:
iterator, item_type, parent_state, parent_dict = visitables[-1]
if not iterator:
visitables.pop()
continue
if item_type is prp:
prop = iterator.popleft()
if type_ not in prop.cascade:
continue
queue = deque(prop.cascade_iterator(type_, parent_state,
parent_dict, visited_states, halt_on))
if queue:
visitables.append((queue, mpp, None, None))
elif item_type is mpp:
instance, instance_mapper, corresponding_state, \
corresponding_dict = iterator.popleft()
yield instance, instance_mapper, \
corresponding_state, corresponding_dict
visitables.append((deque(instance_mapper._props.values()),
prp, corresponding_state,
corresponding_dict))
@_memoized_configured_property
def _compiled_cache(self):
return util.LRUCache(self._compiled_cache_size)
@_memoized_configured_property
def _sorted_tables(self):
table_to_mapper = {}
for mapper in self.base_mapper.self_and_descendants:
for t in mapper.tables:
table_to_mapper.setdefault(t, mapper)
extra_dependencies = []
for table, mapper in table_to_mapper.items():
super_ = mapper.inherits
if super_:
extra_dependencies.extend([
(super_table, table)
for super_table in super_.tables
])
def skip(fk):
# attempt to skip dependencies that are not
# significant to the inheritance chain
# for two tables that are related by inheritance.
            # while that dependency may be important, it's technically
# not what we mean to sort on here.
parent = table_to_mapper.get(fk.parent.table)
dep = table_to_mapper.get(fk.column.table)
if parent is not None and \
dep is not None and \
dep is not parent and \
dep.inherit_condition is not None:
cols = set(sql_util._find_columns(dep.inherit_condition))
if parent.inherit_condition is not None:
cols = cols.union(sql_util._find_columns(
parent.inherit_condition))
return fk.parent not in cols and fk.column not in cols
else:
return fk.parent not in cols
return False
sorted_ = sql_util.sort_tables(table_to_mapper,
skip_fn=skip,
extra_dependencies=extra_dependencies)
ret = util.OrderedDict()
for t in sorted_:
ret[t] = table_to_mapper[t]
return ret
def _memo(self, key, callable_):
if key in self._memoized_values:
return self._memoized_values[key]
else:
self._memoized_values[key] = value = callable_()
return value
@util.memoized_property
def _table_to_equated(self):
"""memoized map of tables to collections of columns to be
synchronized upwards to the base mapper."""
result = util.defaultdict(list)
for table in self._sorted_tables:
cols = set(table.c)
for m in self.iterate_to_root():
if m._inherits_equated_pairs and \
cols.intersection(
util.reduce(set.union,
[l.proxy_set for l, r in m._inherits_equated_pairs])
):
result[table].append((m, m._inherits_equated_pairs))
return result
def configure_mappers():
"""Initialize the inter-mapper relationships of all mappers that
have been constructed thus far.
This function can be called any number of times, but in
most cases is handled internally.
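    For example, to trigger configuration eagerly at application
    startup rather than on first use (a sketch; ordinarily this call is
    not needed)::

        from sqlalchemy.orm import configure_mappers

        configure_mappers()
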
"""
if not Mapper._new_mappers:
return
_CONFIGURE_MUTEX.acquire()
try:
global _already_compiling
if _already_compiling:
return
_already_compiling = True
try:
# double-check inside mutex
if not Mapper._new_mappers:
return
Mapper.dispatch(Mapper).before_configured()
# initialize properties on all mappers
# note that _mapper_registry is unordered, which
# may randomly conceal/reveal issues related to
# the order of mapper compilation
for mapper in list(_mapper_registry):
if getattr(mapper, '_configure_failed', False):
e = sa_exc.InvalidRequestError(
"One or more mappers failed to initialize - "
"can't proceed with initialization of other "
"mappers. Original exception was: %s"
% mapper._configure_failed)
e._configure_failed = mapper._configure_failed
raise e
if not mapper.configured:
try:
mapper._post_configure_properties()
mapper._expire_memoizations()
mapper.dispatch.mapper_configured(
mapper, mapper.class_)
except:
exc = sys.exc_info()[1]
if not hasattr(exc, '_configure_failed'):
mapper._configure_failed = exc
raise
Mapper._new_mappers = False
finally:
_already_compiling = False
finally:
_CONFIGURE_MUTEX.release()
Mapper.dispatch(Mapper).after_configured()
def reconstructor(fn):
"""Decorate a method as the 'reconstructor' hook.
Designates a method as the "reconstructor", an ``__init__``-like
method that will be called by the ORM after the instance has been
loaded from the database or otherwise reconstituted.
The reconstructor will be invoked with no arguments. Scalar
(non-collection) database-mapped attributes of the instance will
be available for use within the function. Eagerly-loaded
collections are generally not yet available and will usually only
contain the first element. ORM state changes made to objects at
this stage will not be recorded for the next flush() operation, so
the activity within a reconstructor should be conservative.
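    A minimal sketch, assuming a declarative ``Base`` and a purely
    hypothetical ``MyEntity`` class::

        from sqlalchemy import Column, Integer, String
        from sqlalchemy.orm import reconstructor

        class MyEntity(Base):
            __tablename__ = 'my_entity'
            id = Column(Integer, primary_key=True)
            data = Column(String)

            @reconstructor
            def init_on_load(self):
                # called after a row is loaded; __init__ is bypassed
                self._cache = None
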
"""
fn.__sa_reconstructor__ = True
return fn
def validates(*names, **kw):
"""Decorate a method as a 'validator' for one or more named properties.
Designates a method as a validator, a method which receives the
name of the attribute as well as a value to be assigned, or in the
case of a collection, the value to be added to the collection.
The function can then raise validation exceptions to halt the
process from continuing (where Python's built-in ``ValueError``
and ``AssertionError`` exceptions are reasonable choices), or can
modify or replace the value before proceeding. The function should
otherwise return the given value.
Note that a validator for a collection **cannot** issue a load of that
collection within the validation routine - this usage raises
an assertion to avoid recursion overflows. This is a reentrant
condition which is not supported.
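    A condensed sketch, assuming a declarative ``Base`` and a
    hypothetical ``EmailAddress`` class::

        from sqlalchemy import Column, Integer, String
        from sqlalchemy.orm import validates

        class EmailAddress(Base):
            __tablename__ = 'address'
            id = Column(Integer, primary_key=True)
            email = Column(String)

            @validates('email')
            def validate_email(self, key, value):
                if '@' not in value:
                    raise ValueError("address must contain '@'")
                return value
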
:param \*names: list of attribute names to be validated.
:param include_removes: if True, "remove" events will be
sent as well - the validation function must accept an additional
argument "is_remove" which will be a boolean.
.. versionadded:: 0.7.7
:param include_backrefs: defaults to ``True``; if ``False``, the
validation function will not emit if the originator is an attribute
event related via a backref. This can be used for bi-directional
:func:`.validates` usage where only one validator should emit per
attribute operation.
.. versionadded:: 0.9.0
.. seealso::
:ref:`simple_validators` - usage examples for :func:`.validates`
"""
include_removes = kw.pop('include_removes', False)
include_backrefs = kw.pop('include_backrefs', True)
def wrap(fn):
fn.__sa_validators__ = names
fn.__sa_validation_opts__ = {
"include_removes": include_removes,
"include_backrefs": include_backrefs
}
return fn
return wrap
def _event_on_load(state, ctx):
instrumenting_mapper = state.manager.info[_INSTRUMENTOR]
if instrumenting_mapper._reconstructor:
instrumenting_mapper._reconstructor(state.obj())
def _event_on_first_init(manager, cls):
"""Initial mapper compilation trigger.
instrumentation calls this one when InstanceState
is first generated, and is needed for legacy mutable
attributes to work.
"""
instrumenting_mapper = manager.info.get(_INSTRUMENTOR)
if instrumenting_mapper:
if Mapper._new_mappers:
configure_mappers()
def _event_on_init(state, args, kwargs):
"""Run init_instance hooks.
This also includes mapper compilation, normally not needed
here but helps with some piecemeal configuration
scenarios (such as in the ORM tutorial).
"""
instrumenting_mapper = state.manager.info.get(_INSTRUMENTOR)
if instrumenting_mapper:
if Mapper._new_mappers:
configure_mappers()
if instrumenting_mapper._set_polymorphic_identity:
instrumenting_mapper._set_polymorphic_identity(state)
def _event_on_resurrect(state):
# re-populate the primary key elements
# of the dict based on the mapping.
instrumenting_mapper = state.manager.info.get(_INSTRUMENTOR)
if instrumenting_mapper:
for col, val in zip(instrumenting_mapper.primary_key, state.key[1]):
instrumenting_mapper._set_state_attr_by_column(
state, state.dict, col, val)
class _ColumnMapping(dict):
"""Error reporting helper for mapper._columntoproperty."""
def __init__(self, mapper):
self.mapper = mapper
def __missing__(self, column):
prop = self.mapper._props.get(column)
if prop:
raise orm_exc.UnmappedColumnError(
"Column '%s.%s' is not available, due to "
"conflicting property '%s':%r" % (
column.table.name, column.name, column.key, prop))
raise orm_exc.UnmappedColumnError(
"No column %s is configured on mapper %s..." %
(column, self.mapper))
| gpl-3.0 |
iamdankaufman/beets | beetsplug/info.py | 2 | 2210 | # This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Shows file metadata.
"""
import os
from beets.plugins import BeetsPlugin
from beets import ui
from beets import mediafile
from beets import util
def info(paths):
# Set up fields to output.
fields = list(mediafile.MediaFile.fields())
fields.remove('art')
fields.remove('images')
# Line format.
other_fields = ['album art']
maxwidth = max(len(name) for name in fields + other_fields)
lineformat = u'{{0:>{0}}}: {{1}}'.format(maxwidth)
first = True
for path in paths:
if not first:
ui.print_()
path = util.normpath(path)
if not os.path.isfile(path):
ui.print_(u'not a file: {0}'.format(
util.displayable_path(path)
))
continue
ui.print_(path)
try:
mf = mediafile.MediaFile(path)
except mediafile.UnreadableFileError:
ui.print_('cannot read file: {0}'.format(
util.displayable_path(path)
))
continue
# Basic fields.
for name in fields:
ui.print_(lineformat.format(name, getattr(mf, name)))
# Extra stuff.
ui.print_(lineformat.format('album art', mf.art is not None))
first = False
class InfoPlugin(BeetsPlugin):
def commands(self):
cmd = ui.Subcommand('info', help='show file metadata')
def func(lib, opts, args):
if not args:
raise ui.UserError('no file specified')
info(args)
cmd.func = func
return [cmd]
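# Example invocation, assuming the plugin is enabled via the "plugins"
# option in the beets configuration (the path below is illustrative):
#
#   beet info ~/Music/album/track.mp3
#
# Each MediaFile field is printed on its own line, followed by a final
# "album art" line indicating whether embedded art is present.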
| mit |
anaruse/chainer | tests/chainer_tests/functions_tests/pooling_tests/test_max_pooling_nd.py | 1 | 12854 | import unittest
import functools
import math
import numpy
from operator import mul
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import conv
from chainer_tests.functions_tests.pooling_tests import pooling_nd_helper
@testing.parameterize(*testing.product({
'dims': [(4,), (4, 3), (4, 3, 2), (1, 1, 1, 1)],
'cover_all': [True, False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestMaxPoolingND(unittest.TestCase):
def setUp(self):
self.ndim = len(self.dims)
self.ksize = (3,) * self.ndim
self.stride = (2,) * self.ndim
self.pad = (1,) * self.ndim
        # Avoid instability of numerical gradient
x_shape = (2, 3) + self.dims
self.x = numpy.arange(
functools.reduce(mul, x_shape), dtype=self.dtype).reshape(x_shape)
self.x = 2 * self.x / self.x.size - 1
outs = tuple(conv.get_conv_outsize(d, k, s, p, self.cover_all)
for (d, k, s, p)
in six.moves.zip(
self.dims, self.ksize, self.stride, self.pad))
gy_shape = (2, 3) + outs
self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
self.ggx = numpy.random.uniform(
-1, 1, x_shape).astype(self.dtype)
self.check_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options = {
'atol': 1e-3, 'rtol': 1e-2}
self.check_double_backward_options = {
'atol': 1e-3, 'rtol': 1e-2}
else:
self.check_backward_options = {
'atol': 1e-4, 'rtol': 1e-3}
self.check_double_backward_options = {
'atol': 1e-4, 'rtol': 1e-3}
def check_forward(self, x_data, use_cudnn='always'):
dims = self.dims
ksize = self.ksize
stride = self.stride
pad = self.pad
x = chainer.Variable(x_data)
with chainer.using_config('use_cudnn', use_cudnn):
y = functions.max_pooling_nd(x, ksize, stride=stride, pad=pad,
cover_all=self.cover_all)
self.assertEqual(y.data.dtype, self.dtype)
y_data = cuda.to_cpu(y.data)
self.assertEqual(self.gy.shape, y_data.shape)
patches = pooling_nd_helper.pooling_patches(
dims, ksize, stride, pad, self.cover_all)
for i in six.moves.range(2):
for c in six.moves.range(3):
x = self.x[i, c]
expect = numpy.array([x[idx].max() for idx in patches])
expect = expect.reshape(y_data.shape[2:])
testing.assert_allclose(expect, y_data[i, c])
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x, use_cudnn='never')
def test_forward_cpu_wide(self): # see #120
ndim = self.ndim
x_shape = (2, 3) + (15,) * ndim
x_data = numpy.random.rand(*x_shape).astype(self.dtype)
x = chainer.Variable(x_data)
ksize = stride = int(math.ceil(pow(32, 1.0 / ndim)))
functions.max_pooling_nd(x, ksize, stride=stride, pad=0)
@attr.cudnn
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
@attr.cudnn
@condition.retry(3)
def test_forward_gpu_non_contiguous(self):
self.check_forward(cuda.cupy.asfortranarray(cuda.to_gpu(self.x)))
@attr.gpu
@condition.retry(3)
def test_forward_gpu_no_cudnn(self):
self.check_forward(cuda.to_gpu(self.x), 'never')
def check_forward_consistency_regression(self, x_data, use_cudnn='always'):
# Regression test to max_pooling_2d.
if len(self.dims) != 2:
return
ksize = self.ksize
stride = self.stride
pad = self.pad
with chainer.using_config('use_cudnn', use_cudnn):
y_nd = functions.max_pooling_nd(self.x, ksize, stride=stride,
pad=pad, cover_all=self.cover_all)
y_2d = functions.max_pooling_2d(self.x, ksize, stride=stride,
pad=pad, cover_all=self.cover_all)
testing.assert_allclose(y_nd.data, y_2d.data)
@condition.retry(3)
def test_forward_consistency_regression_cpu(self):
self.check_forward_consistency_regression(self.x)
@attr.cudnn
@condition.retry(3)
def test_forward_consistency_regression_gpu(self):
self.check_forward_consistency_regression(cuda.to_gpu(self.x))
@attr.gpu
@condition.retry(3)
def test_forward_consistency_regression_no_cudnn(self):
self.check_forward_consistency_regression(cuda.to_gpu(self.x), 'never')
def check_backward(self, x_data, y_grad, use_cudnn='always'):
def f(x):
return functions.max_pooling_nd(
x, self.ksize, stride=self.stride, pad=self.pad,
cover_all=self.cover_all)
with chainer.using_config('use_cudnn', use_cudnn):
gradient_check.check_backward(
f, x_data, y_grad, dtype='d', **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.cudnn
@condition.retry(3)
def test_backward_gpu_non_contiguous(self):
self.check_backward(
cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),
cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)))
@attr.gpu
@condition.retry(3)
def test_backward_gpu_no_cudnn(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy), 'never')
def check_backward_consistency_regression(self, x_data, gy_data,
use_cudnn='always'):
# Regression test to two-dimensional max pooling layer.
if len(self.dims) != 2:
return
ksize = self.ksize
stride = self.stride
pad = self.pad
xp = cuda.get_array_module(x_data)
# Backward computation for N-dimensional max pooling layer.
x_nd = chainer.Variable(xp.array(x_data))
with chainer.using_config('use_cudnn', use_cudnn):
func_nd = functions.MaxPoolingND(self.ndim, ksize, stride=stride,
pad=pad, cover_all=self.cover_all)
y_nd = func_nd.apply((x_nd,))[0]
y_nd.grad = gy_data
y_nd.backward()
# Backward computation for two-dimensional max pooling layer.
x_2d = chainer.Variable(xp.array(x_data))
with chainer.using_config('use_cudnn', use_cudnn):
func_2d = functions.MaxPooling2D(ksize, stride=stride, pad=pad,
cover_all=self.cover_all)
y_2d = func_2d.apply((x_2d,))[0]
y_2d.grad = gy_data
y_2d.backward()
# Test that the two result gradients are close enough.
testing.assert_allclose(x_nd.grad, x_2d.grad)
@condition.retry(3)
def test_backward_consistency_regression_cpu(self):
self.check_backward_consistency_regression(self.x, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_consistency_regression_gpu(self):
self.check_backward_consistency_regression(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
@condition.retry(3)
def test_backward_consistency_regression_no_cudnn(self):
self.check_backward_consistency_regression(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), use_cudnn='never')
def test_backward_cpu_more_than_once(self):
func = functions.MaxPoolingND(
self.ndim, self.ksize, stride=self.stride, pad=self.pad,
cover_all=self.cover_all)
func.apply((self.x,))
func.backward((self.x,), (self.gy,))
func.backward((self.x,), (self.gy,))
def check_double_backward(self, x_data, y_grad, x_grad_grad,
use_cudnn='always'):
def f(x):
y = functions.max_pooling_nd(
x, self.ksize, stride=self.stride, pad=self.pad,
cover_all=self.cover_all)
return y * y
with chainer.using_config('use_cudnn', use_cudnn):
gradient_check.check_double_backward(
f, x_data, y_grad, x_grad_grad,
dtype='d',
**self.check_double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.gy, self.ggx, 'never')
@attr.cudnn
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))
@attr.cudnn
def test_double_backward_gpu_non_contiguous(self):
self.check_double_backward(
cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),
cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)),
cuda.cupy.asfortranarray(cuda.to_gpu(self.ggx)))
@attr.gpu
def test_double_backward_gpu_no_cudnn(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx),
'never')
@testing.parameterize(*testing.product({
'dims': [(4, 3, 2), (3, 2), (2,)],
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestMaxPoolingNDCudnnCall(unittest.TestCase):
def setUp(self):
self.ndim = len(self.dims)
self.ksize = (3,) * self.ndim
self.stride = (2,) * self.ndim
self.pad = (1,) * self.ndim
x_shape = (2, 3) + self.dims
self.x = cuda.cupy.arange(functools.reduce(mul, x_shape),
dtype=self.dtype).reshape(x_shape)
gy_shape = (2, 3) + tuple(
conv.get_conv_outsize(d, k, s, p)
for (d, k, s, p)
in six.moves.zip(self.dims, self.ksize, self.stride, self.pad))
self.gy = cuda.cupy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
def forward(self):
x = chainer.Variable(self.x)
return functions.max_pooling_nd(
x, self.ksize, self.stride, self.pad, cover_all=False)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch('cupy.cuda.cudnn.poolingForward') as func:
self.forward()
self.assertEqual(func.called,
chainer.should_use_cudnn('>=auto') and
self.ndim > 1)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
expect = chainer.should_use_cudnn('>=auto') and self.ndim > 1
y = self.forward()
            # should be consistent with forward regardless of use_cudnn config
y.grad = self.gy
with testing.patch('cupy.cuda.cudnn.poolingBackward') as func:
y.backward()
self.assertEqual(func.called, expect)
class TestMaxPoolingNDIndices(unittest.TestCase):
def setUp(self):
self.x = numpy.arange(
2 * 3 * 4 * 4, dtype=numpy.float32).reshape(2, 3, 4, 4)
def _check(self, x):
out, indices = functions.max_pooling_nd(
x, 2, cover_all=False, return_indices=True)
assert isinstance(out, chainer.Variable)
assert isinstance(out.array, type(x))
assert isinstance(indices, type(x))
assert indices.shape == out.array.shape
# Calculate expected indices.
expect = numpy.zeros(indices.shape, dtype=indices.dtype)
for i in six.moves.range(2):
for c in six.moves.range(3):
xx = x[i, c]
expect[i, c] = numpy.array([
[xx[0:2, 0:2].ravel().argmax(),
xx[0:2, 2:4].ravel().argmax()],
[xx[2:4, 0:2].ravel().argmax(),
xx[2:4, 2:4].ravel().argmax()],
])
if out.xp is not numpy:
expect = cuda.to_gpu(expect)
assert (expect == indices).all()
def test_cpu(self):
self._check(self.x)
@attr.gpu
@attr.cudnn
def test_gpu(self):
x = cuda.to_gpu(self.x)
with chainer.using_config('use_cudnn', 'never'):
self._check(x)
with chainer.using_config('use_cudnn', 'always'):
self._check(x)
testing.run_module(__name__, __file__)
| mit |
brennanblue/svgplotlib | svgplotlib/Bar.py | 2 | 6406 | #!python -u
# -*- coding: utf-8 -*-
import sys
import itertools
from svgplotlib import Base
class Bar(Base):
"""
Simple vertical bar plot
Example::
graph = Bar(
(10,50,100),
width = 1000, height = 500,
titleColor = 'blue',
title = 'Simple bar plot',
xlabel = 'X axis',
ylabel = 'Y axis',
grid = True,
)
"""
def __init__(self, values, labels = None, colors = None, **kwargs):
super(Bar,self).__init__(**kwargs)
if labels is None:
labels = [str(i) for i in range(len(values))]
if colors is None:
colors = self.COLORS
grid = kwargs.get('grid', False)
titleColor = kwargs.get('titleColor', 'black')
titleScale = kwargs.get('titleScale', 1.25)
labelColor = kwargs.get('labelColor', 'black')
xlabelColor = kwargs.get('xlabelColor', 'black')
ylabelColor = kwargs.get('ylabelColor', 'black')
style = self.style = {
'stroke' : 'black',
'stroke-width' : '1',
'fill' : 'black',
}
textStyle = self.textStyle = {
'stroke' : 'none',
}
# plot area width and height
width = kwargs.get('width', 500)
height = kwargs.get('height', 500)
assert width > 0 and height > 0, 'width and height must be larger than 0'
aspect = float(width)/height
assert aspect > .2 and aspect < 5., 'aspect must be between .2 and 5'
self.plotWidth = width
self.plotHeight = height
# build yticks
miny = min(values)
maxy = max(values)
if miny == maxy:
miny -= 1
maxy += 1
maxNumSteps = kwargs.get('maxNumSteps', 5)
maxMinSteps = kwargs.get('maxMinSteps', 5)
y1, y2 = self.buildTicks(miny, maxy, maxNumSteps = maxNumSteps, maxMinSteps = maxMinSteps)
self.ymajorTicks, self.yminorTicks = y1, y2
# calculate scale
miny = self.miny = min(min(y1), min(y2 or (sys.maxint,)))
maxy = self.maxy = max(max(y1), max(y2 or (-sys.maxint,)))
self.yscale = self.plotHeight/(maxy - miny)
# main group
g = self.Group(**style)
# label size
delta = self.fontSize + 2*self.PAD
# find height
dy = .5*self.fontSize
title = unicode(kwargs.get('title', ''))
titleSize = None
if title:
titleSize = self.textSize(title)
dy += titleScale*(titleSize.height + titleSize.descent) + self.PAD
h = dy # Top line space
h += self.plotHeight # Plot area
h += delta # xaxis labels
xlabel = unicode(kwargs.get('xlabel', ''))
xlabelSize = None
if xlabel:
xlabelSize = self.textSize(xlabel)
h += xlabelSize.height + xlabelSize.descent + self.PAD
# find width
w = 0
dx = 0
ylabel = unicode(kwargs.get('ylabel', ''))
ylabelSize = None
if ylabel:
ylabelSize = self.textSize(ylabel)
dx += ylabelSize.height + ylabelSize.descent + 2*self.PAD
# yaxis labels
maxSize = 0
for y in self.ymajorTicks:
s = u"%g" % y
size = self.textSize(s)
maxSize = max(maxSize, size.width)
dx += maxSize + self.PAD
w += dx # side space
w += self.plotWidth # Plot area
w += delta + self.PAD
# set total size
self.set('width', w)
self.set('height', h)
# plot title and labels
if title:
xpos = .5*w - .5*titleScale*titleSize.width
ypos = .5*dy + .5*titleScale*titleSize.height - titleScale*titleSize.descent
g.EText(self.font, title, x = xpos, y = ypos, scale = titleScale,
fill = titleColor, **textStyle)
if xlabel:
xpos = .5*w - .5*xlabelSize.width
ypos = h - self.PAD
g.EText(self.font, xlabel, x = xpos, y = ypos, fill = xlabelColor, **textStyle)
if ylabel:
xpos = ylabelSize.height + ylabelSize.descent + self.PAD
ypos = dy + .5*self.plotHeight + .5*ylabelSize.width
g.EText(self.font, ylabel, x = xpos, y = ypos, rotation = -90,
fill = ylabelColor, **textStyle)
# create plot area
plotArea = self.plotArea = g.Group(transform="translate(%g,%g)" % (dx, dy))
plotArea.Rect(x = 0, y = 0, width = self.plotWidth, height = self.plotHeight, fill = 'none')
self.yaxis(0, flip = False)
self.yaxis(self.plotWidth, flip = True, text = False)
if grid:
self.grid()
# plot bars
barPAD = 4*self.PAD
barWidth = (self.plotWidth - 2*(max(1, len(values) - 1))*barPAD) / len(values)
color = itertools.cycle(colors)
x = barPAD
for idx, value in enumerate(values):
barHeight = (value - miny)*self.yscale
y = self.plotHeight - barHeight
plotArea.Rect(x = x, y = y, width = barWidth, height = barHeight, fill = color.next())
s = unicode(labels[idx])
size = self.textSize(s)
xpos = x + .5*barWidth - .5*size.width
ypos = self.plotHeight + 2*self.PAD + .5*size.height
self.plotArea.EText(self.font, s, x = xpos, y = ypos,
fill = labelColor, **self.textStyle)
x += barWidth + barPAD
if __name__ == '__main__':
from svgplotlib.SVG import show
graph = Bar(
(10,50,100),
width = 1000, height = 500,
titleColor = 'blue',
title = 'Simple bar plot',
xlabel = 'X axis',
ylabel = 'Y axis',
grid = True,
)
show(graph, graph.width, graph.height)
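    # Hedged sketch (not part of the original module): Bar also accepts explicit
    # labels and colors arguments; every name and value below is illustrative only.
    graph2 = Bar(
        (3, 7, 2),
        labels = ('low', 'mid', 'high'),
        colors = ('red', 'green', 'blue'),
        width = 400, height = 300,
        title = 'Labelled bar plot',
    )
    show(graph2, graph2.width, graph2.height)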
| bsd-3-clause |
fiji-flo/servo | tests/wpt/web-platform-tests/webdriver/tests/contexts/maximize_window.py | 11 | 8104 | # META: timeout=long
from tests.support.asserts import assert_error, assert_dialog_handled, assert_success
from tests.support.fixtures import create_dialog
from tests.support.inline import inline
alert_doc = inline("<script>window.alert()</script>")
def maximize(session):
return session.transport.send("POST", "session/%s/window/maximize" % session.session_id)
# 10.7.3 Maximize Window
def test_no_browsing_context(session, create_window):
"""
2. If the current top-level browsing context is no longer open,
return error with error code no such window.
"""
session.window_handle = create_window()
session.close()
response = maximize(session)
assert_error(response, "no such window")
def test_handle_prompt_dismiss_and_notify():
"""TODO"""
def test_handle_prompt_accept_and_notify():
"""TODO"""
def test_handle_prompt_ignore():
"""TODO"""
def test_handle_prompt_accept(new_session, add_browser_capabilites):
"""
3. Handle any user prompts and return its value if it is an error.
[...]
In order to handle any user prompts a remote end must take the
following steps:
[...]
2. Perform the following substeps based on the current session's
user prompt handler:
[...]
- accept state
Accept the current user prompt.
"""
_, session = new_session({"capabilities": {"alwaysMatch": add_browser_capabilites({"unhandledPromptBehavior": "accept"})}})
session.url = inline("<title>WD doc title</title>")
create_dialog(session)("alert", text="dismiss #1", result_var="dismiss1")
response = maximize(session)
assert response.status == 200
assert_dialog_handled(session, "dismiss #1")
create_dialog(session)("confirm", text="dismiss #2", result_var="dismiss2")
response = maximize(session)
assert response.status == 200
assert_dialog_handled(session, "dismiss #2")
create_dialog(session)("prompt", text="dismiss #3", result_var="dismiss3")
response = maximize(session)
assert response.status == 200
assert_dialog_handled(session, "dismiss #3")
def test_handle_prompt_missing_value(session, create_dialog):
"""
3. Handle any user prompts and return its value if it is an error.
[...]
In order to handle any user prompts a remote end must take the
following steps:
[...]
2. Perform the following substeps based on the current session's
user prompt handler:
[...]
- missing value default state
1. Dismiss the current user prompt.
2. Return error with error code unexpected alert open.
"""
session.url = inline("<title>WD doc title</title>")
create_dialog("alert", text="dismiss #1", result_var="dismiss1")
response = maximize(session)
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, "dismiss #1")
create_dialog("confirm", text="dismiss #2", result_var="dismiss2")
response = maximize(session)
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, "dismiss #2")
create_dialog("prompt", text="dismiss #3", result_var="dismiss3")
response = maximize(session)
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, "dismiss #3")
def test_fully_exit_fullscreen(session):
"""
4. Fully exit fullscreen.
[...]
To fully exit fullscreen a document document, run these steps:
1. If document's fullscreen element is null, terminate these steps.
2. Unfullscreen elements whose fullscreen flag is set, within
document's top layer, except for document's fullscreen element.
3. Exit fullscreen document.
"""
session.window.fullscreen()
assert session.execute_script("return window.fullScreen") is True
response = maximize(session)
assert_success(response)
assert session.execute_script("return window.fullScreen") is False
def test_restore_the_window(session):
"""
5. Restore the window.
[...]
To restore the window, given an operating system level window with
an associated top-level browsing context, run implementation-specific
steps to restore or unhide the window to the visible screen. Do not
return from this operation until the visibility state of the top-level
browsing context's active document has reached the visible state,
or until the operation times out.
"""
session.window.minimize()
assert session.execute_script("return document.hidden") is True
response = maximize(session)
assert_success(response)
def test_maximize(session):
"""
6. Maximize the window of the current browsing context.
[...]
To maximize the window, given an operating system level window with an
associated top-level browsing context, run the implementation-specific
steps to transition the operating system level window into the
maximized window state. If the window manager supports window
    resizing but does not have a concept of window maximization, the window
dimensions must be increased to the maximum available size permitted
by the window manager for the current screen. Return when the window
has completed the transition, or within an implementation-defined
timeout.
"""
before_size = session.window.size
response = maximize(session)
assert_success(response)
assert before_size != session.window.size
def test_payload(session):
"""
7. Return success with the JSON serialization of the current top-level
browsing context's window rect.
[...]
A top-level browsing context's window rect is defined as a
dictionary of the screenX, screenY, width and height attributes of
the WindowProxy. Its JSON representation is the following:
"x"
WindowProxy's screenX attribute.
"y"
WindowProxy's screenY attribute.
"width"
Width of the top-level browsing context's outer dimensions,
including any browser chrome and externally drawn window
decorations in CSS reference pixels.
"height"
Height of the top-level browsing context's outer dimensions,
including any browser chrome and externally drawn window
decorations in CSS reference pixels.
"""
before_size = session.window.size
response = maximize(session)
# step 5
assert response.status == 200
assert isinstance(response.body["value"], dict)
value = response.body["value"]
assert "width" in value
assert "height" in value
assert "x" in value
assert "y" in value
assert isinstance(value["width"], int)
assert isinstance(value["height"], int)
assert isinstance(value["x"], int)
assert isinstance(value["y"], int)
assert before_size != session.window.size
def test_maximize_twice_is_idempotent(session):
first_response = maximize(session)
assert_success(first_response)
max_size = session.window.size
second_response = maximize(session)
assert_success(second_response)
assert session.window.size == max_size
"""
TODO(ato): Implicit session start does not use configuration passed on
from wptrunner. This causes an exception.
See https://bugzil.la/1398459.
def test_maximize_when_resized_to_max_size(session):
# Determine the largest available window size by first maximising
# the window and getting the window rect dimensions.
#
# Then resize the window to the maximum available size.
session.end()
available = session.window.maximize()
session.end()
session.window.size = available
# In certain window managers a window extending to the full available
# dimensions of the screen may not imply that the window is maximised,
# since this is often a special state. If a remote end expects a DOM
# resize event, this may not fire if the window has already reached
# its expected dimensions.
before = session.window.size
session.window.maximize()
assert session.window.size == before
"""
| mpl-2.0 |
nthiep/global-ssh-server | lib/python2.7/site-packages/django/contrib/gis/db/models/manager.py | 505 | 3578 | from django.db.models.manager import Manager
from django.contrib.gis.db.models.query import GeoQuerySet
class GeoManager(Manager):
"Overrides Manager to return Geographic QuerySets."
# This manager should be used for queries on related fields
# so that geometry columns on Oracle and MySQL are selected
# properly.
use_for_related_fields = True
def get_query_set(self):
return GeoQuerySet(self.model, using=self._db)
def area(self, *args, **kwargs):
return self.get_query_set().area(*args, **kwargs)
def centroid(self, *args, **kwargs):
return self.get_query_set().centroid(*args, **kwargs)
def collect(self, *args, **kwargs):
return self.get_query_set().collect(*args, **kwargs)
def difference(self, *args, **kwargs):
return self.get_query_set().difference(*args, **kwargs)
def distance(self, *args, **kwargs):
return self.get_query_set().distance(*args, **kwargs)
def envelope(self, *args, **kwargs):
return self.get_query_set().envelope(*args, **kwargs)
def extent(self, *args, **kwargs):
return self.get_query_set().extent(*args, **kwargs)
def extent3d(self, *args, **kwargs):
return self.get_query_set().extent3d(*args, **kwargs)
def force_rhr(self, *args, **kwargs):
return self.get_query_set().force_rhr(*args, **kwargs)
def geohash(self, *args, **kwargs):
return self.get_query_set().geohash(*args, **kwargs)
def geojson(self, *args, **kwargs):
return self.get_query_set().geojson(*args, **kwargs)
def gml(self, *args, **kwargs):
return self.get_query_set().gml(*args, **kwargs)
def intersection(self, *args, **kwargs):
return self.get_query_set().intersection(*args, **kwargs)
def kml(self, *args, **kwargs):
return self.get_query_set().kml(*args, **kwargs)
def length(self, *args, **kwargs):
return self.get_query_set().length(*args, **kwargs)
def make_line(self, *args, **kwargs):
return self.get_query_set().make_line(*args, **kwargs)
def mem_size(self, *args, **kwargs):
return self.get_query_set().mem_size(*args, **kwargs)
def num_geom(self, *args, **kwargs):
return self.get_query_set().num_geom(*args, **kwargs)
def num_points(self, *args, **kwargs):
return self.get_query_set().num_points(*args, **kwargs)
def perimeter(self, *args, **kwargs):
return self.get_query_set().perimeter(*args, **kwargs)
def point_on_surface(self, *args, **kwargs):
return self.get_query_set().point_on_surface(*args, **kwargs)
def reverse_geom(self, *args, **kwargs):
return self.get_query_set().reverse_geom(*args, **kwargs)
def scale(self, *args, **kwargs):
return self.get_query_set().scale(*args, **kwargs)
def snap_to_grid(self, *args, **kwargs):
return self.get_query_set().snap_to_grid(*args, **kwargs)
def svg(self, *args, **kwargs):
return self.get_query_set().svg(*args, **kwargs)
def sym_difference(self, *args, **kwargs):
return self.get_query_set().sym_difference(*args, **kwargs)
def transform(self, *args, **kwargs):
return self.get_query_set().transform(*args, **kwargs)
def translate(self, *args, **kwargs):
return self.get_query_set().translate(*args, **kwargs)
def union(self, *args, **kwargs):
return self.get_query_set().union(*args, **kwargs)
def unionagg(self, *args, **kwargs):
return self.get_query_set().unionagg(*args, **kwargs)
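# Hedged usage sketch (not part of the original module): a model opts in to
# these geographic queryset methods by assigning this manager. The model and
# field names below are illustrative only.
#
#   from django.contrib.gis.db import models
#
#   class City(models.Model):
#       name = models.CharField(max_length=50)
#       point = models.PointField()
#       objects = models.GeoManager()
#
#   City.objects.filter(name='Oslo').distance(pnt)  # delegates to GeoQuerySet.distance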
| agpl-3.0 |
code4futuredotorg/reeborg_tw | src/libraries/Brython3.2.3/Lib/encodings/iso8859_10.py | 272 | 13589 | """ Python Character Mapping Codec iso8859_10 generated from 'MAPPINGS/ISO8859/8859-10.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-10',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u0112' # 0xA2 -> LATIN CAPITAL LETTER E WITH MACRON
'\u0122' # 0xA3 -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u012a' # 0xA4 -> LATIN CAPITAL LETTER I WITH MACRON
'\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE
'\u0136' # 0xA6 -> LATIN CAPITAL LETTER K WITH CEDILLA
'\xa7' # 0xA7 -> SECTION SIGN
'\u013b' # 0xA8 -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u0110' # 0xA9 -> LATIN CAPITAL LETTER D WITH STROKE
'\u0160' # 0xAA -> LATIN CAPITAL LETTER S WITH CARON
'\u0166' # 0xAB -> LATIN CAPITAL LETTER T WITH STROKE
'\u017d' # 0xAC -> LATIN CAPITAL LETTER Z WITH CARON
'\xad' # 0xAD -> SOFT HYPHEN
'\u016a' # 0xAE -> LATIN CAPITAL LETTER U WITH MACRON
'\u014a' # 0xAF -> LATIN CAPITAL LETTER ENG
'\xb0' # 0xB0 -> DEGREE SIGN
'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
'\u0113' # 0xB2 -> LATIN SMALL LETTER E WITH MACRON
'\u0123' # 0xB3 -> LATIN SMALL LETTER G WITH CEDILLA
'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
'\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE
'\u0137' # 0xB6 -> LATIN SMALL LETTER K WITH CEDILLA
'\xb7' # 0xB7 -> MIDDLE DOT
'\u013c' # 0xB8 -> LATIN SMALL LETTER L WITH CEDILLA
'\u0111' # 0xB9 -> LATIN SMALL LETTER D WITH STROKE
'\u0161' # 0xBA -> LATIN SMALL LETTER S WITH CARON
'\u0167' # 0xBB -> LATIN SMALL LETTER T WITH STROKE
'\u017e' # 0xBC -> LATIN SMALL LETTER Z WITH CARON
'\u2015' # 0xBD -> HORIZONTAL BAR
'\u016b' # 0xBE -> LATIN SMALL LETTER U WITH MACRON
'\u014b' # 0xBF -> LATIN SMALL LETTER ENG
'\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK
'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
'\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA
'\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\u0168' # 0xD7 -> LATIN CAPITAL LETTER U WITH TILDE
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
'\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK
'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
'\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA
'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\u0169' # 0xF7 -> LATIN SMALL LETTER U WITH TILDE
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
'\u0138' # 0xFF -> LATIN SMALL LETTER KRA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
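# Hedged usage sketch (not part of the generated module): once the encodings
# package registers this codec under the name 'iso8859-10', the tables above
# drive both directions of the conversion, for example:
#
#   b'\xa1'.decode('iso8859-10')   # -> '\u0104' (LATIN CAPITAL LETTER A WITH OGONEK)
#   '\u0104'.encode('iso8859-10')  # -> b'\xa1'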
| agpl-3.0 |
xolox/python-deb-pkg-tools | deb_pkg_tools/config.py | 1 | 2091 | # Debian packaging tools: Configuration defaults.
#
# Author: Peter Odding <[email protected]>
# Last Change: February 6, 2020
# URL: https://github.com/xolox/python-deb-pkg-tools
"""Configuration defaults for the `deb-pkg-tools` package."""
# Standard library modules.
import os
# External dependencies.
from humanfriendly import parse_path
# Public identifiers that require documentation.
__all__ = (
"package_cache_directory",
"repo_config_file",
"system_cache_directory",
"system_config_directory",
"user_cache_directory",
"user_config_directory",
)
system_config_directory = '/etc/deb-pkg-tools'
"""The pathname of the global (system wide) configuration directory used by `deb-pkg-tools` (a string)."""
system_cache_directory = '/var/cache/deb-pkg-tools'
"""The pathname of the global (system wide) package cache directory (a string)."""
user_config_directory = parse_path('~/.deb-pkg-tools')
"""
The pathname of the current user's configuration directory used by `deb-pkg-tools` (a string).
:default: The expanded value of ``~/.deb-pkg-tools``.
"""
user_cache_directory = parse_path('~/.cache/deb-pkg-tools')
"""
The pathname of the current user's package cache directory (a string).
:default: The expanded value of ``~/.cache/deb-pkg-tools``.
"""
# The location of the package cache. If we're running as root we have write
# access to the system wide package cache so we'll pick that; the more users
# sharing this cache the more effective it is.
package_cache_directory = system_cache_directory if os.getuid() == 0 else user_cache_directory
"""
The pathname of the selected package cache directory (a string).
:default: The value of :data:`system_cache_directory` when running as ``root``,
the value of :data:`user_cache_directory` otherwise.
"""
repo_config_file = 'repos.ini'
"""
The base name of the configuration file with user-defined Debian package repositories (a string).
This configuration file is loaded from :data:`system_config_directory` and/or
:data:`user_config_directory`.
:default: The string ``repos.ini``.
"""
| mit |
40223114/2015_g4 | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/sprite.py | 603 | 55779 | ## pygame - Python Game Library
## Copyright (C) 2000-2003, 2007 Pete Shinners
## (C) 2004 Joe Wreschnig
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## [email protected]
"""pygame module with basic game object classes
This module contains several simple classes to be used within games. There
are the main Sprite class and several Group classes that contain Sprites.
The use of these classes is entirely optional when using Pygame. The classes
are fairly lightweight and only provide a starting place for the code
that is common to most games.
The Sprite class is intended to be used as a base class for the different
types of objects in the game. There is also a base Group class that simply
stores sprites. A game could create new types of Group classes that operate
on specially customized Sprite instances they contain.
The basic Sprite class can draw the Sprites it contains to a Surface. The
Group.draw() method requires that each Sprite have a Surface.image attribute
and a Surface.rect. The Group.clear() method requires these same attributes
and can be used to erase all the Sprites with background. There are also
more advanced Groups: pygame.sprite.RenderUpdates() and
pygame.sprite.OrderedUpdates().
Lastly, this module contains several collision functions. These help find
sprites inside multiple groups that have intersecting bounding rectangles.
To find the collisions, the Sprites are required to have a Surface.rect
attribute assigned.
The groups are designed for high efficiency in removing and adding Sprites
to them. They also allow cheap testing to see if a Sprite already exists in
a Group. A given Sprite can exist in any number of groups. A game could use
some groups to control object rendering, and a completely separate set of
groups to control interaction or player movement. Instead of adding type
attributes or bools to a derived Sprite class, consider keeping the
Sprites inside organized Groups. This will allow for easier lookup later
in the game.
Sprites and Groups manage their relationships with the add() and remove()
methods. These methods can accept a single or multiple group arguments for
membership. The default initializers for these classes also take a
single group or list of groups as arguments for initial membership. It is safe
to repeatedly add and remove the same Sprite from a Group.
While it is possible to design sprite and group classes that don't derive
from the Sprite and AbstractGroup classes below, it is strongly recommended
that you extend those when you create a new Sprite or Group class.
Sprites are not thread safe, so lock them yourself if using threads.
"""
##todo
## a group that holds only the 'n' most recent elements.
## sort of like the GroupSingle class, but holding more
## than one sprite
##
## drawing groups that can 'automatically' store the area
## underneath so they can "clear" without needing a background
## function. obviously a little slower than normal, but nice
## to use in many situations. (also remember it must "clear"
## in the reverse order that it draws :])
##
## the drawing groups should also be able to take a background
## function, instead of just a background surface. the function
## would take a surface and a rectangle on that surface to erase.
##
## perhaps more types of collision functions? the current two
## should handle just about every need, but perhaps more optimized
## specific ones that aren't quite so general but fit into common
## specialized cases.
import pygame
from pygame.rect import Rect
from pygame.time import get_ticks
from operator import truth
# Python 3 does not have the callable function, but an equivalent can be made
# with the hasattr function.
#if 'callable' not in dir(__builtins__):
callable = lambda obj: hasattr(obj, '__call__')
# Don't depend on pygame.mask if it's not there...
try:
from pygame.mask import from_surface
except:
pass
class Sprite(object):
"""simple base class for visible game objects
pygame.sprite.Sprite(*groups): return Sprite
The base class for visible game objects. Derived classes will want to
override the Sprite.update() method and assign Sprite.image and Sprite.rect
attributes. The initializer can accept any number of Group instances that
the Sprite will become a member of.
When subclassing the Sprite class, be sure to call the base initializer
before adding the Sprite to Groups.
"""
def __init__(self, *groups):
self.__g = {} # The groups the sprite is in
if groups:
self.add(*groups)
def add(self, *groups):
"""add the sprite to groups
Sprite.add(*groups): return None
Any number of Group instances can be passed as arguments. The
Sprite will be added to the Groups it is not already a member of.
"""
has = self.__g.__contains__
for group in groups:
if hasattr(group, '_spritegroup'):
if not has(group):
group.add_internal(self)
self.add_internal(group)
else:
self.add(*group)
def remove(self, *groups):
"""remove the sprite from groups
Sprite.remove(*groups): return None
Any number of Group instances can be passed as arguments. The Sprite
will be removed from the Groups it is currently a member of.
"""
has = self.__g.__contains__
for group in groups:
if hasattr(group, '_spritegroup'):
if has(group):
group.remove_internal(self)
self.remove_internal(group)
else:
self.remove(*group)
def add_internal(self, group):
self.__g[group] = 0
def remove_internal(self, group):
del self.__g[group]
def update(self, *args):
"""method to control sprite behavior
Sprite.update(*args):
The default implementation of this method does nothing; it's just a
convenient "hook" that you can override. This method is called by
Group.update() with whatever arguments you give it.
There is no need to use this method if not using the convenience
method by the same name in the Group class.
"""
pass
def kill(self):
"""remove the Sprite from all Groups
Sprite.kill(): return None
The Sprite is removed from all the Groups that contain it. This won't
change anything about the state of the Sprite. It is possible to
continue to use the Sprite after this method has been called, including
adding it to Groups.
"""
for c in self.__g:
c.remove_internal(self)
self.__g.clear()
def groups(self):
"""list of Groups that contain this Sprite
Sprite.groups(): return group_list
Returns a list of all the Groups that contain this Sprite.
"""
return list(self.__g)
def alive(self):
"""does the sprite belong to any groups
Sprite.alive(): return bool
Returns True when the Sprite belongs to one or more Groups.
"""
return truth(self.__g)
def __repr__(self):
return "<%s sprite(in %d groups)>" % (self.__class__.__name__, len(self.__g))
class DirtySprite(Sprite):
"""a more featureful subclass of Sprite with more attributes
pygame.sprite.DirtySprite(*groups): return DirtySprite
Extra DirtySprite attributes with their default values:
dirty = 1
If set to 1, it is repainted and then set to 0 again.
If set to 2, it is always dirty (repainted each frame;
flag is not reset).
If set to 0, it is not dirty and therefore not repainted again.
blendmode = 0
It's the special_flags argument of Surface.blit; see the blendmodes in
the Surface.blit documentation
source_rect = None
This is the source rect to use. Remember that it is relative to the top
left corner (0, 0) of self.image.
visible = 1
Normally this is 1. If set to 0, it will not be repainted. (If you
change visible to 1, you must set dirty to 1 for it to be erased from
the screen.)
_layer = 0
A READ ONLY value, it is read when adding it to the LayeredUpdates
group. For details see documentation of sprite.LayeredUpdates.
"""
def __init__(self, *groups):
self.dirty = 1
self.blendmode = 0 # pygame 1.8, referred to as special_flags in
# the documentation of Surface.blit
self._visible = 1
self._layer = 0 # READ ONLY by LayeredUpdates or LayeredDirty
self.source_rect = None
Sprite.__init__(self, *groups)
def _set_visible(self, val):
"""set the visible value (0 or 1) and makes the sprite dirty"""
self._visible = val
if self.dirty < 2:
self.dirty = 1
def _get_visible(self):
"""return the visible value of that sprite"""
return self._visible
visible = property(lambda self: self._get_visible(),
lambda self, value: self._set_visible(value),
doc="you can make this sprite disappear without "
"removing it from the group,\n"
"assign 0 for invisible and 1 for visible")
def __repr__(self):
return "<%s DirtySprite(in %d groups)>" % \
(self.__class__.__name__, len(self.groups()))
class AbstractGroup(object):
"""base class for containers of sprites
AbstractGroup does everything needed to behave as a normal group. You can
easily subclass a new group class from this or the other groups below if
you want to add more features.
Any AbstractGroup-derived sprite groups act like sequences and support
iteration, len, and so on.
"""
# dummy val to identify sprite groups, and avoid infinite recursion
_spritegroup = True
def __init__(self):
self.spritedict = {}
self.lostsprites = []
def sprites(self):
"""get a list of sprites in the group
Group.sprite(): return list
Returns an object that can be looped over with a 'for' loop. (For now,
it is always a list, but this could change in a future version of
pygame.) Alternatively, you can get the same information by iterating
directly over the sprite group, e.g. 'for sprite in group'.
"""
return list(self.spritedict)
def add_internal(self, sprite):
self.spritedict[sprite] = 0
def remove_internal(self, sprite):
r = self.spritedict[sprite]
if r:
self.lostsprites.append(r)
del self.spritedict[sprite]
def has_internal(self, sprite):
return sprite in self.spritedict
def copy(self):
"""copy a group with all the same sprites
Group.copy(): return Group
Returns a copy of the group that is an instance of the same class
and has the same sprites in it.
"""
return self.__class__(self.sprites())
def __iter__(self):
return iter(self.sprites())
def __contains__(self, sprite):
return self.has(sprite)
def add(self, *sprites):
"""add sprite(s) to group
Group.add(sprite, list, group, ...): return None
Adds a sprite or sequence of sprites to a group.
"""
for sprite in sprites:
# It's possible that some sprite is also an iterator.
# If this is the case, we should add the sprite itself,
# and not the iterator object.
if isinstance(sprite, Sprite):
if not self.has_internal(sprite):
self.add_internal(sprite)
sprite.add_internal(self)
else:
try:
# See if sprite is an iterator, like a list or sprite
# group.
self.add(*sprite)
except (TypeError, AttributeError):
# Not iterable. This is probably a sprite that is not an
# instance of the Sprite class or is not an instance of a
# subclass of the Sprite class. Alternately, it could be an
# old-style sprite group.
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if not self.has_internal(spr):
self.add_internal(spr)
spr.add_internal(self)
elif not self.has_internal(sprite):
self.add_internal(sprite)
sprite.add_internal(self)
def remove(self, *sprites):
"""remove sprite(s) from group
Group.remove(sprite, list, or group, ...): return None
Removes a sprite or sequence of sprites from a group.
"""
# This function behaves essentially the same as Group.add. It first
# tries to handle each argument as an instance of the Sprite class. If
        # that fails, then it tries to handle the argument as an iterable
        # object. If that fails, then it tries to handle the argument as an
# old-style sprite group. Lastly, if that fails, it assumes that the
# normal Sprite methods should be used.
for sprite in sprites:
if isinstance(sprite, Sprite):
if self.has_internal(sprite):
self.remove_internal(sprite)
sprite.remove_internal(self)
else:
try:
self.remove(*sprite)
except (TypeError, AttributeError):
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if self.has_internal(spr):
self.remove_internal(spr)
spr.remove_internal(self)
elif self.has_internal(sprite):
self.remove_internal(sprite)
sprite.remove_internal(self)
def has(self, *sprites):
"""ask if group has a sprite or sprites
Group.has(sprite or group, ...): return bool
Returns True if the given sprite or sprites are contained in the
group. Alternatively, you can get the same information using the
'in' operator, e.g. 'sprite in group', 'subgroup in group'.
"""
return_value = False
for sprite in sprites:
if isinstance(sprite, Sprite):
# Check for Sprite instance's membership in this group
if self.has_internal(sprite):
return_value = True
else:
return False
else:
try:
if self.has(*sprite):
return_value = True
else:
return False
except (TypeError, AttributeError):
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if self.has_internal(spr):
return_value = True
else:
return False
else:
if self.has_internal(sprite):
return_value = True
else:
return False
return return_value
def update(self, *args):
"""call the update method of every member sprite
Group.update(*args): return None
Calls the update method of every member sprite. All arguments that
were passed to this method are passed to the Sprite update function.
"""
for s in self.sprites():
s.update(*args)
def draw(self, surface):
"""draw all sprites onto the surface
Group.draw(surface): return None
Draws all of the member sprites onto the given surface.
"""
#from javascript import console
sprites = self.sprites()
surface_blit = surface.blit
for spr in sprites:
#console.log(spr.image, spr.rect)
#console.log(spr.image._canvas.width, spr.image._canvas.height)
self.spritedict[spr] = surface_blit(spr.image, spr.rect)
self.lostsprites = []
def clear(self, surface, bgd):
"""erase the previous position of all sprites
Group.clear(surface, bgd): return None
Clears the area under every drawn sprite in the group. The bgd
argument should be Surface which is the same dimensions as the
screen surface. The bgd could also be a function which accepts
the given surface and the area to be cleared as arguments.
"""
if callable(bgd):
for r in self.lostsprites:
bgd(surface, r)
for r in self.spritedict.values():
if r:
bgd(surface, r)
else:
surface_blit = surface.blit
for r in self.lostsprites:
surface_blit(bgd, r, r)
for r in self.spritedict.values():
if r:
surface_blit(bgd, r, r)
def empty(self):
"""remove all sprites
Group.empty(): return None
Removes all the sprites from the group.
"""
for s in self.sprites():
self.remove_internal(s)
s.remove_internal(self)
def __nonzero__(self):
return truth(self.sprites())
def __len__(self):
"""return number of sprites in group
Group.len(group): return int
Returns the number of sprites contained in the group.
"""
return len(self.sprites())
def __repr__(self):
return "<%s(%d sprites)>" % (self.__class__.__name__, len(self))
class Group(AbstractGroup):
"""container class for many Sprites
pygame.sprite.Group(*sprites): return Group
A simple container for Sprite objects. This class can be subclassed to
create containers with more specific behaviors. The constructor takes any
number of Sprite arguments to add to the Group. The group supports the
following standard Python operations:
in test if a Sprite is contained
len the number of Sprites contained
bool test if any Sprites are contained
iter iterate through all the Sprites
The Sprites in the Group are not ordered, so the Sprites are drawn and
iterated over in no particular order.
"""
def __init__(self, *sprites):
AbstractGroup.__init__(self)
self.add(*sprites)
RenderPlain = Group
RenderClear = Group
class RenderUpdates(Group):
"""Group class that tracks dirty updates
pygame.sprite.RenderUpdates(*sprites): return RenderUpdates
This class is derived from pygame.sprite.Group(). It has an enhanced draw
method that tracks the changed areas of the screen.
"""
def draw(self, surface):
spritedict = self.spritedict
surface_blit = surface.blit
dirty = self.lostsprites
self.lostsprites = []
dirty_append = dirty.append
for s in self.sprites():
r = spritedict[s]
newrect = surface_blit(s.image, s.rect)
if r:
if newrect.colliderect(r):
dirty_append(newrect.union(r))
else:
dirty_append(newrect)
dirty_append(r)
else:
dirty_append(newrect)
spritedict[s] = newrect
return dirty
class OrderedUpdates(RenderUpdates):
"""RenderUpdates class that draws Sprites in order of addition
pygame.sprite.OrderedUpdates(*spites): return OrderedUpdates
This class derives from pygame.sprite.RenderUpdates(). It maintains
the order in which the Sprites were added to the Group for rendering.
This makes adding and removing Sprites from the Group a little
slower than regular Groups.
"""
def __init__(self, *sprites):
self._spritelist = []
RenderUpdates.__init__(self, *sprites)
def sprites(self):
return list(self._spritelist)
def add_internal(self, sprite):
RenderUpdates.add_internal(self, sprite)
self._spritelist.append(sprite)
def remove_internal(self, sprite):
RenderUpdates.remove_internal(self, sprite)
self._spritelist.remove(sprite)
class LayeredUpdates(AbstractGroup):
"""LayeredUpdates Group handles layers, which are drawn like OrderedUpdates
pygame.sprite.LayeredUpdates(*spites, **kwargs): return LayeredUpdates
This group is fully compatible with pygame.sprite.Sprite.
New in pygame 1.8.0
"""
_init_rect = Rect(0, 0, 0, 0)
def __init__(self, *sprites, **kwargs):
"""initialize an instance of LayeredUpdates with the given attributes
You can set the default layer through kwargs using 'default_layer'
and an integer for the layer. The default layer is 0.
If the sprite you add has an attribute _layer, then that layer will be
used. If **kwarg contains 'layer', then the passed sprites will be
added to that layer (overriding the sprite._layer attribute). If
neither the sprite nor **kwarg has a 'layer', then the default layer is
used to add the sprites.
"""
self._spritelayers = {}
self._spritelist = []
AbstractGroup.__init__(self)
self._default_layer = kwargs.get('default_layer', 0)
self.add(*sprites, **kwargs)
def add_internal(self, sprite, layer=None):
"""Do not use this method directly.
It is used by the group to add a sprite internally.
"""
self.spritedict[sprite] = self._init_rect
if layer is None:
try:
layer = sprite._layer
except AttributeError:
layer = sprite._layer = self._default_layer
elif hasattr(sprite, '_layer'):
sprite._layer = layer
sprites = self._spritelist # speedup
sprites_layers = self._spritelayers
sprites_layers[sprite] = layer
# add the sprite at the right position
        # bisect algorithm
leng = len(sprites)
low = mid = 0
high = leng - 1
while low <= high:
mid = low + (high - low) // 2
if sprites_layers[sprites[mid]] <= layer:
low = mid + 1
else:
high = mid - 1
# linear search to find final position
while mid < leng and sprites_layers[sprites[mid]] <= layer:
mid += 1
sprites.insert(mid, sprite)
def add(self, *sprites, **kwargs):
"""add a sprite or sequence of sprites to a group
LayeredUpdates.add(*sprites, **kwargs): return None
If the sprite you add has an attribute _layer, then that layer will be
used. If **kwarg contains 'layer', then the passed sprites will be
added to that layer (overriding the sprite._layer attribute). If
neither the sprite nor **kwarg has a 'layer', then the default layer is
used to add the sprites.
"""
if not sprites:
return
if 'layer' in kwargs:
layer = kwargs['layer']
else:
layer = None
for sprite in sprites:
# It's possible that some sprite is also an iterator.
# If this is the case, we should add the sprite itself,
# and not the iterator object.
if isinstance(sprite, Sprite):
if not self.has_internal(sprite):
self.add_internal(sprite, layer)
sprite.add_internal(self)
else:
try:
# See if sprite is an iterator, like a list or sprite
# group.
self.add(*sprite, **kwargs)
except (TypeError, AttributeError):
# Not iterable. This is probably a sprite that is not an
# instance of the Sprite class or is not an instance of a
# subclass of the Sprite class. Alternately, it could be an
# old-style sprite group.
if hasattr(sprite, '_spritegroup'):
for spr in sprite.sprites():
if not self.has_internal(spr):
self.add_internal(spr, layer)
spr.add_internal(self)
elif not self.has_internal(sprite):
self.add_internal(sprite, layer)
sprite.add_internal(self)
def remove_internal(self, sprite):
"""Do not use this method directly.
The group uses it to add a sprite.
"""
self._spritelist.remove(sprite)
# these dirty rects are suboptimal for one frame
r = self.spritedict[sprite]
if r is not self._init_rect:
self.lostsprites.append(r) # dirty rect
if hasattr(sprite, 'rect'):
self.lostsprites.append(sprite.rect) # dirty rect
del self.spritedict[sprite]
del self._spritelayers[sprite]
def sprites(self):
"""return a ordered list of sprites (first back, last top).
LayeredUpdates.sprites(): return sprites
"""
return list(self._spritelist)
def draw(self, surface):
"""draw all sprites in the right order onto the passed surface
LayeredUpdates.draw(surface): return Rect_list
"""
spritedict = self.spritedict
surface_blit = surface.blit
dirty = self.lostsprites
self.lostsprites = []
dirty_append = dirty.append
init_rect = self._init_rect
for spr in self.sprites():
rec = spritedict[spr]
newrect = surface_blit(spr.image, spr.rect)
if rec is init_rect:
dirty_append(newrect)
else:
if newrect.colliderect(rec):
dirty_append(newrect.union(rec))
else:
dirty_append(newrect)
dirty_append(rec)
spritedict[spr] = newrect
return dirty
def get_sprites_at(self, pos):
"""return a list with all sprites at that position
LayeredUpdates.get_sprites_at(pos): return colliding_sprites
Bottom sprites are listed first; the top ones are listed last.
"""
_sprites = self._spritelist
rect = Rect(pos, (0, 0))
colliding_idx = rect.collidelistall(_sprites)
colliding = [_sprites[i] for i in colliding_idx]
return colliding
def get_sprite(self, idx):
"""return the sprite at the index idx from the groups sprites
LayeredUpdates.get_sprite(idx): return sprite
Raises IndexOutOfBounds if the idx is not within range.
"""
return self._spritelist[idx]
def remove_sprites_of_layer(self, layer_nr):
"""remove all sprites from a layer and return them as a list
LayeredUpdates.remove_sprites_of_layer(layer_nr): return sprites
"""
sprites = self.get_sprites_from_layer(layer_nr)
self.remove(*sprites)
return sprites
#---# layer methods
def layers(self):
"""return a list of unique defined layers defined.
LayeredUpdates.layers(): return layers
"""
return sorted(set(self._spritelayers.values()))
def change_layer(self, sprite, new_layer):
"""change the layer of the sprite
LayeredUpdates.change_layer(sprite, new_layer): return None
The sprite must have been added to the renderer already. This is not
checked.
"""
sprites = self._spritelist # speedup
sprites_layers = self._spritelayers # speedup
sprites.remove(sprite)
sprites_layers.pop(sprite)
# add the sprite at the right position
        # bisect algorithm
leng = len(sprites)
low = mid = 0
high = leng - 1
while low <= high:
mid = low + (high - low) // 2
if sprites_layers[sprites[mid]] <= new_layer:
low = mid + 1
else:
high = mid - 1
# linear search to find final position
while mid < leng and sprites_layers[sprites[mid]] <= new_layer:
mid += 1
sprites.insert(mid, sprite)
if hasattr(sprite, 'layer'):
sprite.layer = new_layer
# add layer info
sprites_layers[sprite] = new_layer
def get_layer_of_sprite(self, sprite):
"""return the layer that sprite is currently in
If the sprite is not found, then it will return the default layer.
"""
return self._spritelayers.get(sprite, self._default_layer)
def get_top_layer(self):
"""return the top layer
LayeredUpdates.get_top_layer(): return layer
"""
return self._spritelayers[self._spritelist[-1]]
def get_bottom_layer(self):
"""return the bottom layer
LayeredUpdates.get_bottom_layer(): return layer
"""
return self._spritelayers[self._spritelist[0]]
def move_to_front(self, sprite):
"""bring the sprite to front layer
LayeredUpdates.move_to_front(sprite): return None
Brings the sprite to front by changing the sprite layer to the top-most
layer. The sprite is added at the end of the list of sprites in that
top-most layer.
"""
self.change_layer(sprite, self.get_top_layer())
def move_to_back(self, sprite):
"""move the sprite to the bottom layer
LayeredUpdates.move_to_back(sprite): return None
Moves the sprite to the bottom layer by moving it to a new layer below
the current bottom layer.
"""
self.change_layer(sprite, self.get_bottom_layer() - 1)
def get_top_sprite(self):
"""return the topmost sprite
LayeredUpdates.get_top_sprite(): return Sprite
"""
return self._spritelist[-1]
def get_sprites_from_layer(self, layer):
"""return all sprites from a layer ordered as they where added
LayeredUpdates.get_sprites_from_layer(layer): return sprites
        Returns all sprites from a layer. The sprites are ordered in the
        sequence that they were added. (The sprites are not removed from the
        layer.)
"""
sprites = []
sprites_append = sprites.append
sprite_layers = self._spritelayers
for spr in self._spritelist:
if sprite_layers[spr] == layer:
sprites_append(spr)
elif sprite_layers[spr] > layer:# break after because no other will
# follow with same layer
break
return sprites
def switch_layer(self, layer1_nr, layer2_nr):
"""switch the sprites from layer1_nr to layer2_nr
LayeredUpdates.switch_layer(layer1_nr, layer2_nr): return None
        The layer numbers must exist. This method does not check for the
existence of the given layers.
"""
sprites1 = self.remove_sprites_of_layer(layer1_nr)
for spr in self.get_sprites_from_layer(layer2_nr):
self.change_layer(spr, layer1_nr)
self.add(layer=layer2_nr, *sprites1)
class LayeredDirty(LayeredUpdates):
"""LayeredDirty Group is for DirtySprites; subclasses LayeredUpdates
pygame.sprite.LayeredDirty(*spites, **kwargs): return LayeredDirty
This group requires pygame.sprite.DirtySprite or any sprite that
has the following attributes:
image, rect, dirty, visible, blendmode (see doc of DirtySprite).
It uses the dirty flag technique and is therefore faster than
pygame.sprite.RenderUpdates if you have many static sprites. It
also switches automatically between dirty rect updating and full
    screen drawing, so you do not have to worry about which would be faster.
As with the pygame.sprite.Group, you can specify some additional attributes
through kwargs:
_use_update: True/False (default is False)
_default_layer: default layer where the sprites without a layer are
added
    _time_threshold: threshold time for switching between dirty rect mode
and fullscreen mode; defaults to updating at 80 frames per second,
which is equal to 1000.0 / 80.0
New in pygame 1.8.0
"""
def __init__(self, *sprites, **kwargs):
"""initialize group.
pygame.sprite.LayeredDirty(*spites, **kwargs): return LayeredDirty
You can specify some additional attributes through kwargs:
_use_update: True/False (default is False)
_default_layer: default layer where the sprites without a layer are
added
            _time_threshold: threshold time for switching between dirty rect
mode and fullscreen mode; defaults to updating at 80 frames per
second, which is equal to 1000.0 / 80.0
"""
LayeredUpdates.__init__(self, *sprites, **kwargs)
self._clip = None
self._use_update = False
self._time_threshold = 1000.0 / 80.0 # 1000.0 / fps
self._bgd = None
for key, val in kwargs.items():
if key in ['_use_update', '_time_threshold', '_default_layer']:
if hasattr(self, key):
setattr(self, key, val)
def add_internal(self, sprite, layer=None):
"""Do not use this method directly.
It is used by the group to add a sprite internally.
"""
# check if all needed attributes are set
if not hasattr(sprite, 'dirty'):
raise AttributeError()
if not hasattr(sprite, 'visible'):
raise AttributeError()
if not hasattr(sprite, 'blendmode'):
raise AttributeError()
if not isinstance(sprite, DirtySprite):
raise TypeError()
if sprite.dirty == 0: # set it dirty if it is not
sprite.dirty = 1
LayeredUpdates.add_internal(self, sprite, layer)
def draw(self, surface, bgd=None):
"""draw all sprites in the right order onto the given surface
LayeredDirty.draw(surface, bgd=None): return Rect_list
You can pass the background too. If a self.bgd is already set to some
value that is not None, then the bgd argument has no effect.
"""
# speedups
_orig_clip = surface.get_clip()
_clip = self._clip
if _clip is None:
_clip = _orig_clip
_surf = surface
_sprites = self._spritelist
_old_rect = self.spritedict
_update = self.lostsprites
_update_append = _update.append
_ret = None
_surf_blit = _surf.blit
_rect = Rect
if bgd is not None:
self._bgd = bgd
_bgd = self._bgd
init_rect = self._init_rect
_surf.set_clip(_clip)
# -------
# 0. decide whether to render with update or flip
start_time = get_ticks()
if self._use_update: # dirty rects mode
# 1. find dirty area on screen and put the rects into _update
# still not happy with that part
for spr in _sprites:
if 0 < spr.dirty:
# chose the right rect
if spr.source_rect:
_union_rect = _rect(spr.rect.topleft,
spr.source_rect.size)
else:
_union_rect = _rect(spr.rect)
_union_rect_collidelist = _union_rect.collidelist
_union_rect_union_ip = _union_rect.union_ip
i = _union_rect_collidelist(_update)
while -1 < i:
_union_rect_union_ip(_update[i])
del _update[i]
i = _union_rect_collidelist(_update)
_update_append(_union_rect.clip(_clip))
if _old_rect[spr] is not init_rect:
_union_rect = _rect(_old_rect[spr])
_union_rect_collidelist = _union_rect.collidelist
_union_rect_union_ip = _union_rect.union_ip
i = _union_rect_collidelist(_update)
while -1 < i:
_union_rect_union_ip(_update[i])
del _update[i]
i = _union_rect_collidelist(_update)
_update_append(_union_rect.clip(_clip))
# can it be done better? because that is an O(n**2) algorithm in
# worst case
# clear using background
if _bgd is not None:
for rec in _update:
_surf_blit(_bgd, rec, rec)
# 2. draw
for spr in _sprites:
if 1 > spr.dirty:
if spr._visible:
# sprite not dirty; blit only the intersecting part
_spr_rect = spr.rect
if spr.source_rect is not None:
_spr_rect = Rect(spr.rect.topleft,
spr.source_rect.size)
_spr_rect_clip = _spr_rect.clip
for idx in _spr_rect.collidelistall(_update):
# clip
clip = _spr_rect_clip(_update[idx])
_surf_blit(spr.image,
clip,
(clip[0] - _spr_rect[0],
clip[1] - _spr_rect[1],
clip[2],
clip[3]),
spr.blendmode)
else: # dirty sprite
if spr._visible:
_old_rect[spr] = _surf_blit(spr.image,
spr.rect,
spr.source_rect,
spr.blendmode)
if spr.dirty == 1:
spr.dirty = 0
_ret = list(_update)
else: # flip, full screen mode
if _bgd is not None:
_surf_blit(_bgd, (0, 0))
for spr in _sprites:
if spr._visible:
_old_rect[spr] = _surf_blit(spr.image,
spr.rect,
spr.source_rect,
spr.blendmode)
_ret = [_rect(_clip)] # return only the part of the screen changed
# timing for switching modes
# How may a good threshold be found? It depends on the hardware.
end_time = get_ticks()
if end_time-start_time > self._time_threshold:
self._use_update = False
else:
self._use_update = True
## # debug
## print " check: using dirty rects:", self._use_update
        # empty dirty rects list
_update[:] = []
# -------
# restore original clip
_surf.set_clip(_orig_clip)
return _ret
def clear(self, surface, bgd):
"""use to set background
Group.clear(surface, bgd): return None
"""
self._bgd = bgd
def repaint_rect(self, screen_rect):
"""repaint the given area
LayeredDirty.repaint_rect(screen_rect): return None
screen_rect is in screen coordinates.
"""
if self._clip:
self.lostsprites.append(screen_rect.clip(self._clip))
else:
self.lostsprites.append(Rect(screen_rect))
def set_clip(self, screen_rect=None):
"""clip the area where to draw; pass None (default) to reset the clip
LayeredDirty.set_clip(screen_rect=None): return None
"""
if screen_rect is None:
self._clip = pygame.display.get_surface().get_rect()
else:
self._clip = screen_rect
self._use_update = False
def get_clip(self):
"""get the area where drawing will occur
LayeredDirty.get_clip(): return Rect
"""
return self._clip
def change_layer(self, sprite, new_layer):
"""change the layer of the sprite
LayeredUpdates.change_layer(sprite, new_layer): return None
The sprite must have been added to the renderer already. This is not
checked.
"""
LayeredUpdates.change_layer(self, sprite, new_layer)
if sprite.dirty == 0:
sprite.dirty = 1
def set_timing_treshold(self, time_ms):
"""set the treshold in milliseconds
set_timing_treshold(time_ms): return None
Defaults to 1000.0 / 80.0. This means that the screen will be painted
using the flip method rather than the update method if the update
method is taking so long to update the screen that the frame rate falls
below 80 frames per second.
"""
self._time_threshold = time_ms
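# Illustrative sketch (editor's addition, not part of the original module): a
# minimal frame step using LayeredDirty. The 'screen', 'background' and
# 'sprites' arguments are hypothetical objects supplied by the caller.
def _example_layered_dirty_frame(screen, background, sprites):
    group = LayeredDirty(*sprites)
    group.clear(screen, background)      # register the background surface
    group.update()                       # let the DirtySprites update themselves
    dirty_rects = group.draw(screen)     # draw and collect the changed rects
    pygame.display.update(dirty_rects)   # push only the changed areas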
class GroupSingle(AbstractGroup):
"""A group container that holds a single most recent item.
This class works just like a regular group, but it only keeps a single
sprite in the group. Whatever sprite has been added to the group last will
be the only sprite in the group.
You can access its one sprite as the .sprite attribute. Assigning to this
attribute will properly remove the old sprite and then add the new one.
"""
def __init__(self, sprite=None):
AbstractGroup.__init__(self)
self.__sprite = None
if sprite is not None:
self.add(sprite)
def copy(self):
return GroupSingle(self.__sprite)
def sprites(self):
if self.__sprite is not None:
return [self.__sprite]
else:
return []
def add_internal(self, sprite):
if self.__sprite is not None:
self.__sprite.remove_internal(self)
self.remove_internal(self.__sprite)
self.__sprite = sprite
def __nonzero__(self):
return self.__sprite is not None
def _get_sprite(self):
return self.__sprite
def _set_sprite(self, sprite):
self.add_internal(sprite)
sprite.add_internal(self)
return sprite
sprite = property(_get_sprite,
_set_sprite,
None,
"The sprite contained in this group")
def remove_internal(self, sprite):
if sprite is self.__sprite:
self.__sprite = None
if sprite in self.spritedict:
AbstractGroup.remove_internal(self, sprite)
def has_internal(self, sprite):
return self.__sprite is sprite
# Optimizations...
def __contains__(self, sprite):
return self.__sprite is sprite
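# Illustrative sketch (editor's addition): GroupSingle keeps only the most
# recently added sprite, so assigning to .sprite swaps sprites cleanly. The
# 'old_shot' and 'new_shot' arguments are hypothetical Sprite instances.
def _example_group_single(old_shot, new_shot):
    tracker = GroupSingle(old_shot)
    tracker.sprite = new_shot            # old_shot is removed automatically
    return tracker.sprite is new_shot    # True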
# Some different collision detection functions that could be used.
def collide_rect(left, right):
"""collision detection between two sprites, using rects.
pygame.sprite.collide_rect(left, right): return bool
Tests for collision between two sprites. Uses the pygame.Rect colliderect
function to calculate the collision. It is intended to be passed as a
collided callback function to the *collide functions. Sprites must have
"rect" attributes.
New in pygame 1.8.0
"""
return left.rect.colliderect(right.rect)
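# Illustrative sketch (editor's addition): collide_rect is meant to be passed
# as the 'collided' callback of the *collide helpers below; 'player' and
# 'walls' are hypothetical names.
def _example_collide_rect(player, walls):
    return spritecollide(player, walls, False, collided=collide_rect)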
class collide_rect_ratio:
"""A callable class that checks for collisions using scaled rects
    The class checks for collisions between two sprites using a scaled version
    of the sprites' rects. It is created with a ratio; the instance is then
    intended to be passed as a collided callback function to the *collide
    functions.
New in pygame 1.8.1
"""
def __init__(self, ratio):
"""create a new collide_rect_ratio callable
Ratio is expected to be a floating point value used to scale
the underlying sprite rect before checking for collisions.
"""
self.ratio = ratio
def __call__(self, left, right):
"""detect collision between two sprites using scaled rects
pygame.sprite.collide_rect_ratio(ratio)(left, right): return bool
Tests for collision between two sprites. Uses the pygame.Rect
colliderect function to calculate the collision after scaling the rects
by the stored ratio. Sprites must have "rect" attributes.
"""
ratio = self.ratio
leftrect = left.rect
width = leftrect.width
height = leftrect.height
leftrect = leftrect.inflate(width * ratio - width,
height * ratio - height)
rightrect = right.rect
width = rightrect.width
height = rightrect.height
rightrect = rightrect.inflate(width * ratio - width,
height * ratio - height)
return leftrect.colliderect(rightrect)
def collide_circle(left, right):
"""detect collision between two sprites using circles
pygame.sprite.collide_circle(left, right): return bool
Tests for collision between two sprites by testing whether two circles
centered on the sprites overlap. If the sprites have a "radius" attribute,
then that radius is used to create the circle; otherwise, a circle is
created that is big enough to completely enclose the sprite's rect as
given by the "rect" attribute. This function is intended to be passed as
a collided callback function to the *collide functions. Sprites must have a
"rect" and an optional "radius" attribute.
New in pygame 1.8.0
"""
xdistance = left.rect.centerx - right.rect.centerx
ydistance = left.rect.centery - right.rect.centery
distancesquared = xdistance ** 2 + ydistance ** 2
if hasattr(left, 'radius'):
leftradius = left.radius
else:
leftrect = left.rect
        # approximate the radius with half of the rect's diagonal; this
        # may give false positives (especially if it's a long, thin rect)
leftradius = 0.5 * ((leftrect.width ** 2 + leftrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(left, 'radius', leftradius)
if hasattr(right, 'radius'):
rightradius = right.radius
else:
rightrect = right.rect
        # approximate the radius with half of the rect's diagonal; this
        # may give false positives (especially if it's a long, thin rect)
rightradius = 0.5 * ((rightrect.width ** 2 + rightrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(right, 'radius', rightradius)
return distancesquared <= (leftradius + rightradius) ** 2
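# Illustrative sketch (editor's addition): giving sprites an explicit 'radius'
# avoids the bounding-rect approximation described above; 'ball' and 'pegs'
# are hypothetical sprite/group names.
def _example_collide_circle(ball, pegs):
    ball.radius = 8
    for peg in pegs:
        peg.radius = 5
    return spritecollide(ball, pegs, False, collided=collide_circle)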
class collide_circle_ratio(object):
"""detect collision between two sprites using scaled circles
This callable class checks for collisions between two sprites using a
scaled version of a sprite's radius. It is created with a ratio as the
argument to the constructor. The instance is then intended to be passed as
a collided callback function to the *collide functions.
New in pygame 1.8.1
"""
def __init__(self, ratio):
"""creates a new collide_circle_ratio callable instance
The given ratio is expected to be a floating point value used to scale
the underlying sprite radius before checking for collisions.
        With a ratio of 1.0 it behaves exactly like the collide_circle
        function.
"""
self.ratio = ratio
def __call__(self, left, right):
"""detect collision between two sprites using scaled circles
        pygame.sprite.collide_circle_ratio(ratio)(left, right): return bool
Tests for collision between two sprites by testing whether two circles
centered on the sprites overlap after scaling the circle's radius by
the stored ratio. If the sprites have a "radius" attribute, that is
used to create the circle; otherwise, a circle is created that is big
enough to completely enclose the sprite's rect as given by the "rect"
attribute. Intended to be passed as a collided callback function to the
*collide functions. Sprites must have a "rect" and an optional "radius"
attribute.
"""
ratio = self.ratio
xdistance = left.rect.centerx - right.rect.centerx
ydistance = left.rect.centery - right.rect.centery
distancesquared = xdistance ** 2 + ydistance ** 2
if hasattr(left, "radius"):
leftradius = left.radius * ratio
else:
leftrect = left.rect
leftradius = ratio * 0.5 * ((leftrect.width ** 2 + leftrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(left, 'radius', leftradius)
if hasattr(right, "radius"):
rightradius = right.radius * ratio
else:
rightrect = right.rect
rightradius = ratio * 0.5 * ((rightrect.width ** 2 + rightrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(right, 'radius', rightradius)
return distancesquared <= (leftradius + rightradius) ** 2
def collide_mask(left, right):
"""collision detection between two sprites, using masks.
pygame.sprite.collide_mask(SpriteLeft, SpriteRight): bool
Tests for collision between two sprites by testing if their bitmasks
overlap. If the sprites have a "mask" attribute, that is used as the mask;
otherwise, a mask is created from the sprite image. Intended to be passed
as a collided callback function to the *collide functions. Sprites must
have a "rect" and an optional "mask" attribute.
New in pygame 1.8.0
"""
xoffset = right.rect[0] - left.rect[0]
yoffset = right.rect[1] - left.rect[1]
try:
leftmask = left.mask
except AttributeError:
leftmask = from_surface(left.image)
try:
rightmask = right.mask
except AttributeError:
rightmask = from_surface(right.image)
return leftmask.overlap(rightmask, (xoffset, yoffset))
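# Illustrative sketch (editor's addition): precomputing and caching masks keeps
# collide_mask from rebuilding them from the sprite images on every test;
# 'player' and 'enemies' are hypothetical names.
def _example_collide_mask(player, enemies):
    player.mask = from_surface(player.image)
    for enemy in enemies:
        enemy.mask = from_surface(enemy.image)
    return spritecollide(player, enemies, False, collided=collide_mask)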
def spritecollide(sprite, group, dokill, collided=None):
"""find Sprites in a Group that intersect another Sprite
pygame.sprite.spritecollide(sprite, group, dokill, collided=None):
return Sprite_list
Return a list containing all Sprites in a Group that intersect with another
Sprite. Intersection is determined by comparing the Sprite.rect attribute
of each Sprite.
The dokill argument is a bool. If set to True, all Sprites that collide
will be removed from the Group.
The collided argument is a callback function used to calculate if two
    sprites are colliding. It should take two sprites as arguments and return a
    bool indicating whether they are colliding. If collided is not passed, all
sprites must have a "rect" value, which is a rectangle of the sprite area,
which will be used to calculate the collision.
"""
if dokill:
crashed = []
append = crashed.append
if collided:
for s in group.sprites():
if collided(sprite, s):
s.kill()
append(s)
else:
spritecollide = sprite.rect.colliderect
for s in group.sprites():
if spritecollide(s.rect):
s.kill()
append(s)
return crashed
elif collided:
return [s for s in group if collided(sprite, s)]
else:
spritecollide = sprite.rect.colliderect
return [s for s in group if spritecollide(s.rect)]
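# Illustrative sketch (editor's addition): collect every coin whose rect
# overlaps the player and remove the hit coins from all groups; the names are
# hypothetical.
def _example_spritecollide(player, coins):
    return spritecollide(player, coins, dokill=True)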
def groupcollide(groupa, groupb, dokilla, dokillb, collided=None):
"""detect collision between a group and another group
pygame.sprite.groupcollide(groupa, groupb, dokilla, dokillb):
return dict
Given two groups, this will find the intersections between all sprites in
each group. It returns a dictionary of all sprites in the first group that
collide. The value for each item in the dictionary is a list of the sprites
in the second group it collides with. The two dokill arguments control if
the sprites from either group will be automatically removed from all
groups. Collided is a callback function used to calculate if two sprites
    are colliding. It should take two sprites as arguments and return a bool
    indicating whether they are colliding. If collided is not passed, all
sprites must have a "rect" value, which is a rectangle of the sprite area
that will be used to calculate the collision.
"""
crashed = {}
SC = spritecollide
if dokilla:
for s in groupa.sprites():
c = SC(s, groupb, dokillb, collided)
if c:
crashed[s] = c
s.kill()
else:
for s in groupa:
c = SC(s, groupb, dokillb, collided)
if c:
crashed[s] = c
return crashed
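# Illustrative sketch (editor's addition): remove both the bullet and the alien
# on impact; the returned dict maps each bullet to the list of aliens it hit.
# The names are hypothetical.
def _example_groupcollide(bullets, aliens):
    return groupcollide(bullets, aliens, True, True)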
def spritecollideany(sprite, group, collided=None):
"""finds any sprites in a group that collide with the given sprite
pygame.sprite.spritecollideany(sprite, group): return sprite
Given a sprite and a group of sprites, this will return return any single
sprite that collides with with the given sprite. If there are no
collisions, then this returns None.
If you don't need all the features of the spritecollide function, this
function will be a bit quicker.
Collided is a callback function used to calculate if two sprites are
colliding. It should take two sprites as values and return a bool value
indicating if they are colliding. If collided is not passed, then all
sprites must have a "rect" value, which is a rectangle of the sprite area,
which will be used to calculate the collision.
"""
if collided:
for s in group:
if collided(sprite, s):
return s
else:
# Special case old behaviour for speed.
spritecollide = sprite.rect.colliderect
for s in group:
if spritecollide(s.rect):
return s
return None
| gpl-3.0 |
charukiewicz/beer-manager | venv/lib/python3.4/site-packages/pip/commands/__init__.py | 476 | 2236 | """
Package containing all pip commands
"""
from pip.commands.bundle import BundleCommand
from pip.commands.completion import CompletionCommand
from pip.commands.freeze import FreezeCommand
from pip.commands.help import HelpCommand
from pip.commands.list import ListCommand
from pip.commands.search import SearchCommand
from pip.commands.show import ShowCommand
from pip.commands.install import InstallCommand
from pip.commands.uninstall import UninstallCommand
from pip.commands.unzip import UnzipCommand
from pip.commands.zip import ZipCommand
from pip.commands.wheel import WheelCommand
commands = {
BundleCommand.name: BundleCommand,
CompletionCommand.name: CompletionCommand,
FreezeCommand.name: FreezeCommand,
HelpCommand.name: HelpCommand,
SearchCommand.name: SearchCommand,
ShowCommand.name: ShowCommand,
InstallCommand.name: InstallCommand,
UninstallCommand.name: UninstallCommand,
UnzipCommand.name: UnzipCommand,
ZipCommand.name: ZipCommand,
ListCommand.name: ListCommand,
WheelCommand.name: WheelCommand,
}
commands_order = [
InstallCommand,
UninstallCommand,
FreezeCommand,
ListCommand,
ShowCommand,
SearchCommand,
WheelCommand,
ZipCommand,
UnzipCommand,
BundleCommand,
HelpCommand,
]
def get_summaries(ignore_hidden=True, ordered=True):
"""Yields sorted (command name, command summary) tuples."""
if ordered:
cmditems = _sort_commands(commands, commands_order)
else:
cmditems = commands.items()
for name, command_class in cmditems:
if ignore_hidden and command_class.hidden:
continue
yield (name, command_class.summary)
def get_similar_commands(name):
"""Command name auto-correct."""
from difflib import get_close_matches
close_commands = get_close_matches(name, commands.keys())
if close_commands:
guess = close_commands[0]
else:
guess = False
return guess
def _sort_commands(cmddict, order):
def keyfn(key):
try:
return order.index(key[1])
except ValueError:
# unordered items should come last
return 0xff
return sorted(cmddict.items(), key=keyfn)
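# Illustrative sketch (editor's addition): resolve a possibly misspelled
# command name and fall back to the visible command names; 'user_input' is a
# hypothetical argument.
def _example_resolve_command(user_input):
    guess = get_similar_commands(user_input)
    if guess:
        return guess
    return [name for name, summary in get_summaries()]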
| mit |
jpirko/lnst | lnst/Recipes/ENRT/VirtualOvsBridgeVlansOverBondRecipe.py | 1 | 7385 | import logging
from itertools import product
from lnst.Common.Parameters import Param, IntParam, StrParam
from lnst.Common.IpAddress import ipaddress
from lnst.Controller import HostReq, DeviceReq, RecipeParam
from lnst.Recipes.ENRT.BaseEnrtRecipe import BaseEnrtRecipe
from lnst.Recipes.ENRT.ConfigMixins.OffloadSubConfigMixin import (
OffloadSubConfigMixin)
from lnst.Recipes.ENRT.ConfigMixins.CommonHWSubConfigMixin import (
CommonHWSubConfigMixin)
from lnst.Recipes.ENRT.PingMixins import VlanPingEvaluatorMixin
from lnst.RecipeCommon.Ping.PingEndpoints import PingEndpoints
from lnst.Devices import OvsBridgeDevice
class VirtualOvsBridgeVlansOverBondRecipe(VlanPingEvaluatorMixin,
CommonHWSubConfigMixin, OffloadSubConfigMixin, BaseEnrtRecipe):
host1 = HostReq()
host1.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
host1.eth1 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
host1.tap0 = DeviceReq(label="to_guest1")
host1.tap1 = DeviceReq(label="to_guest2")
host2 = HostReq()
host2.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
host2.eth1 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
host2.tap0 = DeviceReq(label="to_guest3")
host2.tap1 = DeviceReq(label="to_guest4")
guest1 = HostReq()
guest1.eth0 = DeviceReq(label="to_guest1")
guest2 = HostReq()
guest2.eth0 = DeviceReq(label="to_guest2")
guest3 = HostReq()
guest3.eth0 = DeviceReq(label="to_guest3")
guest4 = HostReq()
guest4.eth0 = DeviceReq(label="to_guest4")
offload_combinations = Param(default=(
dict(gro="on", gso="on", tso="on", tx="on"),
dict(gro="off", gso="on", tso="on", tx="on"),
dict(gro="on", gso="off", tso="off", tx="on"),
dict(gro="on", gso="on", tso="off", tx="off")))
bonding_mode = StrParam(mandatory = True)
miimon_value = IntParam(mandatory = True)
def test_wide_configuration(self):
host1, host2, guest1, guest2, guest3, guest4 = (self.matched.host1,
self.matched.host2, self.matched.guest1, self.matched.guest2,
self.matched.guest3, self.matched.guest4)
for host, port_name in [(host1, "bond_port1"),
(host2, "bond_port2")]:
for dev in [host.eth0, host.eth1, host.tap0, host.tap1]:
dev.down()
host.br0 = OvsBridgeDevice()
for dev, tag in [(host.tap0, "10"), (host.tap1, "20")]:
host.br0.port_add(device=dev, port_options={'tag': tag})
            # miimon cannot be set due to the colon in the argument name:
            # other_config:bond-miimon-interval
host.br0.bond_add(port_name, (host.eth0, host.eth1),
bond_mode=self.params.bonding_mode)
guest1.eth0.down()
guest2.eth0.down()
guest3.eth0.down()
guest4.eth0.down()
configuration = super().test_wide_configuration()
configuration.test_wide_devices = [guest1.eth0, guest2.eth0,
guest3.eth0, guest4.eth0]
net_addr_1 = "192.168.10"
net_addr6_1 = "fc00:0:0:1"
net_addr_2 = "192.168.20"
net_addr6_2 = "fc00:0:0:2"
for i, guest in enumerate([guest1, guest3]):
guest.eth0.ip_add(ipaddress(net_addr_1 + "." + str(i+1) +
"/24"))
guest.eth0.ip_add(ipaddress(net_addr6_1 + "::" + str(i+1) +
"/64"))
for i, guest in enumerate([guest2, guest4]):
guest.eth0.ip_add(ipaddress(net_addr_2 + "." + str(i+1) +
"/24"))
guest.eth0.ip_add(ipaddress(net_addr6_2 + "::" + str(i+1) +
"/64"))
for host in [host1, host2]:
for dev in [host.eth0, host.eth1, host.tap0, host.tap1,
host.br0]:
dev.up()
for guest in [guest1, guest2, guest3, guest4]:
guest.eth0.up()
if "perf_tool_cpu" in self.params:
logging.info("'perf_tool_cpu' param (%d) to be set to None" %
self.params.perf_tool_cpu)
self.params.perf_tool_cpu = None
self.wait_tentative_ips(configuration.test_wide_devices)
return configuration
def generate_test_wide_description(self, config):
host1, host2 = self.matched.host1, self.matched.host2
desc = super().generate_test_wide_description(config)
desc += [
"\n".join([
"Configured {}.{}.ips = {}".format(
dev.host.hostid, dev.name, dev.ips
)
for dev in config.test_wide_devices
]),
"\n".join([
"Configured {}.{}.ports = {}".format(
dev.host.hostid, dev.name, dev.ports
)
for dev in [host1.br0, host2.br0]
]),
"\n".join([
"Configured {}.{}.bonds = {}".format(
dev.host.hostid, dev.name, dev.bonds
)
for dev in [host1.br0, host2.br0]
])
]
return desc
def test_wide_deconfiguration(self, config):
del config.test_wide_devices
super().test_wide_deconfiguration(config)
def generate_ping_endpoints(self, config):
guest1, guest2, guest3, guest4 = (self.matched.guest1,
self.matched.guest2, self.matched.guest3, self.matched.guest4)
dev_combinations = product(
[guest1.eth0, guest2.eth0],
[guest3.eth0, guest4.eth0]
)
return [
PingEndpoints(
comb[0], comb[1],
reachable=((comb[0].host, comb[1].host) in [
(guest1, guest3),
(guest2, guest4)
])
)
for comb in dev_combinations
]
def generate_perf_endpoints(self, config):
return [(self.matched.guest1.eth0, self.matched.guest3.eth0)]
@property
def offload_nics(self):
host1, host2, guest1, guest2, guest3, guest4 = (self.matched.host1,
self.matched.host2, self.matched.guest1, self.matched.guest2,
self.matched.guest3, self.matched.guest4)
result = []
for machine in host1, host2, guest1, guest2, guest3, guest4:
result.append(machine.eth0)
result.extend([host1.eth1, host2.eth1])
return result
@property
def mtu_hw_config_dev_list(self):
host1, host2, guest1, guest2, guest3, guest4 = (self.matched.host1,
self.matched.host2, self.matched.guest1, self.matched.guest2,
self.matched.guest3, self.matched.guest4)
result = []
for host in [host1, host2]:
for dev in [host.eth0, host.eth1, host.tap0, host.tap1,
host.br0]:
result.append(dev)
for guest in [guest1, guest2, guest3, guest4]:
result.append(guest.eth0)
return result
@property
def dev_interrupt_hw_config_dev_list(self):
return [self.matched.host1.eth0, self.matched.host1.eth1,
self.matched.host2.eth0, self.matched.host2.eth1]
@property
def parallel_stream_qdisc_hw_config_dev_list(self):
return [self.matched.host1.eth0, self.matched.host1.eth1,
self.matched.host2.eth0, self.matched.host2.eth1]
| gpl-2.0 |
wemanuel/smry | server-auth/ls/google-cloud-sdk/lib/googlecloudapis/bigquery/v2/bigquery_v2_client.py | 4 | 27379 | """Generated client library for bigquery version v2."""
# NOTE: This file is autogenerated and should not be edited by hand.
from googlecloudapis.apitools.base.py import base_api
from googlecloudapis.bigquery.v2 import bigquery_v2_messages as messages
class BigqueryV2(base_api.BaseApiClient):
"""Generated client library for service bigquery version v2."""
MESSAGES_MODULE = messages
_PACKAGE = u'bigquery'
_SCOPES = [u'https://www.googleapis.com/auth/bigquery', u'https://www.googleapis.com/auth/bigquery.insertdata', u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/devstorage.full_control', u'https://www.googleapis.com/auth/devstorage.read_only', u'https://www.googleapis.com/auth/devstorage.read_write']
_VERSION = u'v2'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = ''
_CLIENT_CLASS_NAME = u'BigqueryV2'
_URL_VERSION = u'v2'
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None):
"""Create a new bigquery handle."""
url = url or u'https://www.googleapis.com/bigquery/v2/'
super(BigqueryV2, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers)
self.datasets = self.DatasetsService(self)
self.jobs = self.JobsService(self)
self.projects = self.ProjectsService(self)
self.tabledata = self.TabledataService(self)
self.tables = self.TablesService(self)
class DatasetsService(base_api.BaseApiService):
"""Service class for the datasets resource."""
_NAME = u'datasets'
def __init__(self, client):
super(BigqueryV2.DatasetsService, self).__init__(client)
self._method_configs = {
'Delete': base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'bigquery.datasets.delete',
ordered_params=[u'projectId', u'datasetId'],
path_params=[u'datasetId', u'projectId'],
query_params=[u'deleteContents'],
relative_path=u'projects/{projectId}/datasets/{datasetId}',
request_field='',
request_type_name=u'BigqueryDatasetsDeleteRequest',
response_type_name=u'BigqueryDatasetsDeleteResponse',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'bigquery.datasets.get',
ordered_params=[u'projectId', u'datasetId'],
path_params=[u'datasetId', u'projectId'],
query_params=[],
relative_path=u'projects/{projectId}/datasets/{datasetId}',
request_field='',
request_type_name=u'BigqueryDatasetsGetRequest',
response_type_name=u'Dataset',
supports_download=False,
),
'Insert': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'bigquery.datasets.insert',
ordered_params=[u'projectId'],
path_params=[u'projectId'],
query_params=[],
relative_path=u'projects/{projectId}/datasets',
request_field=u'dataset',
request_type_name=u'BigqueryDatasetsInsertRequest',
response_type_name=u'Dataset',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'bigquery.datasets.list',
ordered_params=[u'projectId'],
path_params=[u'projectId'],
query_params=[u'all', u'maxResults', u'pageToken'],
relative_path=u'projects/{projectId}/datasets',
request_field='',
request_type_name=u'BigqueryDatasetsListRequest',
response_type_name=u'DatasetList',
supports_download=False,
),
'Patch': base_api.ApiMethodInfo(
http_method=u'PATCH',
method_id=u'bigquery.datasets.patch',
ordered_params=[u'projectId', u'datasetId'],
path_params=[u'datasetId', u'projectId'],
query_params=[],
relative_path=u'projects/{projectId}/datasets/{datasetId}',
request_field=u'dataset',
request_type_name=u'BigqueryDatasetsPatchRequest',
response_type_name=u'Dataset',
supports_download=False,
),
'Update': base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'bigquery.datasets.update',
ordered_params=[u'projectId', u'datasetId'],
path_params=[u'datasetId', u'projectId'],
query_params=[],
relative_path=u'projects/{projectId}/datasets/{datasetId}',
request_field=u'dataset',
request_type_name=u'BigqueryDatasetsUpdateRequest',
response_type_name=u'Dataset',
supports_download=False,
),
}
self._upload_configs = {
}
def Delete(self, request, global_params=None):
"""Deletes the dataset specified by the datasetId value. Before you can delete a dataset, you must delete all its tables, either manually or by specifying deleteContents. Immediately after deletion, you can create another dataset with the same name.
Args:
request: (BigqueryDatasetsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BigqueryDatasetsDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
"""Returns the dataset specified by datasetID.
Args:
request: (BigqueryDatasetsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Dataset) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def Insert(self, request, global_params=None):
"""Creates a new empty dataset.
Args:
request: (BigqueryDatasetsInsertRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Dataset) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""Lists all datasets in the specified project to which you have been granted the READER dataset role.
Args:
request: (BigqueryDatasetsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DatasetList) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
def Patch(self, request, global_params=None):
"""Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource. This method supports patch semantics.
Args:
request: (BigqueryDatasetsPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Dataset) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
def Update(self, request, global_params=None):
"""Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource.
Args:
request: (BigqueryDatasetsUpdateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Dataset) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
class JobsService(base_api.BaseApiService):
"""Service class for the jobs resource."""
_NAME = u'jobs'
def __init__(self, client):
super(BigqueryV2.JobsService, self).__init__(client)
self._method_configs = {
'Cancel': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'bigquery.jobs.cancel',
ordered_params=[u'projectId', u'jobId'],
path_params=[u'jobId', u'projectId'],
query_params=[],
relative_path=u'project/{projectId}/jobs/{jobId}/cancel',
request_field='',
request_type_name=u'BigqueryJobsCancelRequest',
response_type_name=u'JobCancelResponse',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'bigquery.jobs.get',
ordered_params=[u'projectId', u'jobId'],
path_params=[u'jobId', u'projectId'],
query_params=[],
relative_path=u'projects/{projectId}/jobs/{jobId}',
request_field='',
request_type_name=u'BigqueryJobsGetRequest',
response_type_name=u'Job',
supports_download=False,
),
'GetQueryResults': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'bigquery.jobs.getQueryResults',
ordered_params=[u'projectId', u'jobId'],
path_params=[u'jobId', u'projectId'],
query_params=[u'maxResults', u'pageToken', u'startIndex', u'timeoutMs'],
relative_path=u'projects/{projectId}/queries/{jobId}',
request_field='',
request_type_name=u'BigqueryJobsGetQueryResultsRequest',
response_type_name=u'GetQueryResultsResponse',
supports_download=False,
),
'Insert': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'bigquery.jobs.insert',
ordered_params=[u'projectId'],
path_params=[u'projectId'],
query_params=[],
relative_path=u'projects/{projectId}/jobs',
request_field=u'job',
request_type_name=u'BigqueryJobsInsertRequest',
response_type_name=u'Job',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'bigquery.jobs.list',
ordered_params=[u'projectId'],
path_params=[u'projectId'],
query_params=[u'allUsers', u'maxResults', u'pageToken', u'projection', u'stateFilter'],
relative_path=u'projects/{projectId}/jobs',
request_field='',
request_type_name=u'BigqueryJobsListRequest',
response_type_name=u'JobList',
supports_download=False,
),
'Query': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'bigquery.jobs.query',
ordered_params=[u'projectId'],
path_params=[u'projectId'],
query_params=[],
relative_path=u'projects/{projectId}/queries',
request_field=u'queryRequest',
request_type_name=u'BigqueryJobsQueryRequest',
response_type_name=u'QueryResponse',
supports_download=False,
),
}
self._upload_configs = {
'Insert': base_api.ApiUploadInfo(
accept=['*/*'],
max_size=None,
resumable_multipart=True,
resumable_path=u'/resumable/upload/bigquery/v2/projects/{projectId}/jobs',
simple_multipart=True,
simple_path=u'/upload/bigquery/v2/projects/{projectId}/jobs',
),
}
def Cancel(self, request, global_params=None):
"""Requests that a job be cancelled. This call will return immediately, and the client will need to poll for the job status to see if the cancel completed successfully.
Args:
request: (BigqueryJobsCancelRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(JobCancelResponse) The response message.
"""
config = self.GetMethodConfig('Cancel')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
"""Returns information about a specific job. Job information is available for a six month period after creation. Requires that you're the person who ran the job, or have the Is Owner project role.
Args:
request: (BigqueryJobsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Job) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def GetQueryResults(self, request, global_params=None):
"""Retrieves the results of a query job.
Args:
request: (BigqueryJobsGetQueryResultsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GetQueryResultsResponse) The response message.
"""
config = self.GetMethodConfig('GetQueryResults')
return self._RunMethod(
config, request, global_params=global_params)
def Insert(self, request, global_params=None, upload=None):
"""Starts a new asynchronous job. Requires the Can View project role.
Args:
request: (BigqueryJobsInsertRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
upload: (Upload, default: None) If present, upload
this stream with the request.
Returns:
(Job) The response message.
"""
config = self.GetMethodConfig('Insert')
upload_config = self.GetUploadConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params,
upload=upload, upload_config=upload_config)
def List(self, request, global_params=None):
"""Lists all jobs that you started in the specified project. The job list returns in reverse chronological order of when the jobs were created, starting with the most recent job created. Requires the Can View project role, or the Is Owner project role if you set the allUsers property.
Args:
request: (BigqueryJobsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(JobList) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
def Query(self, request, global_params=None):
"""Runs a BigQuery SQL query synchronously and returns query results if the query completes within a specified timeout.
Args:
request: (BigqueryJobsQueryRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(QueryResponse) The response message.
"""
config = self.GetMethodConfig('Query')
return self._RunMethod(
config, request, global_params=global_params)
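    # Illustrative usage sketch (editor's addition, not generated code): running
    # a synchronous query through this client. The message field names follow
    # the request_type_name/request_field entries above; the project id and SQL
    # string are assumptions.
    #
    #   client = BigqueryV2()
    #   request = messages.BigqueryJobsQueryRequest(
    #       projectId='my-project',
    #       queryRequest=messages.QueryRequest(query='SELECT 17'))
    #   response = client.jobs.Query(request)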
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = u'projects'
def __init__(self, client):
super(BigqueryV2.ProjectsService, self).__init__(client)
self._method_configs = {
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'bigquery.projects.list',
ordered_params=[],
path_params=[],
query_params=[u'maxResults', u'pageToken'],
relative_path=u'projects',
request_field='',
request_type_name=u'BigqueryProjectsListRequest',
response_type_name=u'ProjectList',
supports_download=False,
),
}
self._upload_configs = {
}
def List(self, request, global_params=None):
"""Lists all projects to which you have been granted any project role.
Args:
request: (BigqueryProjectsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ProjectList) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
class TabledataService(base_api.BaseApiService):
"""Service class for the tabledata resource."""
_NAME = u'tabledata'
def __init__(self, client):
super(BigqueryV2.TabledataService, self).__init__(client)
self._method_configs = {
'InsertAll': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'bigquery.tabledata.insertAll',
ordered_params=[u'projectId', u'datasetId', u'tableId'],
path_params=[u'datasetId', u'projectId', u'tableId'],
query_params=[],
relative_path=u'projects/{projectId}/datasets/{datasetId}/tables/{tableId}/insertAll',
request_field=u'tableDataInsertAllRequest',
request_type_name=u'BigqueryTabledataInsertAllRequest',
response_type_name=u'TableDataInsertAllResponse',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'bigquery.tabledata.list',
ordered_params=[u'projectId', u'datasetId', u'tableId'],
path_params=[u'datasetId', u'projectId', u'tableId'],
query_params=[u'maxResults', u'pageToken', u'startIndex'],
relative_path=u'projects/{projectId}/datasets/{datasetId}/tables/{tableId}/data',
request_field='',
request_type_name=u'BigqueryTabledataListRequest',
response_type_name=u'TableDataList',
supports_download=False,
),
}
self._upload_configs = {
}
def InsertAll(self, request, global_params=None):
"""Streams data into BigQuery one record at a time without needing to run a load job. Requires the WRITER dataset role.
Args:
request: (BigqueryTabledataInsertAllRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TableDataInsertAllResponse) The response message.
"""
config = self.GetMethodConfig('InsertAll')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""Retrieves table data from a specified set of rows. Requires the READER dataset role.
Args:
request: (BigqueryTabledataListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TableDataList) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
class TablesService(base_api.BaseApiService):
"""Service class for the tables resource."""
_NAME = u'tables'
def __init__(self, client):
super(BigqueryV2.TablesService, self).__init__(client)
self._method_configs = {
'Delete': base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'bigquery.tables.delete',
ordered_params=[u'projectId', u'datasetId', u'tableId'],
path_params=[u'datasetId', u'projectId', u'tableId'],
query_params=[],
relative_path=u'projects/{projectId}/datasets/{datasetId}/tables/{tableId}',
request_field='',
request_type_name=u'BigqueryTablesDeleteRequest',
response_type_name=u'BigqueryTablesDeleteResponse',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'bigquery.tables.get',
ordered_params=[u'projectId', u'datasetId', u'tableId'],
path_params=[u'datasetId', u'projectId', u'tableId'],
query_params=[],
relative_path=u'projects/{projectId}/datasets/{datasetId}/tables/{tableId}',
request_field='',
request_type_name=u'BigqueryTablesGetRequest',
response_type_name=u'Table',
supports_download=False,
),
'Insert': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'bigquery.tables.insert',
ordered_params=[u'projectId', u'datasetId'],
path_params=[u'datasetId', u'projectId'],
query_params=[],
relative_path=u'projects/{projectId}/datasets/{datasetId}/tables',
request_field=u'table',
request_type_name=u'BigqueryTablesInsertRequest',
response_type_name=u'Table',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'bigquery.tables.list',
ordered_params=[u'projectId', u'datasetId'],
path_params=[u'datasetId', u'projectId'],
query_params=[u'maxResults', u'pageToken'],
relative_path=u'projects/{projectId}/datasets/{datasetId}/tables',
request_field='',
request_type_name=u'BigqueryTablesListRequest',
response_type_name=u'TableList',
supports_download=False,
),
'Patch': base_api.ApiMethodInfo(
http_method=u'PATCH',
method_id=u'bigquery.tables.patch',
ordered_params=[u'projectId', u'datasetId', u'tableId'],
path_params=[u'datasetId', u'projectId', u'tableId'],
query_params=[],
relative_path=u'projects/{projectId}/datasets/{datasetId}/tables/{tableId}',
request_field=u'table',
request_type_name=u'BigqueryTablesPatchRequest',
response_type_name=u'Table',
supports_download=False,
),
'Update': base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'bigquery.tables.update',
ordered_params=[u'projectId', u'datasetId', u'tableId'],
path_params=[u'datasetId', u'projectId', u'tableId'],
query_params=[],
relative_path=u'projects/{projectId}/datasets/{datasetId}/tables/{tableId}',
request_field=u'table',
request_type_name=u'BigqueryTablesUpdateRequest',
response_type_name=u'Table',
supports_download=False,
),
}
self._upload_configs = {
}
def Delete(self, request, global_params=None):
"""Deletes the table specified by tableId from the dataset. If the table contains data, all the data will be deleted.
Args:
request: (BigqueryTablesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BigqueryTablesDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
"""Gets the specified table resource by table ID. This method does not return the data in the table, it only returns the table resource, which describes the structure of this table.
Args:
request: (BigqueryTablesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Table) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def Insert(self, request, global_params=None):
"""Creates a new, empty table in the dataset.
Args:
request: (BigqueryTablesInsertRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Table) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""Lists all tables in the specified dataset. Requires the READER dataset role.
Args:
request: (BigqueryTablesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TableList) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
def Patch(self, request, global_params=None):
"""Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource. This method supports patch semantics.
Args:
request: (BigqueryTablesPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Table) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
def Update(self, request, global_params=None):
"""Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource.
Args:
request: (BigqueryTablesUpdateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Table) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
| apache-2.0 |
JonDoNym/peinjector | peinjector/connectors/python/libPePatch.py | 34 | 4600 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Provides de-serialization and in-stream patch applying capabilities for PE Files
"""
__author__ = 'A.A.'
# Unpack binary data
from struct import unpack_from
# Holds a single patch part
class PePatchPart(object):
# Constructor
def __init__(self, mem, position, insert):
self.mem = mem
self.next = None
self.position = position
self.insert = insert
self.finished = False
# Deserializes and applies patches to PE files
class PePatch(object):
# Sentinel size
    pepatch_sentinelsize = 9
# First Patch part
first = None
# Constructor
def __init__(self, serialized_memory):
serialized_mem_size = len(serialized_memory)
current_position = 0
current = None
patch = None
# Deserialize data
while (serialized_mem_size - current_position) >= self.pepatch_sentinelsize:
mem_size, position, insert = unpack_from("<II?", serialized_memory, current_position)
# 2*sizeof(uint32_t) + sizeof(uint8_t)
current_position += 9
# Length Error
if (serialized_mem_size - current_position) < mem_size:
return
# Extract Data
patch_data = serialized_memory[current_position:current_position + mem_size]
# Change Position
current_position += mem_size
# Add Patch
if mem_size > 0:
patch = PePatchPart(patch_data, position, insert)
else:
patch = None
# Build chain
if current is not None:
current.next = patch
if self.first is None:
self.first = patch
current = patch
# Length Error
if (serialized_mem_size - current_position) > 0:
self.first = None
return
# Patch is ok
def patch_ok(self):
return self.first is not None
# Apply patch on stream data
def apply_patch(self, mem, position):
all_finished = True
# Nothing to patch
if self.first is None:
return mem
# Apply each patch part
current = self.first
while current is not None:
# Finished, no need to check
if current.finished:
current = current.next
continue
# Patch starts inside memory
if position <= current.position < (position + len(mem)):
delta_position = current.position - position
# Insert
if current.insert:
mem = mem[:delta_position] + current.mem + mem[delta_position:]
# Patch part finished
current.finished = True
# Overwrite
else:
mem = mem[:delta_position] + current.mem[:len(mem)-delta_position] \
+ mem[delta_position+len(current.mem):]
# Patch applied
all_finished = False
# Append after current mem part (important if current part is the last part)
elif current.insert and (current.position == (position + len(mem))):
# Append patch
mem = mem + current.mem
# Patch part finished
current.finished = True
# Patch applied
all_finished = False
# Patch starts before memory
elif (not current.insert) and ((current.position + len(current.mem)) > position)\
and (current.position < position):
delta_position = position - current.position
mem = current.mem[delta_position:delta_position+len(mem)] + mem[len(current.mem)-delta_position:]
# Patch applied
all_finished = False
# Patch finished
elif (current.position + len(current.mem)) < position:
current.finished = True
# Reset total finished
else:
# Patch waiting
all_finished = False
# Next patch part
current = current.next
# Patch finished
if all_finished:
self.first = None
# Return patched memory
return mem
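# Illustrative sketch (editor's addition): apply a deserialized patch to a
# stream chunk by chunk while tracking the absolute position. The position must
# advance by the original chunk length, because insert patches can grow a
# chunk. The argument names are hypothetical.
def _example_stream_patch(serialized_patch, stream_chunks):
    patch = PePatch(serialized_patch)
    position = 0
    patched_chunks = []
    for chunk in stream_chunks:
        original_length = len(chunk)
        if patch.patch_ok():
            chunk = patch.apply_patch(chunk, position)
        patched_chunks.append(chunk)
        position += original_length
    return patched_chunks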
| unlicense |
fsimkovic/cptbx | conkit/io/tests/test_pdb.py | 2 | 10444 | """Testing facility for conkit.io.PdbIO"""
__author__ = "Felix Simkovic"
__date__ = "26 Oct 2016"
import os
import unittest
from conkit.io.pdb import PdbParser
from conkit.io.tests.helpers import ParserTestCase
class TestPdbIO(ParserTestCase):
def test_read_1(self):
content = """ATOM 1 N TYR A 36 39.107 51.628 3.103 0.50 43.13 N
ATOM 2 CA TYR A 36 38.300 50.814 2.204 0.50 41.80 C
ATOM 3 O TYR A 36 38.712 48.587 1.405 0.50 41.03 O
ATOM 4 CB TYR A 36 37.586 51.694 1.175 0.50 41.61 C
ATOM 5 N PHE A 86 32.465 47.498 5.487 0.50 25.81 N
ATOM 6 CA PHE A 86 32.670 48.303 4.288 0.50 26.45 C
ATOM 7 O PHE A 86 31.469 50.326 3.758 0.50 28.47 O
ATOM 8 CB PHE A 86 32.977 47.392 3.090 0.50 25.35 C
ATOM 9 N TRP A 171 23.397 37.507 -1.161 0.50 18.04 N
ATOM 10 CA TRP A 171 23.458 36.846 0.143 0.50 20.46 C
ATOM 11 O TRP A 171 22.235 34.954 0.951 0.50 22.45 O
ATOM 12 CB TRP A 171 23.647 37.866 1.275 0.50 18.83 C
ATOM 13 N PHE A 208 32.221 42.624 -5.829 0.50 19.96 N
ATOM 14 CA PHE A 208 31.905 43.710 -4.909 0.50 20.31 C
ATOM 15 O PHE A 208 32.852 45.936 -5.051 0.50 17.69 O
ATOM 16 CB PHE A 208 31.726 43.102 -3.518 0.50 19.90 C
END
"""
f_name = self.tempfile(content=content)
with open(f_name, "r") as f_in:
contact_file = PdbParser().read(f_in, distance_cutoff=8, atom_type="CB")
contact_map1 = contact_file.top_map
self.assertEqual(1, len(contact_file))
self.assertEqual(2, len(contact_map1))
self.assertEqual([36, 86], [c.res1_seq for c in contact_map1 if c.true_positive])
self.assertEqual([86, 208], [c.res2_seq for c in contact_map1 if c.true_positive])
self.assertEqual([0.934108, 0.920229], [c.raw_score for c in contact_map1 if c.true_positive])
def test_read_2(self):
content = """ATOM 1 N TYR A 36 39.107 51.628 3.103 0.50 43.13 N
ATOM 2 CA TYR A 36 38.300 50.814 2.204 0.50 41.80 C
ATOM 3 O TYR A 36 38.712 48.587 1.405 0.50 41.03 O
ATOM 4 CB TYR A 36 37.586 51.694 1.175 0.50 41.61 C
ATOM 5 N PHE A 86 32.465 47.498 5.487 0.50 25.81 N
ATOM 6 CA PHE A 86 32.670 48.303 4.288 0.50 26.45 C
ATOM 7 O PHE A 86 31.469 50.326 3.758 0.50 28.47 O
ATOM 8 CB PHE A 86 32.977 47.392 3.090 0.50 25.35 C
ATOM 9 N TRP A 171 23.397 37.507 -1.161 0.50 18.04 N
ATOM 10 CA TRP A 171 23.458 36.846 0.143 0.50 20.46 C
ATOM 11 O TRP A 171 22.235 34.954 0.951 0.50 22.45 O
ATOM 12 CB TRP A 171 23.647 37.866 1.275 0.50 18.83 C
ATOM 13 N PHE A 208 32.221 42.624 -5.829 0.50 19.96 N
ATOM 14 CA PHE A 208 31.905 43.710 -4.909 0.50 20.31 C
ATOM 15 O PHE A 208 32.852 45.936 -5.051 0.50 17.69 O
ATOM 16 CB PHE A 208 31.726 43.102 -3.518 0.50 19.90 C
END
"""
f_name = self.tempfile(content=content)
with open(f_name, "r") as f_in:
contact_file = PdbParser().read(f_in, distance_cutoff=8, atom_type="CA")
contact_map1 = contact_file.top_map
self.assertEqual(1, len(contact_file))
self.assertEqual(1, len(contact_map1))
self.assertEqual([36], [c.res1_seq for c in contact_map1 if c.true_positive])
self.assertEqual([86], [c.res2_seq for c in contact_map1 if c.true_positive])
self.assertEqual([0.934927], [c.raw_score for c in contact_map1 if c.true_positive])
def test_read_3(self):
content = """ATOM 1 N TYR A 36 39.107 51.628 3.103 0.50 43.13 N
ATOM 2 CA TYR A 36 38.300 50.814 2.204 0.50 41.80 C
ATOM 3 O TYR A 36 38.712 48.587 1.405 0.50 41.03 O
ATOM 4 CB TYR A 36 37.586 51.694 1.175 0.50 41.61 C
ATOM 5 N PHE A 86 32.465 47.498 5.487 0.50 25.81 N
ATOM 6 CA PHE A 86 32.670 48.303 4.288 0.50 26.45 C
ATOM 7 O PHE A 86 31.469 50.326 3.758 0.50 28.47 O
ATOM 8 CB PHE A 86 32.977 47.392 3.090 0.50 25.35 C
ATOM 9 N TRP A 171 23.397 37.507 -1.161 0.50 18.04 N
ATOM 10 CA TRP A 171 23.458 36.846 0.143 0.50 20.46 C
ATOM 11 O TRP A 171 22.235 34.954 0.951 0.50 22.45 O
ATOM 12 CB TRP A 171 23.647 37.866 1.275 0.50 18.83 C
ATOM 13 N PHE A 208 32.221 42.624 -5.829 0.50 19.96 N
ATOM 14 CA PHE A 208 31.905 43.710 -4.909 0.50 20.31 C
ATOM 15 O PHE A 208 32.852 45.936 -5.051 0.50 17.69 O
ATOM 16 CB PHE A 208 31.726 43.102 -3.518 0.50 19.90 C
END
"""
f_name = self.tempfile(content=content)
with open(f_name, "r") as f_in:
contact_file = PdbParser().read(f_in, distance_cutoff=7, atom_type="CB")
contact_map1 = contact_file.top_map
self.assertEqual(1, len(contact_file))
self.assertEqual(1, len(contact_map1))
self.assertEqual([36], [c.res1_seq for c in contact_map1 if c.true_positive])
self.assertEqual([86], [c.res2_seq for c in contact_map1 if c.true_positive])
self.assertEqual([0.934108], [c.raw_score for c in contact_map1 if c.true_positive])
def test_read_4(self):
content = """ATOM 1 N TYR A 36 39.107 51.628 3.103 0.50 43.13 N
ATOM 2 CA TYR A 36 38.300 50.814 2.204 0.50 41.80 C
ATOM 3 O TYR A 36 38.712 48.587 1.405 0.50 41.03 O
ATOM 4 CB TYR A 36 37.586 51.694 1.175 0.50 41.61 C
ATOM 5 N PHE A 86 32.465 47.498 5.487 0.50 25.81 N
ATOM 6 CA PHE A 86 32.670 48.303 4.288 0.50 26.45 C
ATOM 7 O PHE A 86 31.469 50.326 3.758 0.50 28.47 O
ATOM 8 CB PHE A 86 32.977 47.392 3.090 0.50 25.35 C
TER
ATOM 9 N TRP B 171 23.397 37.507 -1.161 0.50 18.04 N
ATOM 10 CA TRP B 171 23.458 36.846 0.143 0.50 20.46 C
ATOM 11 O TRP B 171 22.235 34.954 0.951 0.50 22.45 O
ATOM 12 CB TRP B 171 23.647 37.866 1.275 0.50 18.83 C
ATOM 13 N PHE B 208 32.221 42.624 -5.829 0.50 19.96 N
ATOM 14 CA PHE B 208 31.905 43.710 -4.909 0.50 20.31 C
ATOM 15 O PHE B 208 32.852 45.936 -5.051 0.50 17.69 O
ATOM 16 CB PHE B 208 31.726 43.102 -3.518 0.50 19.90 C
END
"""
f_name = self.tempfile(content=content)
with open(f_name, "r") as f_in:
contact_file = PdbParser().read(f_in, distance_cutoff=8, atom_type="CB")
# Two maps because no contacts in B
contact_map1 = contact_file["A"] # chain A
contact_map2 = contact_file["AB"] # chain AB
contact_map3 = contact_file["BA"] # chain BA
self.assertEqual(3, len(contact_file))
self.assertEqual(1, len(contact_map1))
self.assertEqual(["A", "A"], [contact_map1.top_contact.res1_chain, contact_map1.top_contact.res2_chain])
self.assertEqual([36, 86], [contact_map1.top_contact.res1_seq, contact_map1.top_contact.res2_seq])
self.assertEqual(1, len(contact_map2))
self.assertEqual(["A", "B"], [contact_map2.top_contact.res1_chain, contact_map2.top_contact.res2_chain])
self.assertEqual([86, 208], [contact_map2.top_contact.res1_seq, contact_map2.top_contact.res2_seq])
self.assertEqual(1, len(contact_map3))
self.assertEqual(["B", "A"], [contact_map3.top_contact.res1_chain, contact_map3.top_contact.res2_chain])
self.assertEqual([208, 86], [contact_map3.top_contact.res1_seq, contact_map3.top_contact.res2_seq])
def test_read_5(self):
content = """ATOM 1 N TYR A 36 39.107 51.628 3.103 0.50 43.13 N
ATOM 2 CA TYR A 36 38.300 50.814 2.204 0.50 41.80 C
ATOM 3 O TYR A 36 38.712 48.587 1.405 0.50 41.03 O
ATOM 4 CB TYR A 36 37.586 51.694 1.175 0.50 41.61 C
ATOM 5 N PHE A 86 32.465 47.498 5.487 0.50 25.81 N
ATOM 6 CA PHE A 86 32.670 48.303 4.288 0.50 26.45 C
ATOM 7 O PHE A 86 31.469 50.326 3.758 0.50 28.47 O
ATOM 8 CB PHE A 86 32.977 47.392 3.090 0.50 25.35 C
ATOM 9 N TRP A 171 23.397 37.507 -1.161 0.50 18.04 N
ATOM 10 CA TRP A 171 23.458 36.846 0.143 0.50 20.46 C
ATOM 11 O TRP A 171 22.235 34.954 0.951 0.50 22.45 O
ATOM 12 CB TRP A 171 23.647 37.866 1.275 0.50 18.83 C
ATOM 13 N PHE A 208 32.221 42.624 -5.829 0.50 19.96 N
ATOM 14 CA PHE A 208 31.905 43.710 -4.909 0.50 20.31 C
ATOM 15 O PHE A 208 32.852 45.936 -5.051 0.50 17.69 O
ATOM 16 CB PHE A 208 31.726 43.102 -3.518 0.50 19.90 C
END
"""
f_name = self.tempfile(content=content)
with open(f_name, "r") as f_in:
contact_file = PdbParser().read(f_in, distance_cutoff=0, atom_type="CB")
contact_map1 = contact_file.top_map
self.assertEqual(1, len(contact_file))
self.assertEqual(6, len(contact_map1))
self.assertEqual([36, 36, 36, 86, 86, 171], [c.res1_seq for c in contact_map1 if c.true_positive])
self.assertEqual([86, 171, 208, 171, 208, 208], [c.res2_seq for c in contact_map1 if c.true_positive])
if __name__ == "__main__":
unittest.main(verbosity=2)
| gpl-3.0 |
ncdesouza/bookworm | env/lib/python2.7/site-packages/jinja2/testsuite/core_tags.py | 412 | 11858 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.core_tags
~~~~~~~~~~~~~~~~~~~~~~~~~~
Test the core tags like for and if.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment, TemplateSyntaxError, UndefinedError, \
DictLoader
env = Environment()
class ForLoopTestCase(JinjaTestCase):
def test_simple(self):
tmpl = env.from_string('{% for item in seq %}{{ item }}{% endfor %}')
assert tmpl.render(seq=list(range(10))) == '0123456789'
def test_else(self):
tmpl = env.from_string('{% for item in seq %}XXX{% else %}...{% endfor %}')
assert tmpl.render() == '...'
def test_empty_blocks(self):
tmpl = env.from_string('<{% for item in seq %}{% else %}{% endfor %}>')
assert tmpl.render() == '<>'
def test_context_vars(self):
tmpl = env.from_string('''{% for item in seq -%}
{{ loop.index }}|{{ loop.index0 }}|{{ loop.revindex }}|{{
loop.revindex0 }}|{{ loop.first }}|{{ loop.last }}|{{
loop.length }}###{% endfor %}''')
one, two, _ = tmpl.render(seq=[0, 1]).split('###')
(one_index, one_index0, one_revindex, one_revindex0, one_first,
one_last, one_length) = one.split('|')
(two_index, two_index0, two_revindex, two_revindex0, two_first,
two_last, two_length) = two.split('|')
assert int(one_index) == 1 and int(two_index) == 2
assert int(one_index0) == 0 and int(two_index0) == 1
assert int(one_revindex) == 2 and int(two_revindex) == 1
assert int(one_revindex0) == 1 and int(two_revindex0) == 0
assert one_first == 'True' and two_first == 'False'
assert one_last == 'False' and two_last == 'True'
assert one_length == two_length == '2'
def test_cycling(self):
tmpl = env.from_string('''{% for item in seq %}{{
loop.cycle('<1>', '<2>') }}{% endfor %}{%
for item in seq %}{{ loop.cycle(*through) }}{% endfor %}''')
output = tmpl.render(seq=list(range(4)), through=('<1>', '<2>'))
assert output == '<1><2>' * 4
def test_scope(self):
tmpl = env.from_string('{% for item in seq %}{% endfor %}{{ item }}')
output = tmpl.render(seq=list(range(10)))
assert not output
def test_varlen(self):
def inner():
for item in range(5):
yield item
tmpl = env.from_string('{% for item in iter %}{{ item }}{% endfor %}')
output = tmpl.render(iter=inner())
assert output == '01234'
def test_noniter(self):
tmpl = env.from_string('{% for item in none %}...{% endfor %}')
self.assert_raises(TypeError, tmpl.render)
def test_recursive(self):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
assert tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]) == '[1<[1][2]>][2<[1][2]>][3<[a]>]'
def test_recursive_depth0(self):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ loop.depth0 }}:{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
self.assertEqual(tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]), '[0:1<[1:1][1:2]>][0:2<[1:1][1:2]>][0:3<[1:a]>]')
def test_recursive_depth(self):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ loop.depth }}:{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
self.assertEqual(tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]), '[1:1<[2:1][2:2]>][1:2<[2:1][2:2]>][1:3<[2:a]>]')
def test_looploop(self):
tmpl = env.from_string('''{% for row in table %}
{%- set rowloop = loop -%}
{% for cell in row -%}
[{{ rowloop.index }}|{{ loop.index }}]
{%- endfor %}
{%- endfor %}''')
assert tmpl.render(table=['ab', 'cd']) == '[1|1][1|2][2|1][2|2]'
def test_reversed_bug(self):
tmpl = env.from_string('{% for i in items %}{{ i }}'
'{% if not loop.last %}'
',{% endif %}{% endfor %}')
assert tmpl.render(items=reversed([3, 2, 1])) == '1,2,3'
def test_loop_errors(self):
tmpl = env.from_string('''{% for item in [1] if loop.index
== 0 %}...{% endfor %}''')
self.assert_raises(UndefinedError, tmpl.render)
tmpl = env.from_string('''{% for item in [] %}...{% else
%}{{ loop }}{% endfor %}''')
assert tmpl.render() == ''
def test_loop_filter(self):
tmpl = env.from_string('{% for item in range(10) if item '
'is even %}[{{ item }}]{% endfor %}')
assert tmpl.render() == '[0][2][4][6][8]'
tmpl = env.from_string('''
{%- for item in range(10) if item is even %}[{{
loop.index }}:{{ item }}]{% endfor %}''')
assert tmpl.render() == '[1:0][2:2][3:4][4:6][5:8]'
def test_loop_unassignable(self):
self.assert_raises(TemplateSyntaxError, env.from_string,
'{% for loop in seq %}...{% endfor %}')
def test_scoped_special_var(self):
t = env.from_string('{% for s in seq %}[{{ loop.first }}{% for c in s %}'
'|{{ loop.first }}{% endfor %}]{% endfor %}')
assert t.render(seq=('ab', 'cd')) == '[True|True|False][False|True|False]'
def test_scoped_loop_var(self):
t = env.from_string('{% for x in seq %}{{ loop.first }}'
'{% for y in seq %}{% endfor %}{% endfor %}')
assert t.render(seq='ab') == 'TrueFalse'
t = env.from_string('{% for x in seq %}{% for y in seq %}'
'{{ loop.first }}{% endfor %}{% endfor %}')
assert t.render(seq='ab') == 'TrueFalseTrueFalse'
def test_recursive_empty_loop_iter(self):
t = env.from_string('''
{%- for item in foo recursive -%}{%- endfor -%}
''')
assert t.render(dict(foo=[])) == ''
def test_call_in_loop(self):
t = env.from_string('''
{%- macro do_something() -%}
[{{ caller() }}]
{%- endmacro %}
{%- for i in [1, 2, 3] %}
{%- call do_something() -%}
{{ i }}
{%- endcall %}
{%- endfor -%}
''')
assert t.render() == '[1][2][3]'
def test_scoping_bug(self):
t = env.from_string('''
{%- for item in foo %}...{{ item }}...{% endfor %}
{%- macro item(a) %}...{{ a }}...{% endmacro %}
{{- item(2) -}}
''')
assert t.render(foo=(1,)) == '...1......2...'
def test_unpacking(self):
tmpl = env.from_string('{% for a, b, c in [[1, 2, 3]] %}'
'{{ a }}|{{ b }}|{{ c }}{% endfor %}')
assert tmpl.render() == '1|2|3'
class IfConditionTestCase(JinjaTestCase):
def test_simple(self):
tmpl = env.from_string('''{% if true %}...{% endif %}''')
assert tmpl.render() == '...'
def test_elif(self):
tmpl = env.from_string('''{% if false %}XXX{% elif true
%}...{% else %}XXX{% endif %}''')
assert tmpl.render() == '...'
def test_else(self):
tmpl = env.from_string('{% if false %}XXX{% else %}...{% endif %}')
assert tmpl.render() == '...'
def test_empty(self):
tmpl = env.from_string('[{% if true %}{% else %}{% endif %}]')
assert tmpl.render() == '[]'
def test_complete(self):
tmpl = env.from_string('{% if a %}A{% elif b %}B{% elif c == d %}'
'C{% else %}D{% endif %}')
assert tmpl.render(a=0, b=False, c=42, d=42.0) == 'C'
def test_no_scope(self):
tmpl = env.from_string('{% if a %}{% set foo = 1 %}{% endif %}{{ foo }}')
assert tmpl.render(a=True) == '1'
tmpl = env.from_string('{% if true %}{% set foo = 1 %}{% endif %}{{ foo }}')
assert tmpl.render() == '1'
class MacrosTestCase(JinjaTestCase):
env = Environment(trim_blocks=True)
def test_simple(self):
tmpl = self.env.from_string('''\
{% macro say_hello(name) %}Hello {{ name }}!{% endmacro %}
{{ say_hello('Peter') }}''')
assert tmpl.render() == 'Hello Peter!'
def test_scoping(self):
tmpl = self.env.from_string('''\
{% macro level1(data1) %}
{% macro level2(data2) %}{{ data1 }}|{{ data2 }}{% endmacro %}
{{ level2('bar') }}{% endmacro %}
{{ level1('foo') }}''')
assert tmpl.render() == 'foo|bar'
def test_arguments(self):
tmpl = self.env.from_string('''\
{% macro m(a, b, c='c', d='d') %}{{ a }}|{{ b }}|{{ c }}|{{ d }}{% endmacro %}
{{ m() }}|{{ m('a') }}|{{ m('a', 'b') }}|{{ m(1, 2, 3) }}''')
assert tmpl.render() == '||c|d|a||c|d|a|b|c|d|1|2|3|d'
def test_varargs(self):
tmpl = self.env.from_string('''\
{% macro test() %}{{ varargs|join('|') }}{% endmacro %}\
{{ test(1, 2, 3) }}''')
assert tmpl.render() == '1|2|3'
def test_simple_call(self):
tmpl = self.env.from_string('''\
{% macro test() %}[[{{ caller() }}]]{% endmacro %}\
{% call test() %}data{% endcall %}''')
assert tmpl.render() == '[[data]]'
def test_complex_call(self):
tmpl = self.env.from_string('''\
{% macro test() %}[[{{ caller('data') }}]]{% endmacro %}\
{% call(data) test() %}{{ data }}{% endcall %}''')
assert tmpl.render() == '[[data]]'
def test_caller_undefined(self):
tmpl = self.env.from_string('''\
{% set caller = 42 %}\
{% macro test() %}{{ caller is not defined }}{% endmacro %}\
{{ test() }}''')
assert tmpl.render() == 'True'
def test_include(self):
self.env = Environment(loader=DictLoader({'include':
'{% macro test(foo) %}[{{ foo }}]{% endmacro %}'}))
tmpl = self.env.from_string('{% from "include" import test %}{{ test("foo") }}')
assert tmpl.render() == '[foo]'
def test_macro_api(self):
tmpl = self.env.from_string('{% macro foo(a, b) %}{% endmacro %}'
'{% macro bar() %}{{ varargs }}{{ kwargs }}{% endmacro %}'
'{% macro baz() %}{{ caller() }}{% endmacro %}')
assert tmpl.module.foo.arguments == ('a', 'b')
assert tmpl.module.foo.defaults == ()
assert tmpl.module.foo.name == 'foo'
assert not tmpl.module.foo.caller
assert not tmpl.module.foo.catch_kwargs
assert not tmpl.module.foo.catch_varargs
assert tmpl.module.bar.arguments == ()
assert tmpl.module.bar.defaults == ()
assert not tmpl.module.bar.caller
assert tmpl.module.bar.catch_kwargs
assert tmpl.module.bar.catch_varargs
assert tmpl.module.baz.caller
def test_callself(self):
tmpl = self.env.from_string('{% macro foo(x) %}{{ x }}{% if x > 1 %}|'
'{{ foo(x - 1) }}{% endif %}{% endmacro %}'
'{{ foo(5) }}')
assert tmpl.render() == '5|4|3|2|1'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ForLoopTestCase))
suite.addTest(unittest.makeSuite(IfConditionTestCase))
suite.addTest(unittest.makeSuite(MacrosTestCase))
return suite
| gpl-3.0 |
iamroot12C/linux | tools/perf/scripts/python/net_dropmonitor.py | 1812 | 1749 | # Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
except:
return
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
kallsyms.append((loc, name))
kallsyms.sort()
def get_sym(sloc):
loc = int(sloc)
# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
# kallsyms[i][0] > loc for all end <= i < len(kallsyms)
start, end = -1, len(kallsyms)
while end != start + 1:
pivot = (start + end) // 2
if loc < kallsyms[pivot][0]:
end = pivot
else:
start = pivot
# Now (start == -1 or kallsyms[start][0] <= loc)
# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
if start >= 0:
symloc, name = kallsyms[start]
return (name, loc - symloc)
else:
return (None, 0)
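# Hedged illustration of get_sym() (addresses invented for the example): with
# kallsyms == [(0x1000, 'startup'), (0x2000, 'secondary_start')], a call to
# get_sym(0x1010) returns ('startup', 0x10), while an address below the first
# entry (or an empty table) returns (None, 0).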
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
skbaddr, location, protocol):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
| gpl-2.0 |
noroutine/ansible | lib/ansible/utils/module_docs_fragments/openstack.py | 133 | 3961 | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard openstack documentation fragment
DOCUMENTATION = '''
options:
cloud:
description:
- Named cloud to operate against. Provides default values for I(auth) and
I(auth_type). This parameter is not needed if I(auth) is provided or if
OpenStack OS_* environment variables are present.
required: false
auth:
description:
- Dictionary containing auth information as needed by the cloud's auth
plugin strategy. For the default I(password) plugin, this would contain
I(auth_url), I(username), I(password), I(project_name) and any
information about domains if the cloud supports them. For other plugins,
this param will need to contain whatever parameters that auth plugin
requires. This parameter is not needed if a named cloud is provided or
OpenStack OS_* environment variables are present.
required: false
auth_type:
description:
- Name of the auth plugin to use. If the cloud uses something other than
password authentication, the name of the plugin should be indicated here
and the contents of the I(auth) parameter should be updated accordingly.
required: false
default: password
region_name:
description:
- Name of the region.
required: false
wait:
description:
- Should ansible wait until the requested resource is complete.
required: false
default: "yes"
choices: ["yes", "no"]
timeout:
description:
- How long should ansible wait for the requested resource.
required: false
default: 180
api_timeout:
description:
- How long should the socket layer wait before timing out for API calls.
If this is omitted, nothing will be passed to the requests library.
required: false
default: None
validate_certs:
description:
- Whether or not SSL API requests should be verified. Before 2.3 this defaulted to True.
required: false
default: null
aliases: ['verify']
cacert:
description:
- A path to a CA Cert bundle that can be used as part of verifying
SSL API requests.
required: false
default: None
cert:
description:
- A path to a client certificate to use as part of the SSL transaction.
required: false
default: None
key:
description:
- A path to a client key to use as part of the SSL transaction.
required: false
default: None
endpoint_type:
description:
- Endpoint URL type to fetch from the service catalog.
choices: [public, internal, admin]
required: false
default: public
requirements:
- python >= 2.7
- shade
notes:
- The standard OpenStack environment variables, such as C(OS_USERNAME)
may be used instead of providing explicit values.
- Auth information is driven by os-client-config, which means that values
can come from a yaml config file in /etc/ansible/openstack.yaml,
/etc/openstack/clouds.yaml or ~/.config/openstack/clouds.yaml, then from
standard environment variables, then finally by explicit parameters in
plays. More information can be found at
U(http://docs.openstack.org/developer/os-client-config)
'''
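# Hedged usage sketch (not part of this fragment): a playbook task using a
# module that inherits these options might look like the following; the module
# name and values are illustrative assumptions only.
#
#   - os_server:
#       cloud: mycloud            # or pass an explicit `auth:` dictionary
#       region_name: RegionOne
#       wait: yes
#       timeout: 180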
| gpl-3.0 |
crakensio/django_training | lib/python2.7/site-packages/pip/vcs/bazaar.py | 393 | 4943 | import os
import tempfile
import re
from pip.backwardcompat import urlparse
from pip.log import logger
from pip.util import rmtree, display_path, call_subprocess
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
class Bazaar(VersionControl):
name = 'bzr'
dirname = '.bzr'
repo_name = 'branch'
bundle_file = 'bzr-branch.txt'
schemes = ('bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp', 'bzr+lp')
guide = ('# This was a Bazaar branch; to make it a branch again run:\n'
'bzr branch -r %(rev)s %(url)s .\n')
def __init__(self, url=None, *args, **kwargs):
super(Bazaar, self).__init__(url, *args, **kwargs)
# Python >= 2.7.4, 3.3 doesn't have uses_fragment or non_hierarchical
# Register lp but do not expose as a scheme to support bzr+lp.
if getattr(urlparse, 'uses_fragment', None):
urlparse.uses_fragment.extend(['lp'])
urlparse.non_hierarchical.extend(['lp'])
def parse_vcs_bundle_file(self, content):
url = rev = None
for line in content.splitlines():
if not line.strip() or line.strip().startswith('#'):
continue
match = re.search(r'^bzr\s*branch\s*-r\s*(\d*)', line)
if match:
rev = match.group(1).strip()
url = line[match.end():].strip().split(None, 1)[0]
if url and rev:
return url, rev
return None, None
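# Hedged example of the bundle content parse_vcs_bundle_file() expects (URL and
# revision invented): a non-comment line such as
#   bzr branch -r 42 http://bazaar.example.org/proj/trunk .
# yields ('http://bazaar.example.org/proj/trunk', '42').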
def export(self, location):
"""Export the Bazaar repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
if os.path.exists(location):
# Remove the location to make sure Bazaar can export it correctly
rmtree(location)
try:
call_subprocess([self.cmd, 'export', location], cwd=temp_dir,
filter_stdout=self._filter, show_stdout=False)
finally:
rmtree(temp_dir)
def switch(self, dest, url, rev_options):
call_subprocess([self.cmd, 'switch', url], cwd=dest)
def update(self, dest, rev_options):
call_subprocess(
[self.cmd, 'pull', '-q'] + rev_options, cwd=dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = ['-r', rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.notify('Checking out %s%s to %s'
% (url, rev_display, display_path(dest)))
call_subprocess(
[self.cmd, 'branch', '-q'] + rev_options + [url, dest])
def get_url_rev(self):
# hotfix the URL scheme: after removing bzr+ from bzr+ssh://, re-add it
url, rev = super(Bazaar, self).get_url_rev()
if url.startswith('ssh://'):
url = 'bzr+' + url
return url, rev
def get_url(self, location):
urls = call_subprocess(
[self.cmd, 'info'], show_stdout=False, cwd=location)
for line in urls.splitlines():
line = line.strip()
for x in ('checkout of branch: ',
'parent branch: '):
if line.startswith(x):
repo = line.split(x)[1]
if self._is_local_repository(repo):
return path_to_url(repo)
return repo
return None
def get_revision(self, location):
revision = call_subprocess(
[self.cmd, 'revno'], show_stdout=False, cwd=location)
return revision.splitlines()[-1]
def get_tag_revs(self, location):
tags = call_subprocess(
[self.cmd, 'tags'], show_stdout=False, cwd=location)
tag_revs = []
for line in tags.splitlines():
tags_match = re.search(r'([.\w-]+)\s*(.*)$', line)
if tags_match:
tag = tags_match.group(1)
rev = tags_match.group(2)
tag_revs.append((rev.strip(), tag.strip()))
return dict(tag_revs)
def get_src_requirement(self, dist, location, find_tags):
repo = self.get_url(location)
if not repo.lower().startswith('bzr:'):
repo = 'bzr+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
tag_revs = self.get_tag_revs(location)
if current_rev in tag_revs:
# It's a tag
full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev])
else:
full_egg_name = '%s-dev_r%s' % (dist.egg_name(), current_rev)
return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)
vcs.register(Bazaar)
| cc0-1.0 |
agoravoting/agora-results | agora_results/pipes/pdf.py | 1 | 18370 | # -*- coding:utf-8 -*-
# This file is part of agora-results.
# Copyright (C) 2016-2021 Agora Voting SL <[email protected]>
# agora-results is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License.
# agora-results is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with agora-results. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
import json
import requests
from datetime import datetime, timedelta
from reportlab.lib import colors
from reportlab.platypus import (
SimpleDocTemplate,
Paragraph,
Spacer,
Table,
TableStyle,
Image
)
from reportlab.lib.styles import ParagraphStyle, getSampleStyleSheet
from reportlab.lib.enums import TA_RIGHT, TA_LEFT, TA_CENTER
from reportlab.pdfgen import canvas
from reportlab.lib.units import mm
import gettext
import os
def configure_pdf(
data_list,
title=None,
first_description_paragraph=None,
last_description_paragraph=None,
languages=None
):
data = data_list[0]
data['pdf'] = {}
if title:
assert(isinstance(title, str))
data['pdf']['title'] = title
if first_description_paragraph:
assert(isinstance(first_description_paragraph, str))
data['pdf']['first_description_paragraph'] = first_description_paragraph
if last_description_paragraph:
assert(isinstance(last_description_paragraph, str))
data['pdf']['last_description_paragraph'] = last_description_paragraph
if languages:
assert(isinstance(languages, list))
for language in languages:
assert(isinstance(language, str))
data['pdf']['languages'] = languages
def gen_text(
text,
size=None,
bold=False,
align=None,
color='black',
fontName=None
):
if not isinstance(text, str):
text = text.__str__()
p = ParagraphStyle('test')
if fontName:
p.fontName = fontName
if size:
p.fontSize = size
p.leading = size * 1.2
if bold:
text = '<b>%s</b>' % text
p.textColor = color
if align:
p.alignment = align
return Paragraph(text, p)
def get_election_cfg(election_id):
headers = {'content-type': 'application/json'}
base_url = 'http://localhost:9000/api'
url = '%s/election/%d' % (base_url, election_id)
try:
r = requests.get(url, headers=headers, timeout=5)
except requests.exceptions.Timeout:
raise Exception(
'Timeout when requesting election_id = %s' % election_id
)
if r.status_code != 200:
print(r.status_code, r.text)
raise Exception(
'Invalid status code: %d for election_id = %s' % (
r.status_code,
election_id
)
)
return r.json()
class NumberedCanvas(canvas.Canvas):
def __init__(self, *args, **kwargs):
canvas.Canvas.__init__(self, *args, **kwargs)
self._saved_page_states = []
def showPage(self):
self._saved_page_states.append(dict(self.__dict__))
self._startPage()
def save(self):
"""add page info to each page (page x of y)"""
num_pages = len(self._saved_page_states)
for state in self._saved_page_states:
self.__dict__.update(state)
self.draw_page_number(num_pages)
canvas.Canvas.showPage(self)
canvas.Canvas.save(self)
def draw_page_number(self, page_count):
self.setFont("Helvetica", 7)
self.drawRightString(200*mm, 20*mm,
"Page %d of %d" % (self._pageNumber, page_count))
def _header_footer(canvas, doc):
# Save the state of our canvas so we can draw on it
canvas.saveState()
styles = getSampleStyleSheet()
# Header
header = Image(
'/home/agoraelections/agora-results/img/nvotes_logo.jpg',
height=20,
width=80
)
header.hAlign = 'RIGHT'
w, h = header.wrap(doc.width, doc.topMargin)
header.drawOn(
canvas,
doc.width - w + doc.rightMargin,
doc.height + h + doc.bottomMargin - doc.topMargin
)
# Release the canvas
canvas.restoreState()
def pdf_print(election_results, config_folder, election_id):
localedir = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'locale'
)
translate = gettext.translation(
'pipes',
localedir,
languages=election_results.get('pdf', dict()).get('languages', None),
fallback=True
)
_ = translate.gettext
try:
jsonconfig = get_election_cfg(election_id)
election_title = jsonconfig['payload']['configuration']['title']
except:
election_title = ""
tx_description = _(
'Detailed and question by question results of the election ' +
'{election_id} titled <u>"{election_title}"</u>.'
).format(
election_id=election_id,
election_title=election_title
)
tx_title = _(
'Results of the election tally {election_id} - {election_title}'
).format(
election_id=election_id,
election_title=election_title
)
pdf_path = os.path.join(config_folder, "%s.results.pdf" % election_id)
styleSheet = getSampleStyleSheet()
doc = SimpleDocTemplate(
pdf_path,
rightMargin=50,
leftMargin=50,
topMargin=35,
bottomMargin=80
)
elements = []
the_title = tx_title
if 'pdf' in election_results and 'title' in election_results['pdf']:
the_title = election_results['pdf']['title']
elements.append(Spacer(0, 15))
elements.append(gen_text(the_title, size=20, bold=True, align = TA_LEFT))
elements.append(Spacer(0, 15))
if (
'pdf' in election_results and
'first_description_paragraph' in election_results['pdf']
):
elements.append(
gen_text(
election_results['pdf']['first_description_paragraph'],
size=12,
align=TA_LEFT
)
)
elements.append(Spacer(0, 15))
elements.append(gen_text(tx_description, size=12, align = TA_LEFT))
elements.append(Spacer(0, 15))
if (
'pdf' in election_results and
'last_description_paragraph' in election_results['pdf']
):
elements.append(
gen_text(
election_results['pdf']['last_description_paragraph'],
size=12,
align=TA_LEFT
)
)
elements.append(Spacer(0, 15))
doc.title = tx_title
'''
Returns num/base as a fraction, returning 0 when base is 0
'''
def get_percentage(num, base):
if base == 0:
return 0
else:
return num/base
counts = election_results['results']['questions']
for question, i in zip(counts, range(len(counts))):
blank_votes = question['totals']['blank_votes']
null_votes = question['totals']['null_votes']
valid_votes = question['totals']['valid_votes']
total_votes = blank_votes + null_votes + valid_votes
percent_base = question['answer_total_votes_percentage']
if percent_base == "over-total-votes":
base_num = total_votes
elif percent_base == "over-total-valid-votes":
base_num = question['totals']['valid_votes']
elif (
"over-total-valid-points" == percent_base and
"valid_points" in question['totals']
):
base_num = question['totals']['valid_points']
elements.append(
gen_text(
_('Question {question_index}: {question_title}').format(
question_index=i+1,
question_title=question['title']
),
size=15,
bold=True,
align=TA_LEFT
)
)
elements.append(Spacer(0, 15))
t = Table([[
gen_text(
_('Configuration Data'),
align=TA_CENTER
)
]])
table_style = TableStyle(
[
('BACKGROUND',(0,0),(-1,-1),'#b6d7a8'),
('BOX', (0,0), (-1,-1), 0.5, colors.grey)
]
)
t.setStyle(table_style)
elements.append(t)
tally_type = {
"plurality-at-large": _(
"First past the post, Plurality or Plurality at Large"
),
"cumulative": _("Cumulative voting"),
"borda-nauru": _("Borda Nauru or Borda Dowdall (1/n)"),
"borda": "Borda Count (traditional)",
"pairwise-beta": _("Pairwise comparison (beta distribution)"),
"desborda3": _("Desborda3"),
"desborda2": _("Desborda2"),
"desborda": _("Desborda")
}
data = [
[
gen_text(
_('Tally system'),
align=TA_RIGHT
),
gen_text(tally_type[question['tally_type']], align=TA_LEFT)
],
[
gen_text(
_('Minimum number of options a voter can select'),
align=TA_RIGHT
),
gen_text(str(question['min']), align=TA_LEFT)
],
[
gen_text(
_('Maximum number of options a voter can select'),
align=TA_RIGHT
),
gen_text(str(question['max']), align=TA_LEFT)
],
[
gen_text(
_('Number of winning options'),
align=TA_RIGHT
),
gen_text(str(question['num_winners']), align=TA_LEFT)
],
[
gen_text(
_('Options appear in the voting booth in random order'),
align=TA_RIGHT
),
gen_text(
_('Yes')
if (
'shuffle_all_options' in question['extra_options'] and
question['extra_options']['shuffle_all_options']
)
else _('No'),
align=TA_LEFT
)
]
]
table_style = TableStyle(
[
('BACKGROUND',(0,0),(0,-1),'#efefef'),
('INNERGRID', (0,0), (-1,-1), 0.5, colors.grey),
('BOX', (0,0), (-1,-1), 0.5, colors.grey)
]
)
t = Table(data)
t.setStyle(table_style)
elements.append(t)
elements.append(Spacer(0, 15))
t = Table(
[
[
gen_text(
_('Participation in question {question_index}').format(
question_index=i + 1
),
align=TA_CENTER
)
]
]
)
table_style = TableStyle(
[
('BACKGROUND',(0,0),(-1,-1),'#b6d7a8'),
('BOX', (0,0), (-1,-1), 0.5, colors.grey)
]
)
t.setStyle(table_style)
elements.append(t)
data = [
[
gen_text(_('Total number of votes cast'), align=TA_RIGHT),
gen_text(str(total_votes), align=TA_LEFT)
],
[
gen_text(_('Blank votes'), align=TA_RIGHT),
gen_text(
_(
"{blank_votes} ({percentage:.2%} over the total " +
"number of votes)"
).format(
blank_votes=blank_votes,
percentage=get_percentage(blank_votes, total_votes)
),
align=TA_LEFT
)
],
[
gen_text(_('Null votes'), align=TA_RIGHT),
gen_text(
_(
"{null_votes} ({percentage:.2%} over the total " +
"number of votes)"
).format(
null_votes=null_votes,
percentage=get_percentage(null_votes, total_votes)
),
align=TA_LEFT
)
],
[
gen_text(
_('Total number of votes for options'),
align=TA_RIGHT
),
gen_text(
_(
"{valid_votes} ({percentage:.2%} over the total " +
"number of votes)"
).format(
valid_votes=valid_votes,
percentage=get_percentage(valid_votes, total_votes)
),
align=TA_LEFT
)
],
[
gen_text(
_('Voting period start date'),
align=TA_RIGHT
),
gen_text(
str(
datetime.strptime(
jsonconfig['payload']['startDate'],
'%Y-%m-%dT%H:%M:%S.%f'
)
),
align=TA_LEFT
)
],
[
gen_text(
_('Voting period end date'),
align=TA_RIGHT
),
gen_text(
str(
datetime.strptime(
jsonconfig['payload']['endDate'],
'%Y-%m-%dT%H:%M:%S.%f'
)
),
align=TA_LEFT
)
],
[
gen_text(_('Tally end date'), align=TA_RIGHT),
gen_text(
str(
datetime.strptime(
jsonconfig['date'],
'%Y-%m-%d %H:%M:%S.%f'
)
),
align=TA_LEFT
)
]
]
table_style = TableStyle(
[
('BACKGROUND',(0,0),(0,-1),'#efefef'),
('INNERGRID', (0,0), (-1,-1), 0.5, colors.grey),
('BOX', (0,0), (-1,-1), 0.5, colors.grey)
]
)
t=Table(data)
t.setStyle(table_style)
elements.append(t)
elements.append(Spacer(0, 15))
t = Table([[
gen_text(
_('Candidate results'),
align=TA_CENTER
)
]])
table_style = TableStyle(
[
('BACKGROUND',(0,0),(-1,-1),'#b6d7a8'),
('BOX', (0,0), (-1,-1), 0.5, colors.grey)
]
)
t.setStyle(table_style)
elements.append(t)
winners = sorted(
[
answer
for answer in question['answers']
if answer['winner_position'] is not None
],
key=lambda a: a['winner_position']
)
losers_by_name = sorted(
[
answer for answer in question['answers']
if answer['winner_position'] is None
],
key=lambda a: a['text']
)
losers = sorted(
losers_by_name,
key=lambda a: float(a['total_count']),
reverse=True
)
data = [
[
gen_text(
_('Name'),
align=TA_RIGHT
),
gen_text(
_('Points'),
align=TA_CENTER
),
gen_text(
_('Winning position'),
align=TA_LEFT
)
]
]
table_style = TableStyle(
[
('BACKGROUND',(0,0),(-1,0),'#cccccc'),
('BACKGROUND',(0,1),(0,-1),'#efefef'),
('BACKGROUND',(-1,1),(-1,-1),'#efefef'),
('INNERGRID', (0,0), (-1,-1), 0.5, colors.grey),
('BOX', (0,0), (-1,-1), 0.5, colors.grey)
]
)
for answer in winners:
answer_text = answer['text']
if dict(title='isWriteInResult', url='true') in answer.get('urls', []):
answer_text = _('{candidate_text} (Write-in)').format(
candidate_text=answer['text']
)
data.append(
[
gen_text(answer_text, bold = True, align=TA_RIGHT),
gen_text(
'%d' % answer['total_count'],
bold=True,
align=TA_CENTER
),
gen_text(
'%dº' % (answer['winner_position'] + 1),
bold=True,
align=TA_LEFT
)
]
)
for loser in losers:
loser_text = loser['text']
if dict(title='isWriteInResult', url='true') in loser.get('urls', []):
loser_text = _('{candidate_text} (Write-in)').format(
candidate_text=loser['text']
)
data.append(
[
gen_text(loser_text, align=TA_RIGHT),
gen_text(
'%d' % loser['total_count'],
align=TA_CENTER
),
gen_text('-', align=TA_LEFT)
]
)
t = Table(data)
t.setStyle(table_style)
elements.append(t)
elements.append(Spacer(0, 15))
doc.build(
elements,
onFirstPage=_header_footer,
onLaterPages=_header_footer,
canvasmaker=NumberedCanvas
)
| agpl-3.0 |
PyLearner/tp-qemu | generic/tests/ioquit.py | 9 | 1206 | import logging
import time
import random
from autotest.client.shared import error
@error.context_aware
def run(test, params, env):
"""
Emulate poweroff under an IO workload (dd so far) with signal SIGKILL.
1) Boot a VM
2) Add IO workload for guest OS
3) Sleep for a random time
4) Kill the VM
:param test: Kvm test object
:param params: Dictionary with the test parameters.
:param env: Dictionary with test environment.
"""
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
login_timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=login_timeout)
session2 = vm.wait_for_login(timeout=login_timeout)
bg_cmd = params.get("background_cmd")
error.context("Add IO workload for guest OS.", logging.info)
session.cmd_output(bg_cmd, timeout=60)
error.context("Verify the background process is running")
check_cmd = params.get("check_cmd")
session2.cmd(check_cmd, timeout=60)
error.context("Sleep for a random time", logging.info)
time.sleep(random.randrange(30, 100))
session2.cmd(check_cmd, timeout=60)
error.context("Kill the VM", logging.info)
vm.process.close()
| gpl-2.0 |
scripteed/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/mac/gyptest-strip-default.py | 232 | 2448 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that the default STRIP_STYLEs match between different generators.
"""
import TestGyp
import re
import subprocess
import sys
import time
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR='strip'
test.run_gyp('test-defaults.gyp', chdir=CHDIR)
test.build('test-defaults.gyp', test.ALL, chdir=CHDIR)
# Lightweight check if stripping was done.
def OutPath(s):
return test.built_file_path(s, chdir=CHDIR)
def CheckNsyms(p, o_expected):
proc = subprocess.Popen(['nm', '-aU', p], stdout=subprocess.PIPE)
o = proc.communicate()[0]
# Filter out mysterious "00 0000 OPT radr://5614542" symbol which
# is apparently only printed on the bots (older toolchain?).
# Yes, "radr", not "rdar".
o = ''.join(filter(lambda s: 'radr://5614542' not in s, o.splitlines(True)))
o = o.replace('A', 'T')
o = re.sub(r'^[a-fA-F0-9]+', 'XXXXXXXX', o, flags=re.MULTILINE)
assert not proc.returncode
if o != o_expected:
print 'Stripping: Expected symbols """\n%s""", got """\n%s"""' % (
o_expected, o)
test.fail_test()
CheckNsyms(OutPath('libsingle_dylib.dylib'),
"""\
XXXXXXXX S _ci
XXXXXXXX S _i
XXXXXXXX T _the_function
XXXXXXXX t _the_hidden_function
XXXXXXXX T _the_used_function
XXXXXXXX T _the_visible_function
""")
CheckNsyms(OutPath('single_so.so'),
"""\
XXXXXXXX S _ci
XXXXXXXX S _i
XXXXXXXX T _the_function
XXXXXXXX t _the_hidden_function
XXXXXXXX T _the_used_function
XXXXXXXX T _the_visible_function
""")
CheckNsyms(OutPath('single_exe'),
"""\
XXXXXXXX T __mh_execute_header
""")
CheckNsyms(test.built_file_path(
'bundle_dylib.framework/Versions/A/bundle_dylib', chdir=CHDIR),
"""\
XXXXXXXX S _ci
XXXXXXXX S _i
XXXXXXXX T _the_function
XXXXXXXX t _the_hidden_function
XXXXXXXX T _the_used_function
XXXXXXXX T _the_visible_function
""")
CheckNsyms(test.built_file_path(
'bundle_so.bundle/Contents/MacOS/bundle_so', chdir=CHDIR),
"""\
XXXXXXXX S _ci
XXXXXXXX S _i
XXXXXXXX T _the_function
XXXXXXXX T _the_used_function
XXXXXXXX T _the_visible_function
""")
CheckNsyms(test.built_file_path(
'bundle_exe.app/Contents/MacOS/bundle_exe', chdir=CHDIR),
"""\
XXXXXXXX T __mh_execute_header
""")
test.pass_test()
| gpl-3.0 |
saimn/astropy | astropy/wcs/wcsapi/tests/test_utils.py | 11 | 1548 | import numpy as np
from numpy.testing import assert_allclose
import pytest
from pytest import raises
from astropy import units as u
from astropy.wcs import WCS
from astropy.tests.helper import assert_quantity_allclose
from astropy.wcs.wcsapi.utils import deserialize_class, wcs_info_str
def test_construct():
result = deserialize_class(('astropy.units.Quantity', (10,), {'unit': 'deg'}))
assert_quantity_allclose(result, 10 * u.deg)
def test_noconstruct():
result = deserialize_class(('astropy.units.Quantity', (), {'unit': 'deg'}), construct=False)
assert result == (u.Quantity, (), {'unit': 'deg'})
def test_invalid():
with raises(ValueError) as exc:
deserialize_class(('astropy.units.Quantity', (), {'unit': 'deg'}, ()))
assert exc.value.args[0] == 'Expected a tuple of three values'
DEFAULT_1D_STR = """
WCS Transformation
This transformation has 1 pixel and 1 world dimensions
Array shape (Numpy order): None
Pixel Dim Axis Name Data size Bounds
0 None None None
World Dim Axis Name Physical Type Units
0 None None unknown
Correlation between pixel and world axes:
Pixel Dim
World Dim 0
0 yes
"""
def test_wcs_info_str():
# The tests in test_sliced_low_level_wcs.py exercise wcs_info_str
# extensively. This test is to ensure that the function exists and the
# API of the function works as expected.
wcs_empty = WCS(naxis=1)
assert wcs_info_str(wcs_empty).strip() == DEFAULT_1D_STR.strip()
| bsd-3-clause |
arnaudsj/suds | suds/servicedefinition.py | 200 | 8478 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The I{service definition} provides a textual representation of a service.
"""
from logging import getLogger
from suds import *
import suds.metrics as metrics
from suds.sax import Namespace
log = getLogger(__name__)
class ServiceDefinition:
"""
A service definition provides an object used to generate a textual description
of a service.
@ivar wsdl: A wsdl.
@type wsdl: L{wsdl.Definitions}
@ivar service: The service object.
@type service: L{suds.wsdl.Service}
@ivar ports: A list of port-tuple: (port, [(method-name, pdef)])
@type ports: [port-tuple,..]
@ivar prefixes: A list of remapped prefixes.
@type prefixes: [(prefix,uri),..]
@ivar types: A list of type definitions
@type types: [I{Type},..]
"""
def __init__(self, wsdl, service):
"""
@param wsdl: A wsdl object
@type wsdl: L{Definitions}
@param service: A service B{name}.
@type service: str
"""
self.wsdl = wsdl
self.service = service
self.ports = []
self.params = []
self.types = []
self.prefixes = []
self.addports()
self.paramtypes()
self.publictypes()
self.getprefixes()
self.pushprefixes()
def pushprefixes(self):
"""
Add our prefixes to the wsdl so that when users invoke methods
and reference the prefixes, they will resolve properly.
"""
for ns in self.prefixes:
self.wsdl.root.addPrefix(ns[0], ns[1])
def addports(self):
"""
Look through the list of service ports and construct a list of tuples where
each tuple describes a port and its list of methods as:
(port, [method]). Each method is a tuple: (name, [pdef,..]) where each pdef is
a tuple: (param-name, type).
"""
timer = metrics.Timer()
timer.start()
for port in self.service.ports:
p = self.findport(port)
for op in port.binding.operations.values():
m = p[0].method(op.name)
binding = m.binding.input
method = (m.name, binding.param_defs(m))
p[1].append(method)
metrics.log.debug("method '%s' created: %s", m.name, timer)
p[1].sort()
timer.stop()
def findport(self, port):
"""
Find and return a port tuple for the specified port.
Created and added when not found.
@param port: A port.
@type port: I{service.Port}
@return: A port tuple.
@rtype: (port, [method])
"""
for p in self.ports:
if p[0] == port: return p
p = (port, [])
self.ports.append(p)
return p
def getprefixes(self):
"""
Add prefixes for each namespace referenced by parameter types.
"""
namespaces = []
for l in (self.params, self.types):
for t,r in l:
ns = r.namespace()
if ns[1] is None: continue
if ns[1] in namespaces: continue
if Namespace.xs(ns) or Namespace.xsd(ns):
continue
namespaces.append(ns[1])
if t == r: continue
ns = t.namespace()
if ns[1] is None: continue
if ns[1] in namespaces: continue
namespaces.append(ns[1])
i = 0
namespaces.sort()
for u in namespaces:
p = self.nextprefix()
ns = (p, u)
self.prefixes.append(ns)
def paramtypes(self):
""" get all parameter types """
for m in [p[1] for p in self.ports]:
for p in [p[1] for p in m]:
for pd in p:
if pd[1] in self.params: continue
item = (pd[1], pd[1].resolve())
self.params.append(item)
def publictypes(self):
""" get all public types """
for t in self.wsdl.schema.types.values():
if t in self.params: continue
if t in self.types: continue
item = (t, t)
self.types.append(item)
tc = lambda x,y: cmp(x[0].name, y[0].name)
self.types.sort(cmp=tc)
def nextprefix(self):
"""
Get the next available prefix. This means a prefix starting with 'ns' with
a number appended as (ns0, ns1, ..) that is not already defined on the
wsdl document.
"""
used = [ns[0] for ns in self.prefixes]
used += [ns[0] for ns in self.wsdl.root.nsprefixes.items()]
for n in range(0,1024):
p = 'ns%d'%n
if p not in used:
return p
raise Exception('prefixes exhausted')
def getprefix(self, u):
"""
Get the prefix for the specified namespace (uri)
@param u: A namespace uri.
@type u: str
@return: The namespace.
@rtype: (prefix, uri).
"""
for ns in Namespace.all:
if u == ns[1]: return ns[0]
for ns in self.prefixes:
if u == ns[1]: return ns[0]
raise Exception('ns (%s) not mapped' % u)
def xlate(self, type):
"""
Get a (namespace) translated I{qualified} name for specified type.
@param type: A schema type.
@type type: I{suds.xsd.sxbasic.SchemaObject}
@return: A translated I{qualified} name.
@rtype: str
"""
resolved = type.resolve()
name = resolved.name
if type.unbounded():
name += '[]'
ns = resolved.namespace()
if ns[1] == self.wsdl.tns[1]:
return name
prefix = self.getprefix(ns[1])
return ':'.join((prefix, name))
def description(self):
"""
Get a textual description of the service for which this object represents.
@return: A textual description.
@rtype: str
"""
s = []
indent = (lambda n : '\n%*s'%(n*3,' '))
s.append('Service ( %s ) tns="%s"' % (self.service.name, self.wsdl.tns[1]))
s.append(indent(1))
s.append('Prefixes (%d)' % len(self.prefixes))
for p in self.prefixes:
s.append(indent(2))
s.append('%s = "%s"' % p)
s.append(indent(1))
s.append('Ports (%d):' % len(self.ports))
for p in self.ports:
s.append(indent(2))
s.append('(%s)' % p[0].name)
s.append(indent(3))
s.append('Methods (%d):' % len(p[1]))
for m in p[1]:
sig = []
s.append(indent(4))
sig.append(m[0])
sig.append('(')
for p in m[1]:
sig.append(self.xlate(p[1]))
sig.append(' ')
sig.append(p[0])
sig.append(', ')
sig.append(')')
try:
s.append(''.join(sig))
except:
pass
s.append(indent(3))
s.append('Types (%d):' % len(self.types))
for t in self.types:
s.append(indent(4))
s.append(self.xlate(t[0]))
s.append('\n\n')
return ''.join(s)
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
try:
return self.description()
except Exception, e:
log.exception(e)
return tostr(e) | lgpl-3.0 |
openstack/ironic | ironic/hacking/checks.py | 1 | 1950 | # Copyright 2018 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from hacking import core
# N323: Found use of _() without explicit import of _!
UNDERSCORE_IMPORT_FILES = []
string_translation = re.compile(r"[^_]*_\(\s*('|\")")
translated_log = re.compile(
r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)"
r"\(\s*_\(\s*('|\")")
underscore_import_check = re.compile(r"(.)*import _(.)*")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
@core.flake8ext
def check_explicit_underscore_import(logical_line, filename):
"""Check for explicit import of the _ function
We need to ensure that any files that are using the _() function
to translate logs are explicitly importing the _ function. We
can't trust unit test to catch whether the import has been
added so we need to check for it here.
"""
# Build a list of the files that have _ imported. No further
# checking needed once it is found.
if filename in UNDERSCORE_IMPORT_FILES:
pass
elif (underscore_import_check.match(logical_line)
or custom_underscore_check.match(logical_line)):
UNDERSCORE_IMPORT_FILES.append(filename)
elif (translated_log.match(logical_line)
or string_translation.match(logical_line)):
yield(0, "N323: Found use of _() without explicit import of _!")
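# Hedged illustration of what the check above reports (lines invented, not
# taken from the Ironic tree):
#   LOG.error(_('boom'))                 # flagged with N323 ...
#   from ironic.common.i18n import _     # ... unless an explicit import like
#                                        # this appears in the same file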
| apache-2.0 |
divio/django | django/test/runner.py | 148 | 14807 | import logging
import os
import unittest
from importlib import import_module
from unittest import TestSuite, defaultTestLoader
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.test import SimpleTestCase, TestCase
from django.test.utils import setup_test_environment, teardown_test_environment
from django.utils.datastructures import OrderedSet
from django.utils.six import StringIO
class DebugSQLTextTestResult(unittest.TextTestResult):
def __init__(self, stream, descriptions, verbosity):
self.logger = logging.getLogger('django.db.backends')
self.logger.setLevel(logging.DEBUG)
super(DebugSQLTextTestResult, self).__init__(stream, descriptions, verbosity)
def startTest(self, test):
self.debug_sql_stream = StringIO()
self.handler = logging.StreamHandler(self.debug_sql_stream)
self.logger.addHandler(self.handler)
super(DebugSQLTextTestResult, self).startTest(test)
def stopTest(self, test):
super(DebugSQLTextTestResult, self).stopTest(test)
self.logger.removeHandler(self.handler)
if self.showAll:
self.debug_sql_stream.seek(0)
self.stream.write(self.debug_sql_stream.read())
self.stream.writeln(self.separator2)
def addError(self, test, err):
super(DebugSQLTextTestResult, self).addError(test, err)
self.debug_sql_stream.seek(0)
self.errors[-1] = self.errors[-1] + (self.debug_sql_stream.read(),)
def addFailure(self, test, err):
super(DebugSQLTextTestResult, self).addFailure(test, err)
self.debug_sql_stream.seek(0)
self.failures[-1] = self.failures[-1] + (self.debug_sql_stream.read(),)
def printErrorList(self, flavour, errors):
for test, err, sql_debug in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % sql_debug)
class DiscoverRunner(object):
"""
A Django test runner that uses unittest2 test discovery.
"""
test_suite = TestSuite
test_runner = unittest.TextTestRunner
test_loader = defaultTestLoader
reorder_by = (TestCase, SimpleTestCase)
def __init__(self, pattern=None, top_level=None, verbosity=1,
interactive=True, failfast=False, keepdb=False,
reverse=False, debug_sql=False, **kwargs):
self.pattern = pattern
self.top_level = top_level
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
self.keepdb = keepdb
self.reverse = reverse
self.debug_sql = debug_sql
@classmethod
def add_arguments(cls, parser):
parser.add_argument('-t', '--top-level-directory',
action='store', dest='top_level', default=None,
help='Top level of project for unittest discovery.')
parser.add_argument('-p', '--pattern', action='store', dest='pattern',
default="test*.py",
help='The test matching pattern. Defaults to test*.py.')
parser.add_argument('-k', '--keepdb', action='store_true', dest='keepdb',
default=False,
help='Preserves the test DB between runs.')
parser.add_argument('-r', '--reverse', action='store_true', dest='reverse',
default=False,
help='Reverses test cases order.')
parser.add_argument('-d', '--debug-sql', action='store_true', dest='debug_sql',
default=False,
help='Prints logged SQL queries on failure.')
def setup_test_environment(self, **kwargs):
setup_test_environment()
settings.DEBUG = False
unittest.installHandler()
def build_suite(self, test_labels=None, extra_tests=None, **kwargs):
suite = self.test_suite()
test_labels = test_labels or ['.']
extra_tests = extra_tests or []
discover_kwargs = {}
if self.pattern is not None:
discover_kwargs['pattern'] = self.pattern
if self.top_level is not None:
discover_kwargs['top_level_dir'] = self.top_level
for label in test_labels:
kwargs = discover_kwargs.copy()
tests = None
label_as_path = os.path.abspath(label)
# if a module, or "module.ClassName[.method_name]", just run those
if not os.path.exists(label_as_path):
tests = self.test_loader.loadTestsFromName(label)
elif os.path.isdir(label_as_path) and not self.top_level:
# Try to be a bit smarter than unittest about finding the
# default top-level for a given directory path, to avoid
# breaking relative imports. (Unittest's default is to set
# top-level equal to the path, which means relative imports
# will result in "Attempted relative import in non-package.").
# We'd be happy to skip this and require dotted module paths
# (which don't cause this problem) instead of file paths (which
# do), but in the case of a directory in the cwd, which would
# be equally valid if considered as a top-level module or as a
# directory path, unittest unfortunately prefers the latter.
top_level = label_as_path
while True:
init_py = os.path.join(top_level, '__init__.py')
if os.path.exists(init_py):
try_next = os.path.dirname(top_level)
if try_next == top_level:
# __init__.py all the way down? give up.
break
top_level = try_next
continue
break
kwargs['top_level_dir'] = top_level
if not (tests and tests.countTestCases()) and is_discoverable(label):
# Try discovery if path is a package or directory
tests = self.test_loader.discover(start_dir=label, **kwargs)
# Make unittest forget the top-level dir it calculated from this
# run, to support running tests from two different top-levels.
self.test_loader._top_level_dir = None
suite.addTests(tests)
for test in extra_tests:
suite.addTest(test)
return reorder_suite(suite, self.reorder_by, self.reverse)
def setup_databases(self, **kwargs):
return setup_databases(
self.verbosity, self.interactive, self.keepdb, self.debug_sql,
**kwargs
)
def get_resultclass(self):
return DebugSQLTextTestResult if self.debug_sql else None
def run_suite(self, suite, **kwargs):
resultclass = self.get_resultclass()
return self.test_runner(
verbosity=self.verbosity,
failfast=self.failfast,
resultclass=resultclass,
).run(suite)
def teardown_databases(self, old_config, **kwargs):
"""
Destroys all the non-mirror databases.
"""
old_names, mirrors = old_config
for connection, old_name, destroy in old_names:
if destroy:
connection.creation.destroy_test_db(old_name, self.verbosity, self.keepdb)
def teardown_test_environment(self, **kwargs):
unittest.removeHandler()
teardown_test_environment()
def suite_result(self, suite, result, **kwargs):
return len(result.failures) + len(result.errors)
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Test labels should be dotted Python paths to test modules, test
classes, or test methods.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Returns the number of tests that failed.
"""
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
old_config = self.setup_databases()
result = self.run_suite(suite)
self.teardown_databases(old_config)
self.teardown_test_environment()
return self.suite_result(suite, result)
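# Hedged usage sketch (test label is hypothetical): scripts typically drive the
# runner like this and exit non-zero when anything failed.
#
#   failures = DiscoverRunner(verbosity=2).run_tests(['myapp.tests'])
#   sys.exit(bool(failures))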
def is_discoverable(label):
"""
Check if a test label points to a python package or file directory.
Relative labels like "." and ".." are seen as directories.
"""
try:
mod = import_module(label)
except (ImportError, TypeError):
pass
else:
return hasattr(mod, '__path__')
return os.path.isdir(os.path.abspath(label))
def dependency_ordered(test_databases, dependencies):
"""
Reorder test_databases into an order that honors the dependencies
described in TEST[DEPENDENCIES].
"""
ordered_test_databases = []
resolved_databases = set()
# Maps db signature to dependencies of all its aliases
dependencies_map = {}
# sanity check - no DB can depend on its own alias
for sig, (_, aliases) in test_databases:
all_deps = set()
for alias in aliases:
all_deps.update(dependencies.get(alias, []))
if not all_deps.isdisjoint(aliases):
raise ImproperlyConfigured(
"Circular dependency: databases %r depend on each other, "
"but are aliases." % aliases)
dependencies_map[sig] = all_deps
while test_databases:
changed = False
deferred = []
# Try to find a DB that has all its dependencies met
for signature, (db_name, aliases) in test_databases:
if dependencies_map[signature].issubset(resolved_databases):
resolved_databases.update(aliases)
ordered_test_databases.append((signature, (db_name, aliases)))
changed = True
else:
deferred.append((signature, (db_name, aliases)))
if not changed:
raise ImproperlyConfigured(
"Circular dependency in TEST[DEPENDENCIES]")
test_databases = deferred
return ordered_test_databases
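# Hedged illustration of the ordering above (signatures and names invented):
#   dbs = [('sig_other', ('other_db', {'other'})), ('sig_def', ('def_db', {'default'}))]
#   deps = {'other': ['default']}
#   dependency_ordered(dbs, deps)
#   # -> [('sig_def', ...), ('sig_other', ...)]; a dependency cycle would raise
#   #    ImproperlyConfigured instead.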
def reorder_suite(suite, classes, reverse=False):
"""
Reorders a test suite by test type.
`classes` is a sequence of types
All tests of type classes[0] are placed first, then tests of type
classes[1], etc. Tests with no match in classes are placed last.
If `reverse` is True, tests within classes are sorted in opposite order,
but test classes are not reversed.
"""
class_count = len(classes)
suite_class = type(suite)
bins = [OrderedSet() for i in range(class_count + 1)]
partition_suite(suite, classes, bins, reverse=reverse)
reordered_suite = suite_class()
for i in range(class_count + 1):
reordered_suite.addTests(bins[i])
return reordered_suite
def partition_suite(suite, classes, bins, reverse=False):
"""
Partitions a test suite by test type. Also prevents duplicated tests.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
reverse changes the ordering of tests within bins
Tests of type classes[i] are added to bins[i],
tests with no match found in classes are placed in bins[-1]
"""
suite_class = type(suite)
if reverse:
suite = reversed(tuple(suite))
for test in suite:
if isinstance(test, suite_class):
partition_suite(test, classes, bins, reverse=reverse)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].add(test)
break
else:
bins[-1].add(test)
def setup_databases(verbosity, interactive, keepdb=False, debug_sql=False, **kwargs):
from django.db import connections, DEFAULT_DB_ALIAS
# First pass -- work out which databases actually need to be created,
# and which ones are test mirrors or duplicate entries in DATABASES
mirrored_aliases = {}
test_databases = {}
dependencies = {}
default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature()
for alias in connections:
connection = connections[alias]
test_settings = connection.settings_dict['TEST']
if test_settings['MIRROR']:
# If the database is marked as a test mirror, save
# the alias.
mirrored_aliases[alias] = test_settings['MIRROR']
else:
# Store a tuple with DB parameters that uniquely identify it.
# If we have two aliases with the same values for that tuple,
# we only need to create the test database once.
item = test_databases.setdefault(
connection.creation.test_db_signature(),
(connection.settings_dict['NAME'], set())
)
item[1].add(alias)
if 'DEPENDENCIES' in test_settings:
dependencies[alias] = test_settings['DEPENDENCIES']
else:
if alias != DEFAULT_DB_ALIAS and connection.creation.test_db_signature() != default_sig:
dependencies[alias] = test_settings.get('DEPENDENCIES', [DEFAULT_DB_ALIAS])
# Second pass -- actually create the databases.
old_names = []
mirrors = []
for signature, (db_name, aliases) in dependency_ordered(
test_databases.items(), dependencies):
test_db_name = None
# Actually create the database for the first connection
for alias in aliases:
connection = connections[alias]
if test_db_name is None:
test_db_name = connection.creation.create_test_db(
verbosity,
autoclobber=not interactive,
keepdb=keepdb,
serialize=connection.settings_dict.get("TEST", {}).get("SERIALIZE", True),
)
destroy = True
else:
connection.settings_dict['NAME'] = test_db_name
destroy = False
old_names.append((connection, db_name, destroy))
for alias, mirror_alias in mirrored_aliases.items():
mirrors.append((alias, connections[alias].settings_dict['NAME']))
connections[alias].creation.set_as_test_mirror(
connections[mirror_alias].settings_dict)
if debug_sql:
for alias in connections:
connections[alias].force_debug_cursor = True
return old_names, mirrors
| bsd-3-clause |
tiborsimko/invenio-pidstore | invenio_pidstore/providers/base.py | 1 | 3776 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Module storing implementations of PID providers."""
from __future__ import absolute_import, print_function
from ..models import PersistentIdentifier, PIDStatus
class BaseProvider(object):
"""Abstract class for persistent identifier provider classes."""
pid_type = None
"""Default persistent identifier type."""
pid_provider = None
"""Persistent identifier provider name."""
default_status = PIDStatus.NEW
"""Default status for newly created PIDs by this provider."""
@classmethod
def create(cls, pid_type=None, pid_value=None, object_type=None,
object_uuid=None, status=None, **kwargs):
"""Create a new instance for the given type and pid.
:param pid_type: Persistent identifier type. (Default: None).
:param pid_value: Persistent identifier value. (Default: None).
:param status: Current PID status.
(Default: :attr:`invenio_pidstore.models.PIDStatus.NEW`)
:param object_type: The object type is a string that identifies its type.
(Default: None).
:param object_uuid: The object UUID. (Default: None).
:returns: A :class:`invenio_pidstore.providers.base.BaseProvider`
instance.
"""
assert pid_value
assert pid_type or cls.pid_type
pid = PersistentIdentifier.create(
pid_type or cls.pid_type,
pid_value,
pid_provider=cls.pid_provider,
object_type=object_type,
object_uuid=object_uuid,
status=status or cls.default_status,
)
return cls(pid, **kwargs)
@classmethod
def get(cls, pid_value, pid_type=None, **kwargs):
"""Get a persistent identifier for this provider.
:param pid_type: Persistent identifier type. (Default: configured
:attr:`invenio_pidstore.providers.base.BaseProvider.pid_type`)
:param pid_value: Persistent identifier value.
:param kwargs: See
:meth:`invenio_pidstore.providers.base.BaseProvider` required
initialization properties.
:returns: A :class:`invenio_pidstore.providers.base.BaseProvider`
instance.
"""
return cls(
PersistentIdentifier.get(pid_type or cls.pid_type, pid_value,
pid_provider=cls.pid_provider),
**kwargs)
def __init__(self, pid):
"""Initialize provider using persistent identifier.
:param pid: A :class:`invenio_pidstore.models.PersistentIdentifier`
instance.
"""
self.pid = pid
assert pid.pid_provider == self.pid_provider
def reserve(self):
"""Reserve a persistent identifier.
This might or might not be useful depending on the service of the
provider.
See: :meth:`invenio_pidstore.models.PersistentIdentifier.reserve`.
"""
return self.pid.reserve()
def register(self):
"""Register a persistent identifier.
See: :meth:`invenio_pidstore.models.PersistentIdentifier.register`.
"""
return self.pid.register()
def update(self):
"""Update information about the persistent identifier."""
pass
def delete(self):
"""Delete a persistent identifier.
See: :meth:`invenio_pidstore.models.PersistentIdentifier.delete`.
"""
return self.pid.delete()
def sync_status(self):
"""Synchronize PIDstatus with remote service provider."""
pass
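# Hedged usage sketch (class name and values are illustrative assumptions):
# a concrete provider normally just pins the class attributes and relies on
# the create()/get() helpers above.
#
#   class RecordIdProvider(BaseProvider):
#       pid_type = 'recid'
#       pid_provider = 'recid'
#       default_status = PIDStatus.RESERVED
#
#   provider = RecordIdProvider.create(pid_value='12345')
#   provider.register()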
| mit |
djw8605/htcondor | src/condor_contrib/campus_factory/python-lib/campus_factory/Parsers.py | 7 | 3591 | import logging
import xml.sax.handler
import os
from select import select
from campus_factory.util.ExternalCommands import RunExternal
class AvailableGlideins(xml.sax.handler.ContentHandler, object):
# Command to query the collector for available glideins
command = "condor_status -avail -const '(IsUndefined(Offline) == True) || (Offline == false)' -format '<glidein name=\"%s\"/>' 'Name'"
def __init__(self):
self.owner_idle = {}
pass
def GetIdle(self):
self.idle = 0
self.found = False
# Get the xml from the collector
to_parse, stderr = RunExternal(self.command)
formatted_to_parse = "<doc>%s</doc>" % to_parse
# Parse the data
try:
xml.sax.parseString(formatted_to_parse, self)
except xml.sax._exceptions.SAXParseException, inst:
logging.error("Error parsing:")
logging.error("command = %s" % self.command)
logging.error("stderr = %s" % stderr)
logging.error("stdout = %s" % to_parse)
logging.error("Error: %s - %s" % ( str(inst), inst.args ))
if not self.found and (len(stderr) != 0):
logging.error("No valid output received from command: %s"% self.command)
logging.error("stderr = %s" % stderr)
logging.error("stdout = %s" % to_parse)
return None
return self.idle
def Run(self):
"""
Generic function for when this class is inherited
"""
return self.GetIdle()
def startElement(self, name, attributes):
if name == "glidein":
self.idle += 1
self.found = True
            # Only the condor_q-based subclasses emit an "owner" attribute;
            # the condor_status commands only carry "name", so guard the lookup.
            if attributes.has_key('owner'):
                if not self.owner_idle.has_key(attributes['owner']):
                    self.owner_idle[attributes['owner']] = 0
                self.owner_idle[attributes['owner']] += 1
def GetOwnerIdle(self):
return self.owner_idle
class IdleGlideins(AvailableGlideins):
command = "condor_q -const '(GlideinJob == true) && (JobStatus == 1)' -format '<glidein owner=\"%s\"/>' 'Owner'"
class IdleJobs(AvailableGlideins):
command = "condor_q -name %s -const '(GlideinJob =!= true) && (JobStatus == 1) && (JobUniverse == 5)' -format '<glidein owner=\"%%s\"/>' 'Owner'"
def __init__(self, schedd):
super(IdleJobs, self).__init__()
self.command = self.command % schedd
class IdleLocalJobs(AvailableGlideins):
command = "condor_q -const '(GlideinJob =!= true) && (JobStatus == 1) && (JobUniverse == 5)' -format '<glidein owner=\"%s\"/>' 'Owner'"
class FactoryID(AvailableGlideins):
command = "condor_q -const '(IsUndefined(IsFactory) == FALSE)' -format '<factory id=\"%s\"/>' 'ClusterId'"
def startElement(self, name, attributes):
if name == "factory":
self.factory_id = attributes.getValue("id")
self.found = True
def GetId(self):
self.GetIdle()
return self.factory_id
class RunningGlideinsJobs(AvailableGlideins):
"""
Gets the number of running glidein jobs (condor_q)
"""
command = "condor_q -const '(GlideinJob == true) && (JobStatus == 2)' -format '<glidein owner=\"%s\"/>' 'Owner'"
class RunningGlideins(AvailableGlideins):
"""
Returns the number of startd's reporting to the collector (condor_status)
"""
command = "condor_status -const '(IsUndefined(IS_GLIDEIN) == FALSE) && (IS_GLIDEIN == TRUE) && (IsUndefined(Offline))' -format '<glidein name=\"%s\"/>' 'Name'"
| apache-2.0 |
linuxlewis/channels | channels/asgi.py | 4 | 3137 | from __future__ import unicode_literals
import django
from django.conf import settings
from django.utils.module_loading import import_string
from .routing import Router
from .utils import name_that_thing
class InvalidChannelLayerError(ValueError):
pass
class ChannelLayerManager(object):
"""
Takes a settings dictionary of backends and initialises them on request.
"""
def __init__(self):
self.backends = {}
@property
def configs(self):
# Lazy load settings so we can be imported
return getattr(settings, "CHANNEL_LAYERS", {})
def make_backend(self, name):
# Load the backend class
try:
backend_class = import_string(self.configs[name]['BACKEND'])
except KeyError:
raise InvalidChannelLayerError("No BACKEND specified for %s" % name)
except ImportError:
raise InvalidChannelLayerError(
"Cannot import BACKEND %r specified for %s" % (self.configs[name]['BACKEND'], name)
)
# Get routing
try:
routing = self.configs[name]['ROUTING']
except KeyError:
raise InvalidChannelLayerError("No ROUTING specified for %s" % name)
# Initialise and pass config
asgi_layer = backend_class(**self.configs[name].get("CONFIG", {}))
return ChannelLayerWrapper(
channel_layer=asgi_layer,
alias=name,
routing=routing,
)
def __getitem__(self, key):
if key not in self.backends:
self.backends[key] = self.make_backend(key)
return self.backends[key]
def __contains__(self, key):
return key in self.configs
def set(self, key, layer):
"""
Sets an alias to point to a new ChannelLayerWrapper instance, and
returns the old one that it replaced. Useful for swapping out the
backend during tests.
"""
old = self.backends.get(key, None)
self.backends[key] = layer
return old
class ChannelLayerWrapper(object):
"""
Top level channel layer wrapper, which contains both the ASGI channel
layer object as well as alias and routing information specific to Django.
"""
def __init__(self, channel_layer, alias, routing):
self.channel_layer = channel_layer
self.alias = alias
self.routing = routing
self.router = Router(self.routing)
def __getattr__(self, name):
return getattr(self.channel_layer, name)
def __str__(self):
return "%s (%s)" % (self.alias, name_that_thing(self.channel_layer))
def local_only(self):
# TODO: Can probably come up with a nicer check?
return "inmemory" in self.channel_layer.__class__.__module__
def get_channel_layer(alias="default"):
"""
Returns the raw ASGI channel layer for this project.
"""
if django.VERSION[1] > 9:
django.setup(set_prefix=False)
else:
django.setup()
return channel_layers[alias].channel_layer
# Default global instance of the channel layer manager
channel_layers = ChannelLayerManager()
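# --- Illustrative sketch, not part of the original module ---
# Shape of the settings dict consumed by ChannelLayerManager.make_backend();
# the backend path and routing module below are assumptions, not defaults.
#     CHANNEL_LAYERS = {
#         "default": {
#             "BACKEND": "asgi_redis.RedisChannelLayer",
#             "ROUTING": "myproject.routing.channel_routing",
#             "CONFIG": {"hosts": [("localhost", 6379)]},
#         },
#     }
#     layer = get_channel_layer()  # ChannelLayerWrapper around the backend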
| bsd-3-clause |
dyoung418/tensorflow | tensorflow/python/keras/_impl/keras/applications/xception_test.py | 35 | 2109 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Xception application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras._impl import keras
from tensorflow.python.platform import test
class XceptionTest(test.TestCase):
def test_with_top(self):
model = keras.applications.Xception(weights=None)
self.assertEqual(model.output_shape, (None, 1000))
def test_no_top(self):
model = keras.applications.Xception(weights=None, include_top=False)
self.assertEqual(model.output_shape, (None, None, None, 2048))
def test_with_pooling(self):
model = keras.applications.Xception(weights=None,
include_top=False,
pooling='avg')
self.assertEqual(model.output_shape, (None, 2048))
def test_weight_loading(self):
with self.assertRaises(ValueError):
keras.applications.Xception(weights='unknown',
include_top=False)
with self.assertRaises(ValueError):
keras.applications.Xception(weights='imagenet',
classes=2000)
def test_preprocess_input(self):
x = np.random.uniform(0, 255, (2, 300, 200, 3))
out1 = keras.applications.xception.preprocess_input(x)
self.assertAllClose(np.mean(out1), 0., atol=0.1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
romankagan/DDBWorkbench | python/lib/Lib/HTMLParser.py | 103 | 12662 | """A parser for HTML and XHTML."""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import markupbase
import re
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
interesting_cdata = re.compile(r'<(/|\Z)')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~@]*))?')
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
"""Exception raised for all parse errors."""
def __init__(self, msg, position=(None, None)):
assert msg
self.msg = msg
self.lineno = position[0]
self.offset = position[1]
def __str__(self):
result = self.msg
if self.lineno is not None:
result = result + ", at line %d" % self.lineno
if self.offset is not None:
result = result + ", column %d" % (self.offset + 1)
return result
class HTMLParser(markupbase.ParserBase):
"""Find tags and other markup and call handler functions.
Usage:
p = HTMLParser()
p.feed(data)
...
p.close()
Start tags are handled by calling self.handle_starttag() or
self.handle_startendtag(); end tags by self.handle_endtag(). The
data between tags is passed from the parser to the derived class
by calling self.handle_data() with the data as argument (the data
may be split up in arbitrary chunks). Entity references are
passed by calling self.handle_entityref() with the entity
reference as the argument. Numeric character references are
passed to self.handle_charref() with the string containing the
reference as the argument.
"""
CDATA_CONTENT_ELEMENTS = ("script", "style")
def __init__(self):
"""Initialize and reset this instance."""
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.rawdata = ''
self.lasttag = '???'
self.interesting = interesting_normal
markupbase.ParserBase.reset(self)
def feed(self, data):
"""Feed data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n').
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle any buffered data."""
self.goahead(1)
def error(self, message):
raise HTMLParseError(message, self.getpos())
__starttag_text = None
def get_starttag_text(self):
"""Return full source of start tag: '<...>'."""
return self.__starttag_text
def set_cdata_mode(self):
self.interesting = interesting_cdata
def clear_cdata_mode(self):
self.interesting = interesting_normal
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
match = self.interesting.search(rawdata, i) # < or &
if match:
j = match.start()
else:
j = n
if i < j: self.handle_data(rawdata[i:j])
i = self.updatepos(i, j)
if i == n: break
startswith = rawdata.startswith
if startswith('<', i):
if starttagopen.match(rawdata, i): # < + letter
k = self.parse_starttag(i)
elif startswith("</", i):
k = self.parse_endtag(i)
elif startswith("<!--", i):
k = self.parse_comment(i)
elif startswith("<?", i):
k = self.parse_pi(i)
elif startswith("<!", i):
k = self.parse_declaration(i)
elif (i + 1) < n:
self.handle_data("<")
k = i + 1
else:
break
if k < 0:
if end:
self.error("EOF in middle of construct")
break
i = self.updatepos(i, k)
elif startswith("&#", i):
match = charref.match(rawdata, i)
if match:
name = match.group()[2:-1]
self.handle_charref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
else:
break
elif startswith('&', i):
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
match = incomplete.match(rawdata, i)
if match:
# match.group() will contain at least 2 chars
if end and match.group() == rawdata[i:]:
self.error("EOF in middle of entity or char ref")
# incomplete
break
elif (i + 1) < n:
# not the end of the buffer, and can't be confused
# with some other construct
self.handle_data("&")
i = self.updatepos(i, i + 1)
else:
break
else:
assert 0, "interesting.search() lied"
# end while
if end and i < n:
self.handle_data(rawdata[i:n])
i = self.updatepos(i, n)
self.rawdata = rawdata[i:]
# Internal -- parse processing instr, return end or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
match = piclose.search(rawdata, i+2) # >
if not match:
return -1
j = match.start()
self.handle_pi(rawdata[i+2: j])
j = match.end()
return j
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
m = attrfind.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode()
return endpos
# Internal -- check to see if we have a complete starttag; return end
# or -1 if incomplete.
def check_for_whole_start_tag(self, i):
rawdata = self.rawdata
m = locatestarttagend.match(rawdata, i)
if m:
j = m.end()
next = rawdata[j:j+1]
if next == ">":
return j + 1
if next == "/":
if rawdata.startswith("/>", j):
return j + 2
if rawdata.startswith("/", j):
# buffer boundary
return -1
# else bogus input
self.updatepos(i, j + 1)
self.error("malformed empty start tag")
if next == "":
# end of input
return -1
if next in ("abcdefghijklmnopqrstuvwxyz=/"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
# end of input in or before attribute value, or we have the
# '/' from a '/>' ending
return -1
self.updatepos(i, j)
self.error("malformed start tag")
raise AssertionError("we should not get here!")
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
match = endendtag.search(rawdata, i+1) # >
if not match:
return -1
j = match.end()
match = endtagfind.match(rawdata, i) # </ + tag + >
if not match:
self.error("bad end tag: %r" % (rawdata[i:j],))
tag = match.group(1)
self.handle_endtag(tag.lower())
self.clear_cdata_mode()
return j
# Overridable -- finish processing of start+end tag: <tag.../>
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
self.handle_endtag(tag)
# Overridable -- handle start tag
def handle_starttag(self, tag, attrs):
pass
# Overridable -- handle end tag
def handle_endtag(self, tag):
pass
# Overridable -- handle character reference
def handle_charref(self, name):
pass
# Overridable -- handle entity reference
def handle_entityref(self, name):
pass
# Overridable -- handle data
def handle_data(self, data):
pass
# Overridable -- handle comment
def handle_comment(self, data):
pass
# Overridable -- handle declaration
def handle_decl(self, decl):
pass
# Overridable -- handle processing instruction
def handle_pi(self, data):
pass
def unknown_decl(self, data):
self.error("unknown declaration: %r" % (data,))
# Internal -- helper to remove special character quoting
def unescape(self, s):
if '&' not in s:
return s
s = s.replace("<", "<")
s = s.replace(">", ">")
s = s.replace("'", "'")
s = s.replace(""", '"')
s = s.replace("&", "&") # Must be last
return s
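# --- Illustrative sketch, not part of the original module ---
# Minimal subclass showing the overridable handler hooks documented above;
# it collects href values from anchor start tags.
class _ExampleLinkExtractor(HTMLParser):
    """Collect href attribute values from <a> tags; illustration only."""
    def reset(self):
        HTMLParser.reset(self)
        self.links = []
    def handle_starttag(self, tag, attrs):
        if tag == "a":
            for name, value in attrs:
                if name == "href" and value is not None:
                    self.links.append(value)
# Hedged usage:
#     p = _ExampleLinkExtractor()
#     p.feed('<a href="http://example.com/">example</a>')
#     p.close()
#     p.links  # -> ['http://example.com/']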
| apache-2.0 |
axinging/chromium-crosswalk | third_party/Python-Markdown/markdown/extensions/meta.py | 114 | 2400 | """
Meta Data Extension for Python-Markdown
=======================================
This extension adds Meta Data handling to markdown.
See <https://pythonhosted.org/Markdown/extensions/meta_data.html>
for documentation.
Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..preprocessors import Preprocessor
import re
import logging
log = logging.getLogger('MARKDOWN')
# Global Vars
META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
BEGIN_RE = re.compile(r'^-{3}(\s.*)?')
END_RE = re.compile(r'^(-{3}|\.{3})(\s.*)?')
class MetaExtension (Extension):
""" Meta-Data extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add MetaPreprocessor to Markdown instance. """
md.preprocessors.add("meta",
MetaPreprocessor(md),
">normalize_whitespace")
class MetaPreprocessor(Preprocessor):
""" Get Meta-Data. """
def run(self, lines):
""" Parse Meta-Data and store in Markdown.Meta. """
meta = {}
key = None
if lines and BEGIN_RE.match(lines[0]):
lines.pop(0)
while lines:
line = lines.pop(0)
m1 = META_RE.match(line)
if line.strip() == '' or END_RE.match(line):
break # blank line or end of YAML header - done
if m1:
key = m1.group('key').lower().strip()
value = m1.group('value').strip()
try:
meta[key].append(value)
except KeyError:
meta[key] = [value]
else:
m2 = META_MORE_RE.match(line)
if m2 and key:
# Add another line to existing key
meta[key].append(m2.group('value').strip())
else:
lines.insert(0, line)
break # no meta data - done
self.markdown.Meta = meta
return lines
def makeExtension(*args, **kwargs):
return MetaExtension(*args, **kwargs)
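# --- Illustrative sketch, not part of the original module ---
# Hedged usage example: keys are lower-cased and every value line is appended
# to a list, so md.Meta maps each key to a list of strings.
#     import markdown
#     md = markdown.Markdown(extensions=['markdown.extensions.meta'])
#     md.convert("Title: Example\n    continued\n\nBody text")
#     md.Meta  # -> {'title': ['Example', 'continued']}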
| bsd-3-clause |
chanderbgoel/pybrain | pybrain/supervised/evolino/filter.py | 25 | 9839 | from __future__ import print_function
__author__ = 'Michael Isik'
from pybrain.supervised.evolino.gfilter import Filter, SimpleMutation
from pybrain.supervised.evolino.variate import CauchyVariate
from pybrain.supervised.evolino.population import SimplePopulation
from pybrain.tools.validation import Validator
from pybrain.tools.kwargsprocessor import KWArgsProcessor
from numpy import array, dot, concatenate, Infinity
from scipy.linalg import pinv2
from copy import deepcopy
class EvolinoEvaluation(Filter):
""" Evaluate all individuals of the Evolino population, and store their
fitness value inside the population.
"""
def __init__(self, evolino_network, dataset, **kwargs):
""" :key evolino_network: an instance of NetworkWrapper()
:key dataset: The evaluation dataset
:key evalfunc: Compares output to target values and returns a scalar, denoting the fitness.
Defaults to -mse(output, target).
:key wtRatio: Float array of two values denoting the ratio between washout and training length.
Defaults to [1,2]
:key verbosity: Verbosity level. Defaults to 0
"""
Filter.__init__(self)
ap = KWArgsProcessor(self, kwargs)
ap.add('verbosity', default=0)
ap.add('evalfunc', default=lambda output, target:-Validator.MSE(output, target))
ap.add('wtRatio', default=array([1, 2], float))
self.network = evolino_network
self.dataset = dataset
self.max_fitness = -Infinity
def _evaluateNet(self, net, dataset, wtRatio):
""" Evaluates the performance of net on the given dataset.
Returns the fitness value.
:key net: Instance of EvolinoNetwork to evaluate
:key dataset: Sequences to test the net on
:key wtRatio: See __init__
"""
# === extract sequences from dataset ===
numSequences = dataset.getNumSequences()
washout_sequences = []
training_sequences = []
for i in range(numSequences):
sequence = dataset.getSequence(i)[1]
training_start = int(wtRatio * len(sequence))
washout_sequences.append(sequence[ : training_start ])
training_sequences.append(sequence[ training_start : ])
# === collect raw output (denoted by phi) ===
phis = []
for i in range(numSequences):
net.reset()
net.washout(washout_sequences[i])
phi = net.washout(training_sequences[i])
phis.append(phi)
# === calculate and set weights of linear output layer ===
PHI = concatenate(phis).T
PHI_INV = pinv2(PHI)
TARGET = concatenate(training_sequences).T
W = dot(TARGET, PHI_INV)
net.setOutputWeightMatrix(W)
# === collect outputs by applying the newly configured network ===
outputs = []
for i in range(numSequences):
out = net.extrapolate(washout_sequences[i], len(training_sequences[i]))
outputs.append(out)
# === calculate fitness value ===
OUTPUT = concatenate(outputs)
TARGET = concatenate(training_sequences)
fitness = self.evalfunc(OUTPUT, TARGET)
return fitness
def apply(self, population):
""" Evaluate each individual, and store fitness inside population.
Also calculate and set the weight matrix W of the linear output layer.
:arg population: Instance of EvolinoPopulation
"""
net = self.network
dataset = self.dataset
population.clearFitness()
best_W = None
best_fitness = -Infinity
# iterate all individuals. Note, that these individuals are created on the fly
for individual in population.getIndividuals():
# load the individual's genome into the weights of the net
net.setGenome(individual.getGenome())
fitness = self._evaluateNet(net, dataset, self.wtRatio)
if self.verbosity > 1:
print(("Calculated fitness for individual", id(individual), " is ", fitness))
# set the individual fitness
population.setIndividualFitness(individual, fitness)
if best_fitness < fitness:
best_fitness = fitness
best_genome = deepcopy(individual.getGenome())
best_W = deepcopy(net.getOutputWeightMatrix())
net.reset()
net.setGenome(best_genome)
net.setOutputWeightMatrix(best_W)
# store fitness maximum to use it for triggering burst mutation
self.max_fitness = best_fitness
class EvolinoSelection(Filter):
""" Evolino's selection operator.
Set its nParents attribute at any time.
nParents specifies the number of individuals not to be deleted.
If nParents equals None, EvolinoSubSelection will use its
default value.
"""
def __init__(self):
Filter.__init__(self)
self.nParents = None
self.sub_selection = EvolinoSubSelection()
def apply(self, population):
""" The subpopulations of the EvolinoPopulation are iterated and forwarded
to the EvolinoSubSelection() operator.
:arg population: object of type EvolinoPopulation
"""
self.sub_selection.nParents = self.nParents
for sp in population.getSubPopulations():
self.sub_selection.apply(sp)
class EvolinoReproduction(Filter):
""" Evolino's reproduction operator """
def __init__(self, **kwargs):
""" :key **kwargs: will be forwarded to the EvolinoSubReproduction constructor
"""
Filter.__init__(self)
self._kwargs = kwargs
def apply(self, population):
""" The subpopulations of the EvolinoPopulation are iterated and forwarded
to the EvolinoSubReproduction() operator.
:arg population: object of type EvolinoPopulation
"""
sps = population.getSubPopulations()
reproduction = EvolinoSubReproduction(**self._kwargs)
for sp in sps:
reproduction.apply(sp)
class EvolinoBurstMutation(Filter):
""" The burst mutation operator for evolino """
def __init__(self, **kwargs):
""" :key **kwargs: will be forwarded to the EvolinoSubReproduction constructor
"""
Filter.__init__(self)
self._kwargs = kwargs
def apply(self, population):
""" Keeps just the best fitting individual of each subpopulation.
All other individuals are erased. After that, the kept best fitting
individuals will be used for reproduction, in order to refill the
sub-populations.
"""
sps = population.getSubPopulations()
for sp in sps:
n_toremove = sp.getIndividualsN() - 1
sp.removeWorstIndividuals(n_toremove)
reproduction = EvolinoSubReproduction(**self._kwargs)
reproduction.apply(sp)
# ==================================================== SubPopulation related ===
class EvolinoSubSelection(Filter):
""" Selection operator for EvolinoSubPopulation objects
Specify its nParents attribute at any time. See EvolinoSelection.
"""
def __init__(self):
Filter.__init__(self)
def apply(self, population):
""" Simply removes some individuals with lowest fitness values
"""
n = population.getIndividualsN()
if self.nParents is None:
nKeep = n // 4
else:
nKeep = self.nParents
assert nKeep >= 0
assert nKeep <= n
population.removeWorstIndividuals(n - nKeep)
class EvolinoSubReproduction(Filter):
""" Reproduction operator for EvolinoSubPopulation objects.
"""
def __init__(self, **kwargs):
""" :key verbosity: Verbosity level
:key mutationVariate: Variate used for mutation. Defaults to None
:key mutation: Defaults to EvolinoSubMutation
"""
Filter.__init__(self)
ap = KWArgsProcessor(self, kwargs)
ap.add('verbosity', default=0)
ap.add('mutationVariate', default=None)
ap.add('mutation', default=EvolinoSubMutation())
if self.mutationVariate is not None:
self.mutation.mutationVariate = self.mutationVariate
def apply(self, population):
""" First determines the number of individuals to be created.
Then clones the fittest individuals (=parents), mutates these clones
and adds them to the population.
"""
max_n = population.getMaxNIndividuals()
n = population.getIndividualsN()
freespace = max_n - n
best = population.getBestIndividualsSorted(freespace)
children = set()
while True:
if len(children) >= freespace: break
for parent in best:
children.add(parent.copy())
if len(children) >= freespace: break
dummy_population = SimplePopulation()
dummy_population.addIndividuals(children)
self.mutation.apply(dummy_population)
population.addIndividuals(dummy_population.getIndividuals())
assert population.getMaxNIndividuals() == population.getIndividualsN()
class EvolinoSubMutation(SimpleMutation):
""" Mutation operator for EvolinoSubPopulation objects.
Like SimpleMutation, except, that CauchyVariate is used by default.
"""
def __init__(self, **kwargs):
SimpleMutation.__init__(self)
ap = KWArgsProcessor(self, kwargs)
ap.add('mutationVariate', default=CauchyVariate())
self.mutationVariate.alpha = 0.001
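# --- Illustrative sketch, not part of the original module ---
# The output-weight step used inside EvolinoEvaluation._evaluateNet, shown in
# isolation: stack the raw hidden-state outputs (phi) of all sequences and
# solve the linear least-squares problem W = TARGET * pinv(PHI).  The helper
# name and shapes are assumptions made only for this demonstration.
def _example_output_weights(phis, targets):
    """Return the least-squares output weight matrix for stacked sequences."""
    PHI = concatenate(phis).T        # (n_hidden, n_samples)
    TARGET = concatenate(targets).T  # (n_outputs, n_samples)
    return dot(TARGET, pinv2(PHI))   # (n_outputs, n_hidden)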
| bsd-3-clause |
Shrews/PyGerrit | webapp/django/contrib/localflavor/es/es_provinces.py | 436 | 1482 | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
PROVINCE_CHOICES = (
('01', _('Arava')),
('02', _('Albacete')),
('03', _('Alacant')),
('04', _('Almeria')),
('05', _('Avila')),
('06', _('Badajoz')),
('07', _('Illes Balears')),
('08', _('Barcelona')),
('09', _('Burgos')),
('10', _('Caceres')),
('11', _('Cadiz')),
('12', _('Castello')),
('13', _('Ciudad Real')),
('14', _('Cordoba')),
('15', _('A Coruna')),
('16', _('Cuenca')),
('17', _('Girona')),
('18', _('Granada')),
('19', _('Guadalajara')),
('20', _('Guipuzkoa')),
('21', _('Huelva')),
('22', _('Huesca')),
('23', _('Jaen')),
('24', _('Leon')),
('25', _('Lleida')),
('26', _('La Rioja')),
('27', _('Lugo')),
('28', _('Madrid')),
('29', _('Malaga')),
('30', _('Murcia')),
('31', _('Navarre')),
('32', _('Ourense')),
('33', _('Asturias')),
('34', _('Palencia')),
('35', _('Las Palmas')),
('36', _('Pontevedra')),
('37', _('Salamanca')),
('38', _('Santa Cruz de Tenerife')),
('39', _('Cantabria')),
('40', _('Segovia')),
('41', _('Seville')),
('42', _('Soria')),
('43', _('Tarragona')),
('44', _('Teruel')),
('45', _('Toledo')),
('46', _('Valencia')),
('47', _('Valladolid')),
('48', _('Bizkaia')),
('49', _('Zamora')),
('50', _('Zaragoza')),
('51', _('Ceuta')),
('52', _('Melilla')),
)
| apache-2.0 |
abhiQmar/servo | tests/wpt/web-platform-tests/check_stability.py | 9 | 26373 | from __future__ import print_function
import argparse
import logging
import os
import re
import stat
import subprocess
import sys
import tarfile
import zipfile
from abc import ABCMeta, abstractmethod
from cStringIO import StringIO as CStringIO
from collections import defaultdict
from ConfigParser import RawConfigParser
from io import BytesIO, StringIO
import requests
BaseHandler = None
LogActionFilter = None
LogHandler = None
LogLevelFilter = None
StreamHandler = None
TbplFormatter = None
manifest = None
reader = None
wptcommandline = None
wptrunner = None
wpt_root = None
wptrunner_root = None
logger = None
def do_delayed_imports():
"""Import and set up modules only needed if execution gets to this point."""
global BaseHandler
global LogLevelFilter
global StreamHandler
global TbplFormatter
global manifest
global reader
global wptcommandline
global wptrunner
from mozlog import reader
from mozlog.formatters import TbplFormatter
from mozlog.handlers import BaseHandler, LogLevelFilter, StreamHandler
from tools.manifest import manifest
from wptrunner import wptcommandline, wptrunner
setup_log_handler()
setup_action_filter()
def setup_logging():
"""Set up basic debug logger."""
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(logging.BASIC_FORMAT, None)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
def setup_action_filter():
"""Create global LogActionFilter class as part of deferred module load."""
global LogActionFilter
class LogActionFilter(BaseHandler):
"""Handler that filters out messages not of a given set of actions.
Subclasses BaseHandler.
:param inner: Handler to use for messages that pass this filter
:param actions: List of actions for which to fire the handler
"""
def __init__(self, inner, actions):
"""Extend BaseHandler and set inner and actions props on self."""
BaseHandler.__init__(self, inner)
self.inner = inner
self.actions = actions
def __call__(self, item):
"""Invoke handler if action is in list passed as constructor param."""
if item["action"] in self.actions:
return self.inner(item)
class TravisFold(object):
"""Context for TravisCI folding mechanism. Subclasses object.
See: https://blog.travis-ci.com/2013-05-22-improving-build-visibility-log-folds/
"""
def __init__(self, name):
"""Register TravisCI folding section name."""
self.name = name
def __enter__(self):
"""Emit fold start syntax."""
print("travis_fold:start:%s" % self.name, file=sys.stderr)
def __exit__(self, type, value, traceback):
"""Emit fold end syntax."""
print("travis_fold:end:%s" % self.name, file=sys.stderr)
class FilteredIO(object):
"""Wrap a file object, invoking the provided callback for every call to
`write` and only proceeding with the operation when that callback returns
True."""
def __init__(self, original, on_write):
self.original = original
self.on_write = on_write
def __getattr__(self, name):
return getattr(self.original, name)
def disable(self):
self.write = lambda msg: None
def write(self, msg):
encoded = msg.encode("utf8", "backslashreplace").decode("utf8")
if self.on_write(self.original, encoded) is True:
self.original.write(encoded)
def replace_streams(capacity, warning_msg):
# Value must be boxed to support modification from inner function scope
count = [0]
capacity -= 2 + len(warning_msg)
stderr = sys.stderr
def on_write(handle, msg):
length = len(msg)
count[0] += length
if count[0] > capacity:
sys.stdout.disable()
sys.stderr.disable()
handle.write(msg[0:capacity - count[0]])
handle.flush()
stderr.write("\n%s\n" % warning_msg)
return False
return True
sys.stdout = FilteredIO(sys.stdout, on_write)
sys.stderr = FilteredIO(sys.stderr, on_write)
class Browser(object):
__metaclass__ = ABCMeta
@abstractmethod
def install(self):
return NotImplemented
@abstractmethod
def install_webdriver(self):
return NotImplemented
@abstractmethod
def version(self):
return NotImplemented
@abstractmethod
def wptrunner_args(self):
return NotImplemented
class Firefox(Browser):
"""Firefox-specific interface.
Includes installation, webdriver installation, and wptrunner setup methods.
"""
product = "firefox"
binary = "%s/firefox/firefox"
platform_ini = "%s/firefox/platform.ini"
def install(self):
"""Install Firefox."""
call("pip", "install", "-r", os.path.join(wptrunner_root, "requirements_firefox.txt"))
resp = get("https://archive.mozilla.org/pub/firefox/nightly/latest-mozilla-central/firefox-53.0a1.en-US.linux-x86_64.tar.bz2")
untar(resp.raw)
if not os.path.exists("profiles"):
os.mkdir("profiles")
with open(os.path.join("profiles", "prefs_general.js"), "wb") as f:
resp = get("https://hg.mozilla.org/mozilla-central/raw-file/tip/testing/profiles/prefs_general.js")
f.write(resp.content)
call("pip", "install", "-r", os.path.join(wptrunner_root, "requirements_firefox.txt"))
def _latest_geckodriver_version(self):
"""Get and return latest version number for geckodriver."""
# This is used rather than an API call to avoid rate limits
tags = call("git", "ls-remote", "--tags", "--refs",
"https://github.com/mozilla/geckodriver.git")
release_re = re.compile(".*refs/tags/v(\d+)\.(\d+)\.(\d+)")
latest_release = 0
for item in tags.split("\n"):
m = release_re.match(item)
if m:
version = [int(item) for item in m.groups()]
if version > latest_release:
latest_release = version
assert latest_release != 0
return "v%s.%s.%s" % tuple(str(item) for item in latest_release)
def install_webdriver(self):
"""Install latest Geckodriver."""
version = self._latest_geckodriver_version()
logger.debug("Latest geckodriver release %s" % version)
url = "https://github.com/mozilla/geckodriver/releases/download/%s/geckodriver-%s-linux64.tar.gz" % (version, version)
untar(get(url).raw)
def version(self, root):
"""Retrieve the release version of the installed browser."""
platform_info = RawConfigParser()
with open(self.platform_ini % root, "r") as fp:
platform_info.readfp(BytesIO(fp.read()))
return "BuildID %s; SourceStamp %s" % (
platform_info.get("Build", "BuildID"),
platform_info.get("Build", "SourceStamp"))
def wptrunner_args(self, root):
"""Return Firefox-specific wpt-runner arguments."""
return {
"product": "firefox",
"binary": self.binary % root,
"certutil_binary": "certutil",
"webdriver_binary": "%s/geckodriver" % root,
"prefs_root": "%s/profiles" % root,
}
class Chrome(Browser):
"""Chrome-specific interface.
Includes installation, webdriver installation, and wptrunner setup methods.
"""
product = "chrome"
binary = "/usr/bin/google-chrome"
def install(self):
"""Install Chrome."""
# Installing the Google Chrome browser requires administrative
# privileges, so that installation is handled by the invoking script.
call("pip", "install", "-r", os.path.join(wptrunner_root, "requirements_chrome.txt"))
def install_webdriver(self):
"""Install latest Webdriver."""
latest = get("http://chromedriver.storage.googleapis.com/LATEST_RELEASE").text.strip()
url = "http://chromedriver.storage.googleapis.com/%s/chromedriver_linux64.zip" % latest
unzip(get(url).raw)
st = os.stat('chromedriver')
os.chmod('chromedriver', st.st_mode | stat.S_IEXEC)
def version(self, root):
"""Retrieve the release version of the installed browser."""
output = call(self.binary, "--version")
return re.search(r"[0-9\.]+( [a-z]+)?$", output.strip()).group(0)
def wptrunner_args(self, root):
"""Return Chrome-specific wpt-runner arguments."""
return {
"product": "chrome",
"binary": self.binary,
"webdriver_binary": "%s/chromedriver" % root,
"test_types": ["testharness", "reftest"]
}
def get(url):
"""Issue GET request to a given URL and return the response."""
logger.debug("GET %s" % url)
resp = requests.get(url, stream=True)
resp.raise_for_status()
return resp
def call(*args):
"""Log terminal command, invoke it as a subprocess.
Returns a bytestring of the subprocess output if no error.
"""
logger.debug("%s" % " ".join(args))
try:
return subprocess.check_output(args)
except subprocess.CalledProcessError as e:
logger.critical("%s exited with return code %i" %
(e.cmd, e.returncode))
logger.critical(e.output)
raise
def get_git_cmd(repo_path):
"""Create a function for invoking git commands as a subprocess."""
def git(cmd, *args):
full_cmd = ["git", cmd] + list(args)
try:
return subprocess.check_output(full_cmd, cwd=repo_path, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logger.error("Git command exited with status %i" % e.returncode)
logger.error(e.output)
sys.exit(1)
return git
def seekable(fileobj):
"""Attempt to use file.seek on given file, with fallbacks."""
try:
fileobj.seek(fileobj.tell())
except Exception:
return CStringIO(fileobj.read())
else:
return fileobj
def untar(fileobj):
"""Extract tar archive."""
logger.debug("untar")
fileobj = seekable(fileobj)
with tarfile.open(fileobj=fileobj) as tar_data:
tar_data.extractall()
def unzip(fileobj):
"""Extract zip archive."""
logger.debug("unzip")
fileobj = seekable(fileobj)
with zipfile.ZipFile(fileobj) as zip_data:
for info in zip_data.infolist():
zip_data.extract(info)
perm = info.external_attr >> 16 & 0x1FF
os.chmod(info.filename, perm)
class pwd(object):
"""Create context for temporarily changing present working directory."""
def __init__(self, dir):
self.dir = dir
self.old_dir = None
def __enter__(self):
self.old_dir = os.path.abspath(os.curdir)
os.chdir(self.dir)
def __exit__(self, *args, **kwargs):
os.chdir(self.old_dir)
self.old_dir = None
def fetch_wpt_master(user):
"""Fetch the master branch via git."""
git = get_git_cmd(wpt_root)
git("fetch", "https://github.com/%s/web-platform-tests.git" % user, "master:master")
def get_sha1():
""" Get and return sha1 of current git branch HEAD commit."""
git = get_git_cmd(wpt_root)
return git("rev-parse", "HEAD").strip()
def build_manifest():
"""Build manifest of all files in web-platform-tests"""
with pwd(wpt_root):
# TODO: Call the manifest code directly
call("python", "manifest")
def install_wptrunner():
"""Clone and install wptrunner."""
call("git", "clone", "--depth=1", "https://github.com/w3c/wptrunner.git", wptrunner_root)
git = get_git_cmd(wptrunner_root)
git("submodule", "update", "--init", "--recursive")
call("pip", "install", wptrunner_root)
def get_files_changed():
"""Get and return files changed since current branch diverged from master."""
root = os.path.abspath(os.curdir)
git = get_git_cmd(wpt_root)
branch_point = git("merge-base", "HEAD", "master").strip()
logger.debug("Branch point from master: %s" % branch_point)
files = git("diff", "--name-only", "-z", "%s.." % branch_point)
if not files:
return []
assert files[-1] == "\0"
return [os.path.join(wpt_root, item)
for item in files[:-1].split("\0")]
def get_affected_testfiles(files_changed):
"""Determine and return list of test files that reference changed files."""
affected_testfiles = set()
nontests_changed = set(files_changed)
manifest_file = os.path.join(wpt_root, "MANIFEST.json")
skip_dirs = ["conformance-checkers", "docs", "tools"]
test_types = ["testharness", "reftest", "wdspec"]
wpt_manifest = manifest.load(wpt_root, manifest_file)
support_files = {os.path.join(wpt_root, path)
for _, path, _ in wpt_manifest.itertypes("support")}
test_files = {os.path.join(wpt_root, path)
for _, path, _ in wpt_manifest.itertypes(*test_types)}
nontests_changed = nontests_changed.intersection(support_files)
nontest_changed_paths = set()
for full_path in nontests_changed:
rel_path = os.path.relpath(full_path, wpt_root)
path_components = rel_path.split(os.sep)
if len(path_components) < 2:
# This changed file is in the repo root, so skip it
# (because it's not part of any test).
continue
top_level_subdir = path_components[0]
if top_level_subdir in skip_dirs:
continue
repo_path = "/" + os.path.relpath(full_path, wpt_root).replace(os.path.sep, "/")
nontest_changed_paths.add((full_path, repo_path))
for root, dirs, fnames in os.walk(wpt_root):
# Walk top_level_subdir looking for test files containing either the
# relative filepath or absolute filepatch to the changed files.
if root == wpt_root:
for dir_name in skip_dirs:
dirs.remove(dir_name)
for fname in fnames:
test_full_path = os.path.join(root, fname)
# Skip any file that's not a test file.
if test_full_path not in test_files:
continue
with open(test_full_path, "rb") as fh:
file_contents = fh.read()
if file_contents.startswith("\xfe\xff"):
file_contents = file_contents.decode("utf-16be")
elif file_contents.startswith("\xff\xfe"):
file_contents = file_contents.decode("utf-16le")
for full_path, repo_path in nontest_changed_paths:
rel_path = os.path.relpath(full_path, root).replace(os.path.sep, "/")
if rel_path in file_contents or repo_path in file_contents:
affected_testfiles.add(test_full_path)
continue
return affected_testfiles
def wptrunner_args(root, files_changed, iterations, browser):
"""Derive and return arguments for wpt-runner."""
parser = wptcommandline.create_parser([browser.product])
args = vars(parser.parse_args([]))
args.update(browser.wptrunner_args(root))
args.update({
"tests_root": wpt_root,
"metadata_root": wpt_root,
"repeat": iterations,
"config": "%s//wptrunner.default.ini" % (wptrunner_root),
"test_list": files_changed,
"restart_on_unexpected": False,
"pause_after_test": False
})
wptcommandline.check_args(args)
return args
def setup_log_handler():
"""Set up LogHandler class as part of deferred module load."""
global LogHandler
class LogHandler(reader.LogHandler):
"""Handle updating test and subtest status in log.
Subclasses reader.LogHandler.
"""
def __init__(self):
self.results = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
def test_status(self, data):
self.results[data["test"]][data.get("subtest")][data["status"]] += 1
def test_end(self, data):
self.results[data["test"]][None][data["status"]] += 1
def is_inconsistent(results_dict, iterations):
"""Return whether or not a single test is inconsistent."""
return len(results_dict) > 1 or sum(results_dict.values()) != iterations
def err_string(results_dict, iterations):
"""Create and return string with errors from test run."""
rv = []
total_results = sum(results_dict.values())
for key, value in sorted(results_dict.items()):
rv.append("%s%s" %
(key, ": %s/%s" % (value, iterations) if value != iterations else ""))
rv = ", ".join(rv)
if total_results < iterations:
rv.append("MISSING: %s/%s" % (iterations - total_results, iterations))
if len(results_dict) > 1 or total_results != iterations:
rv = "**%s**" % rv
return rv
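# Illustrative note, not in the original script: with 10 iterations a stable
# test yields err_string({"PASS": 10}, 10) == "PASS", while a flaky one yields
# err_string({"PASS": 8, "FAIL": 2}, 10) == "**FAIL: 2/10, PASS: 8/10**".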
def process_results(log, iterations):
"""Process test log and return overall results and list of inconsistent tests."""
inconsistent = []
handler = LogHandler()
reader.handle_log(reader.read(log), handler)
results = handler.results
for test, test_results in results.iteritems():
for subtest, result in test_results.iteritems():
if is_inconsistent(result, iterations):
inconsistent.append((test, subtest, result))
return results, inconsistent
def format_comment_title(product):
"""Produce a Markdown-formatted string based on a given "product"--a string
containing a browser identifier optionally followed by a colon and a
release channel. (For example: "firefox" or "chrome:dev".) The generated
title string is used both to create new comments and to locate (and
subsequently update) previously-submitted comments."""
parts = product.split(":")
title = parts[0].title()
if len(parts) > 1:
title += " (%s channel)" % parts[1]
return "# %s #" % title
def markdown_adjust(s):
"""Escape problematic markdown sequences."""
s = s.replace('\t', u'\\t')
s = s.replace('\n', u'\\n')
s = s.replace('\r', u'\\r')
s = s.replace('`', u'\\`')
return s
def table(headings, data, log):
"""Create and log data to specified logger in tabular format."""
cols = range(len(headings))
assert all(len(item) == len(cols) for item in data)
max_widths = reduce(lambda prev, cur: [(len(cur[i]) + 2)
if (len(cur[i]) + 2) > prev[i]
else prev[i]
for i in cols],
data,
[len(item) + 2 for item in headings])
log("|%s|" % "|".join(item.center(max_widths[i]) for i, item in enumerate(headings)))
log("|%s|" % "|".join("-" * max_widths[i] for i in cols))
for row in data:
log("|%s|" % "|".join(" %s" % row[i].ljust(max_widths[i] - 1) for i in cols))
log("")
def write_inconsistent(inconsistent, iterations):
"""Output inconsistent tests to logger.error."""
logger.error("## Unstable results ##\n")
strings = [("`%s`" % markdown_adjust(test), ("`%s`" % markdown_adjust(subtest)) if subtest else "", err_string(results, iterations))
for test, subtest, results in inconsistent]
table(["Test", "Subtest", "Results"], strings, logger.error)
def write_results(results, iterations, comment_pr):
"""Output all test results to logger.info."""
pr_number = None
if comment_pr:
try:
pr_number = int(comment_pr)
except ValueError:
pass
logger.info("## All results ##\n")
if pr_number:
logger.info("<details>\n")
logger.info("<summary>%i %s ran</summary>\n\n" % (len(results),
"tests" if len(results) > 1
else "test"))
for test, test_results in results.iteritems():
baseurl = "http://w3c-test.org/submissions"
if "https" in os.path.splitext(test)[0].split(".")[1:]:
baseurl = "https://w3c-test.org/submissions"
if pr_number:
logger.info("<details>\n")
logger.info('<summary><a href="%s/%s%s">%s</a></summary>\n\n' %
(baseurl, pr_number, test, test))
else:
logger.info("### %s ###" % test)
parent = test_results.pop(None)
strings = [("", err_string(parent, iterations))]
strings.extend(((("`%s`" % markdown_adjust(subtest)) if subtest
else "", err_string(results, iterations))
for subtest, results in test_results.iteritems()))
table(["Subtest", "Results"], strings, logger.info)
if pr_number:
logger.info("</details>\n")
if pr_number:
logger.info("</details>\n")
def get_parser():
"""Create and return script-specific argument parser."""
parser = argparse.ArgumentParser()
parser.add_argument("--root",
action="store",
default=os.path.join(os.path.expanduser("~"), "build"),
help="Root path")
parser.add_argument("--iterations",
action="store",
default=10,
type=int,
help="Number of times to run tests")
parser.add_argument("--comment-pr",
action="store",
default=os.environ.get("TRAVIS_PULL_REQUEST"),
help="PR to comment on with stability results")
parser.add_argument("--user",
action="store",
# Travis docs say do not depend on USER env variable.
# This is a workaround to get what should be the same value
default=os.environ.get("TRAVIS_REPO_SLUG").split('/')[0],
help="Travis user name")
parser.add_argument("--output-bytes",
action="store",
type=int,
help="Maximum number of bytes to write to standard output/error")
parser.add_argument("product",
action="store",
help="Product to run against (`browser-name` or 'browser-name:channel')")
return parser
def main():
"""Perform check_stability functionality and return exit code."""
global wpt_root
global wptrunner_root
global logger
retcode = 0
parser = get_parser()
args = parser.parse_args()
if args.output_bytes is not None:
replace_streams(args.output_bytes,
"Log reached capacity (%s bytes); output disabled." % args.output_bytes)
logger = logging.getLogger(os.path.splitext(__file__)[0])
setup_logging()
wpt_root = os.path.abspath(os.curdir)
wptrunner_root = os.path.normpath(os.path.join(wpt_root, "..", "wptrunner"))
if not os.path.exists(args.root):
logger.critical("Root directory %s does not exist" % args.root)
return 1
os.chdir(args.root)
browser_name = args.product.split(":")[0]
with TravisFold("browser_setup"):
logger.info(format_comment_title(args.product))
browser_cls = {"firefox": Firefox,
"chrome": Chrome}.get(browser_name)
if browser_cls is None:
logger.critical("Unrecognised browser %s" % browser_name)
return 1
fetch_wpt_master(args.user)
head_sha1 = get_sha1()
logger.info("Testing web-platform-tests at revision %s" % head_sha1)
# For now just pass the whole list of changed files to wptrunner and
# assume that it will run everything that's actually a test
files_changed = get_files_changed()
if not files_changed:
logger.info("No files changed")
return 0
build_manifest()
install_wptrunner()
do_delayed_imports()
browser = browser_cls()
browser.install()
browser.install_webdriver()
try:
version = browser.version(args.root)
except Exception, e:
version = "unknown (error: %s)" % e
logger.info("Using browser at version %s", version)
logger.debug("Files changed:\n%s" % "".join(" * %s\n" % item for item in files_changed))
affected_testfiles = get_affected_testfiles(files_changed)
logger.debug("Affected tests:\n%s" % "".join(" * %s\n" % item for item in affected_testfiles))
files_changed.extend(affected_testfiles)
kwargs = wptrunner_args(args.root,
files_changed,
args.iterations,
browser)
with TravisFold("running_tests"):
logger.info("Starting %i test iterations" % args.iterations)
with open("raw.log", "wb") as log:
wptrunner.setup_logging(kwargs,
{"raw": log})
# Setup logging for wptrunner that keeps process output and
# warning+ level logs only
wptrunner.logger.add_handler(
LogActionFilter(
LogLevelFilter(
StreamHandler(
sys.stdout,
TbplFormatter()
),
"WARNING"),
["log", "process_output"]))
wptrunner.run_tests(**kwargs)
with open("raw.log", "rb") as log:
results, inconsistent = process_results(log, args.iterations)
if results:
if inconsistent:
write_inconsistent(inconsistent, args.iterations)
retcode = 2
else:
logger.info("All results were stable\n")
with TravisFold("full_results"):
write_results(results, args.iterations, args.comment_pr)
else:
logger.info("No tests run.")
return retcode
if __name__ == "__main__":
try:
retcode = main()
except:
raise
else:
sys.exit(retcode)
| mpl-2.0 |
ryano144/intellij-community | python/lib/Lib/encodings/cp1253.py | 593 | 13350 | """ Python Character Mapping Codec cp1253 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1253.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1253',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\ufffe' # 0x88 -> UNDEFINED
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\ufffe' # 0x8A -> UNDEFINED
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x8C -> UNDEFINED
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\ufffe' # 0x98 -> UNDEFINED
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\ufffe' # 0x9A -> UNDEFINED
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x9C -> UNDEFINED
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\ufffe' # 0x9F -> UNDEFINED
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0385' # 0xA1 -> GREEK DIALYTIKA TONOS
u'\u0386' # 0xA2 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\ufffe' # 0xAA -> UNDEFINED
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\u2015' # 0xAF -> HORIZONTAL BAR
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\u0384' # 0xB4 -> GREEK TONOS
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u0388' # 0xB8 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0389' # 0xB9 -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\u038a' # 0xBA -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u038c' # 0xBC -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\u038e' # 0xBE -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u038f' # 0xBF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'\u0390' # 0xC0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
u'\u0391' # 0xC1 -> GREEK CAPITAL LETTER ALPHA
u'\u0392' # 0xC2 -> GREEK CAPITAL LETTER BETA
u'\u0393' # 0xC3 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0xC4 -> GREEK CAPITAL LETTER DELTA
u'\u0395' # 0xC5 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0xC6 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0xC7 -> GREEK CAPITAL LETTER ETA
u'\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA
u'\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA
u'\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA
u'\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA
u'\u039c' # 0xCC -> GREEK CAPITAL LETTER MU
u'\u039d' # 0xCD -> GREEK CAPITAL LETTER NU
u'\u039e' # 0xCE -> GREEK CAPITAL LETTER XI
u'\u039f' # 0xCF -> GREEK CAPITAL LETTER OMICRON
u'\u03a0' # 0xD0 -> GREEK CAPITAL LETTER PI
u'\u03a1' # 0xD1 -> GREEK CAPITAL LETTER RHO
u'\ufffe' # 0xD2 -> UNDEFINED
u'\u03a3' # 0xD3 -> GREEK CAPITAL LETTER SIGMA
u'\u03a4' # 0xD4 -> GREEK CAPITAL LETTER TAU
u'\u03a5' # 0xD5 -> GREEK CAPITAL LETTER UPSILON
u'\u03a6' # 0xD6 -> GREEK CAPITAL LETTER PHI
u'\u03a7' # 0xD7 -> GREEK CAPITAL LETTER CHI
u'\u03a8' # 0xD8 -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0xD9 -> GREEK CAPITAL LETTER OMEGA
u'\u03aa' # 0xDA -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\u03ab' # 0xDB -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'\u03ac' # 0xDC -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u03ad' # 0xDD -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0xDE -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03af' # 0xDF -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03b0' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
u'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
u'\u03b3' # 0xE3 -> GREEK SMALL LETTER GAMMA
u'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
u'\u03b6' # 0xE6 -> GREEK SMALL LETTER ZETA
u'\u03b7' # 0xE7 -> GREEK SMALL LETTER ETA
u'\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA
u'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
u'\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0xEC -> GREEK SMALL LETTER MU
u'\u03bd' # 0xED -> GREEK SMALL LETTER NU
u'\u03be' # 0xEE -> GREEK SMALL LETTER XI
u'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
u'\u03c1' # 0xF1 -> GREEK SMALL LETTER RHO
u'\u03c2' # 0xF2 -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
u'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
u'\u03c5' # 0xF5 -> GREEK SMALL LETTER UPSILON
u'\u03c6' # 0xF6 -> GREEK SMALL LETTER PHI
u'\u03c7' # 0xF7 -> GREEK SMALL LETTER CHI
u'\u03c8' # 0xF8 -> GREEK SMALL LETTER PSI
u'\u03c9' # 0xF9 -> GREEK SMALL LETTER OMEGA
u'\u03ca' # 0xFA -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03cb' # 0xFB -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u03cc' # 0xFC -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u03cd' # 0xFD -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03ce' # 0xFE -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
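### Illustrative use of the table above (sketch, not part of the codec module):
### codecs.charmap_decode(b'\xe1', 'strict', decoding_table) would yield
### (u'\u03b1', 1), i.e. byte 0xE1 maps to GREEK SMALL LETTER ALPHA.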
| apache-2.0 |
craynot/django | django/core/management/sql.py | 399 | 1890 | from __future__ import unicode_literals
from django.apps import apps
from django.db import models
def sql_flush(style, connection, only_django=False, reset_sequences=True, allow_cascade=False):
"""
Returns a list of the SQL statements used to flush the database.
If only_django is True, then only table names that have associated Django
models and are in INSTALLED_APPS will be included.
"""
if only_django:
tables = connection.introspection.django_table_names(only_existing=True, include_views=False)
else:
tables = connection.introspection.table_names(include_views=False)
seqs = connection.introspection.sequence_list() if reset_sequences else ()
statements = connection.ops.sql_flush(style, tables, seqs, allow_cascade)
return statements
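# Illustrative usage sketch (assumption, not part of Django itself): flushing
# the default database from custom code with a configured connection.
#
#     from django.core.management.color import no_style
#     from django.db import connection
#     for statement in sql_flush(no_style(), connection, only_django=True):
#         print(statement)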
def emit_pre_migrate_signal(verbosity, interactive, db):
# Emit the pre_migrate signal for every application.
for app_config in apps.get_app_configs():
if app_config.models_module is None:
continue
if verbosity >= 2:
print("Running pre-migrate handlers for application %s" % app_config.label)
models.signals.pre_migrate.send(
sender=app_config,
app_config=app_config,
verbosity=verbosity,
interactive=interactive,
using=db)
def emit_post_migrate_signal(verbosity, interactive, db):
# Emit the post_migrate signal for every application.
for app_config in apps.get_app_configs():
if app_config.models_module is None:
continue
if verbosity >= 2:
print("Running post-migrate handlers for application %s" % app_config.label)
models.signals.post_migrate.send(
sender=app_config,
app_config=app_config,
verbosity=verbosity,
interactive=interactive,
using=db)
| bsd-3-clause |
dpetzold/django | django/db/backends/sqlite3/operations.py | 106 | 10799 | from __future__ import unicode_literals
import datetime
import uuid
from django.conf import settings
from django.core.exceptions import FieldError, ImproperlyConfigured
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.models import aggregates, fields
from django.utils import six, timezone
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.duration import duration_string
try:
import pytz
except ImportError:
pytz = None
class DatabaseOperations(BaseDatabaseOperations):
def bulk_batch_size(self, fields, objs):
"""
SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
999 variables per query.
If there is just single field to insert, then we can hit another
limit, SQLITE_MAX_COMPOUND_SELECT which defaults to 500.
"""
limit = 999 if len(fields) > 1 else 500
return (limit // len(fields)) if len(fields) > 0 else len(objs)
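    # Worked example of the limits above (illustrative): with 3 fields the
    # 999-variable limit applies, so at most 999 // 3 == 333 objects fit in a
    # single batch; with exactly one field the compound-select limit gives
    # 500 // 1 == 500 objects per batch.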
def check_expression_support(self, expression):
bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField)
bad_aggregates = (aggregates.Sum, aggregates.Avg, aggregates.Variance, aggregates.StdDev)
if isinstance(expression, bad_aggregates):
for expr in expression.get_source_expressions():
try:
output_field = expr.output_field
if isinstance(output_field, bad_fields):
raise NotImplementedError(
'You cannot use Sum, Avg, StdDev, and Variance '
'aggregations on date/time fields in sqlite3 '
'since date/time is saved as text.'
)
except FieldError:
# Not every subexpression has an output_field which is fine
# to ignore.
pass
def date_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_date_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name)
def date_interval_sql(self, timedelta):
return "'%s'" % duration_string(timedelta), []
def format_for_duration_arithmetic(self, sql):
"""Do nothing here, we will handle it in the custom function."""
return sql
def date_trunc_sql(self, lookup_type, field_name):
# sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
# function django_date_trunc that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)
def _require_pytz(self):
if settings.USE_TZ and pytz is None:
raise ImproperlyConfigured("This query requires pytz, but it isn't installed.")
def datetime_cast_date_sql(self, field_name, tzname):
self._require_pytz()
return "django_datetime_cast_date(%s, %%s)" % field_name, [tzname]
def datetime_extract_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_extract_sql.
self._require_pytz()
return "django_datetime_extract('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_trunc_sql.
self._require_pytz()
return "django_datetime_trunc('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def time_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_time_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_time_extract('%s', %s)" % (lookup_type.lower(), field_name)
def drop_foreignkey_sql(self):
return ""
def pk_default_value(self):
return "NULL"
def _quote_params_for_last_executed_query(self, params):
"""
Only for last_executed_query! Don't use this to execute SQL queries!
"""
sql = 'SELECT ' + ', '.join(['QUOTE(?)'] * len(params))
# Bypass Django's wrappers and use the underlying sqlite3 connection
# to avoid logging this query - it would trigger infinite recursion.
cursor = self.connection.connection.cursor()
# Native sqlite3 cursors cannot be used as context managers.
try:
return cursor.execute(sql, params).fetchone()
finally:
cursor.close()
def last_executed_query(self, cursor, sql, params):
# Python substitutes parameters in Modules/_sqlite/cursor.c with:
# pysqlite_statement_bind_parameters(self->statement, parameters, allow_8bit_chars);
# Unfortunately there is no way to reach self->statement from Python,
# so we quote and substitute parameters manually.
if params:
if isinstance(params, (list, tuple)):
params = self._quote_params_for_last_executed_query(params)
else:
keys = params.keys()
values = tuple(params.values())
values = self._quote_params_for_last_executed_query(values)
params = dict(zip(keys, values))
return sql % params
# For consistency with SQLiteCursorWrapper.execute(), just return sql
# when there are no parameters. See #13648 and #17158.
else:
return sql
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def no_limit_value(self):
return -1
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# NB: The generated SQL below is specific to SQLite
# Note: The DELETE FROM... SQL generated below works for SQLite databases
# because constraints don't exist
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Note: No requirement for reset of auto-incremented indices (cf. other
# sql_flush() implementations). Just return SQL at this point
return sql
def adapt_datetimefield_value(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = timezone.make_naive(value, self.connection.timezone)
else:
raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.")
return six.text_type(value)
def adapt_timefield_value(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
raise ValueError("SQLite backend does not support timezone-aware times.")
return six.text_type(value)
def get_db_converters(self, expression):
converters = super(DatabaseOperations, self).get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
if internal_type == 'DateTimeField':
converters.append(self.convert_datetimefield_value)
elif internal_type == 'DateField':
converters.append(self.convert_datefield_value)
elif internal_type == 'TimeField':
converters.append(self.convert_timefield_value)
elif internal_type == 'DecimalField':
converters.append(self.convert_decimalfield_value)
elif internal_type == 'UUIDField':
converters.append(self.convert_uuidfield_value)
return converters
def convert_datetimefield_value(self, value, expression, connection, context):
if value is not None:
if not isinstance(value, datetime.datetime):
value = parse_datetime(value)
if settings.USE_TZ:
value = timezone.make_aware(value, self.connection.timezone)
return value
def convert_datefield_value(self, value, expression, connection, context):
if value is not None:
if not isinstance(value, datetime.date):
value = parse_date(value)
return value
def convert_timefield_value(self, value, expression, connection, context):
if value is not None:
if not isinstance(value, datetime.time):
value = parse_time(value)
return value
def convert_decimalfield_value(self, value, expression, connection, context):
if value is not None:
value = expression.output_field.format_number(value)
value = backend_utils.typecast_decimal(value)
return value
def convert_uuidfield_value(self, value, expression, connection, context):
if value is not None:
value = uuid.UUID(value)
return value
def bulk_insert_sql(self, fields, placeholder_rows):
return " UNION ALL ".join(
"SELECT %s" % ", ".join(row)
for row in placeholder_rows
)
def combine_expression(self, connector, sub_expressions):
# SQLite doesn't have a power function, so we fake it with a
# user-defined function django_power that's registered in connect().
if connector == '^':
return 'django_power(%s)' % ','.join(sub_expressions)
return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
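    # Illustrative example (not part of the Django source): for the power
    # operator, combine_expression('^', ['"price"', '2']) returns
    # 'django_power("price",2)', delegating to the django_power SQL function
    # registered in connect(); any other connector falls through to the base
    # implementation.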
def combine_duration_expression(self, connector, sub_expressions):
if connector not in ['+', '-']:
raise utils.DatabaseError('Invalid connector for timedelta: %s.' % connector)
fn_params = ["'%s'" % connector] + sub_expressions
if len(fn_params) > 3:
raise ValueError('Too many params for timedelta operations.')
return "django_format_dtdelta(%s)" % ', '.join(fn_params)
def integer_field_range(self, internal_type):
# SQLite doesn't enforce any integer constraints
return (None, None)
| bsd-3-clause |
alexallah/django | tests/template_tests/filter_tests/test_urlizetrunc.py | 105 | 3353 | from django.template.defaultfilters import urlizetrunc
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class UrlizetruncTests(SimpleTestCase):
@setup({
'urlizetrunc01': '{% autoescape off %}{{ a|urlizetrunc:"8" }} {{ b|urlizetrunc:"8" }}{% endautoescape %}'
})
def test_urlizetrunc01(self):
output = self.engine.render_to_string(
'urlizetrunc01',
{
'a': '"Unsafe" http://example.com/x=&y=',
'b': mark_safe('"Safe" http://example.com?x=&y='),
},
)
self.assertEqual(
output,
'"Unsafe" <a href="http://example.com/x=&y=" rel="nofollow">http:...</a> '
'"Safe" <a href="http://example.com?x=&y=" rel="nofollow">http:...</a>'
)
@setup({'urlizetrunc02': '{{ a|urlizetrunc:"8" }} {{ b|urlizetrunc:"8" }}'})
def test_urlizetrunc02(self):
output = self.engine.render_to_string(
'urlizetrunc02',
{
'a': '"Unsafe" http://example.com/x=&y=',
'b': mark_safe('"Safe" http://example.com?x=&y='),
},
)
self.assertEqual(
output,
'"Unsafe" <a href="http://example.com/x=&y=" rel="nofollow">http:...</a> '
'"Safe" <a href="http://example.com?x=&y=" rel="nofollow">http:...</a>'
)
class FunctionTests(SimpleTestCase):
def test_truncate(self):
uri = 'http://31characteruri.com/test/'
self.assertEqual(len(uri), 31)
self.assertEqual(
urlizetrunc(uri, 31),
'<a href="http://31characteruri.com/test/" rel="nofollow">'
'http://31characteruri.com/test/</a>',
)
self.assertEqual(
urlizetrunc(uri, 30),
'<a href="http://31characteruri.com/test/" rel="nofollow">'
'http://31characteruri.com/t...</a>',
)
self.assertEqual(
urlizetrunc(uri, 2),
'<a href="http://31characteruri.com/test/"'
' rel="nofollow">...</a>',
)
def test_overtruncate(self):
self.assertEqual(
urlizetrunc('http://short.com/', 20), '<a href='
'"http://short.com/" rel="nofollow">http://short.com/</a>',
)
def test_query_string(self):
self.assertEqual(
urlizetrunc('http://www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search&meta=', 20),
'<a href="http://www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search&'
'meta=" rel="nofollow">http://www.google...</a>',
)
def test_non_string_input(self):
self.assertEqual(urlizetrunc(123, 1), '123')
def test_autoescape(self):
self.assertEqual(
urlizetrunc('foo<a href=" google.com ">bar</a>buz', 10),
'foo<a href=" <a href="http://google.com" rel="nofollow">google.com</a> ">bar</a>buz'
)
def test_autoescape_off(self):
self.assertEqual(
urlizetrunc('foo<a href=" google.com ">bar</a>buz', 9, autoescape=False),
'foo<a href=" <a href="http://google.com" rel="nofollow">google...</a> ">bar</a>buz',
)
| bsd-3-clause |
mcanningjr/Wallflower | Wallflower_Client.py | 1 | 7796 | '''
This is the chat client Wallflower. It currently connects to a server hosted by CaveFox Telecommunications, but it
can be changed to any server hosting the Wallflower_Server.py software package.
'''
import pickle
import requests
import time
import threading
import hashlib
message = ''
startpoint = 0
endpoint = 0
print(' Project Wallflower')
print(' One-Time Pad Cryptography Chat Software')
print('(c)2015 Michael Canning - CaveFox Telecommunications')
print('----------------------------------------------------')
print('All text is converted to lowercase, only letters and : are supported')
print('[System] - Loading One-Time Pad...')
pad = open("crypto.pad", 'r') # Loads the one time pad
pad = pickle.load(pad)
print('[System] - Loaded...')
username = str(raw_input("Desired Username: "))
ALPHABET = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
"n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", " ", ":"]
def md5(fname): # This is used to get a hash of the pad
hash = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash.update(chunk)
return hash.hexdigest()
def encrypt(message, startpoint): # Encrypts the message
    encoded = []
    # Adds numbers to 'encoded' array based on the letter
    for char in message.lower(): # Loops through input
        for alphabetIndex in range(1, len(ALPHABET) + 1): # Loops through the full alphabet, including ':'
            if char == ALPHABET[alphabetIndex - 1]: # Converts that letter to its matching number
                encoded.append(alphabetIndex)
                break
    # z = 0 # This line seems useless but I was scared to delete it
    final = ''
    for num in encoded: # Loops through each number
        encrypted = (num + pad[startpoint]) % 28 # Pad cipher
        final = final + ALPHABET[encrypted - 1] # Gets the letter for the enciphered number
        startpoint = startpoint + 1
    return final, startpoint
def decrypt(message, startpoint): # Decrypts the message
encoded = []
for x in message.lower():
if x is 'a':
encoded.append(1)
if x is 'b':
encoded.append(2)
if x is 'c':
encoded.append(3)
if x is 'd':
encoded.append(4)
if x is 'e':
encoded.append(5)
if x is 'f':
encoded.append(6)
if x is 'g':
encoded.append(7)
if x is 'h':
encoded.append(8)
if x is 'i':
encoded.append(9)
if x is 'j':
encoded.append(10)
if x is 'k':
encoded.append(11)
if x is 'l':
encoded.append(12)
if x is 'm':
encoded.append(13)
if x is 'n':
encoded.append(14)
if x is 'o':
encoded.append(15)
if x is 'p':
encoded.append(16)
if x is 'q':
encoded.append(17)
if x is 'r':
encoded.append(18)
if x is 's':
encoded.append(19)
if x is 't':
encoded.append(20)
if x is 'u':
encoded.append(21)
if x is 'v':
encoded.append(22)
if x is 'w':
encoded.append(23)
if x is 'x':
encoded.append(24)
if x is 'y':
encoded.append(25)
if x is 'z':
encoded.append(26)
if x is ' ':
encoded.append(27)
if x is ':':
encoded.append(28)
z = 0
final = ''
for x in encoded:
decryptic = x - pad[startpoint]
decryptic = decryptic % 28
startpoint = startpoint + 1
if decryptic is 1:
final = final + 'a'
if decryptic is 2:
final = final + 'b'
if decryptic is 3:
final = final + 'c'
if decryptic is 4:
final = final + 'd'
if decryptic is 5:
final = final + 'e'
if decryptic is 6:
final = final + 'f'
if decryptic is 7:
final = final + 'g'
if decryptic is 8:
final = final + 'h'
if decryptic is 9:
final = final + 'i'
if decryptic is 10:
final = final + 'j'
if decryptic is 11:
final = final + 'k'
if decryptic is 12:
final = final + 'l'
if decryptic is 13:
final = final + 'm'
if decryptic is 14:
final = final + 'n'
if decryptic is 15:
final = final + 'o'
if decryptic is 16:
final = final + 'p'
if decryptic is 17:
final = final + 'q'
if decryptic is 18:
final = final + 'r'
if decryptic is 19:
final = final + 's'
if decryptic is 20:
final = final + 't'
if decryptic is 21:
final = final + 'u'
if decryptic is 22:
final = final + 'v'
if decryptic is 23:
final = final + 'w'
if decryptic is 24:
final = final + 'x'
if decryptic is 25:
final = final + 'y'
if decryptic is 26:
final = final + 'z'
if decryptic is 27:
final = final + ' '
if decryptic is 0:
final = final + ':'
return final, startpoint
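# Illustrative round trip (a sketch assuming `pad` holds enough integers from
# the loaded one-time pad):
#   cipher, nextpoint = encrypt('hello', 0)
#   plain, _ = decrypt(cipher, 0)
#   # plain == 'hello'; nextpoint is where the next message should start in the pad.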
class getmessage(threading.Thread): # Thread to get the latest message every second; time can be change to faster or slower
def __init__(self, id):
self.id = id
threading.Thread.__init__(self)
def run(self):
messagecheck = ''
flag = 0
while True:
time.sleep(1)
r = requests.get('http://198.100.155.138:5000/read/startpoint/' + str(id))
startpoint = int(r.text)
r = requests.get('http://198.100.155.138:5000/read/message/' + str(id))
cryptic = str(r.text)
if (cryptic != messagecheck):
if (flag >= 5):
r = requests.get('http://198.100.155.138:5000/read/nextpoint/' + str(id))
nextpoint = int(r.text)
                    print('[System] - ' + str(100.0 * float(nextpoint) / float(len(pad))) + "% of Pad Used")
flag = 0
else:
flag = flag + 1
message, trash = decrypt(cryptic, startpoint)
print "[Channel] - " + message
messagecheck = cryptic
class sendmessage(threading.Thread): # Sends messages with a thread, and also sends the join server message
def __init__(self, id):
self.username = username
self.id = id
r = requests.get('http://198.100.155.138:5000/read/nextpoint/' + str(id))
startpoint = int(r.text)
print('[System] - You are chatting securely on channel: [' + str(id) + ']')
cryptic, startpointx = encrypt(self.username + " Has Joined!", startpoint)
requests.get("http://198.100.155.138:5000/post/" + str(id) + "/" + str(cryptic) + "/" + str(len('A User Has Joined')))
threading.Thread.__init__(self)
def run(self):
while True:
message = str(raw_input('Message: \n'))
r = requests.get('http://198.100.155.138:5000/read/nextpoint/' + str(id))
startpoint = int(r.text)
cryptic, startpointx = encrypt(self.username + ' : ' + message, startpoint)
requests.get("http://198.100.155.138:5000/post/" + str(id) + "/" + str(cryptic) + "/" + str(len(message)))
id = abs(int(hash(md5('crypto.pad')))) # Hashes the Pad to connect to the channel for it on the server
getmessage(id).start() # Starts the message get thread
sendmessage(id).start() # Starts the message send thread
| mit |
watonyweng/neutron | neutron/db/migration/alembic_migrations/dvr_init_opts.py | 32 | 2933 | # Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Initial operations for dvr
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'dvr_host_macs',
sa.Column('host', sa.String(length=255), nullable=False),
sa.Column('mac_address', sa.String(length=32),
nullable=False, unique=True),
sa.PrimaryKeyConstraint('host')
)
op.create_table(
'ml2_dvr_port_bindings',
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('host', sa.String(length=255), nullable=False),
sa.Column('router_id', sa.String(length=36), nullable=True),
sa.Column('vif_type', sa.String(length=64), nullable=False),
sa.Column('vif_details', sa.String(length=4095),
nullable=False, server_default=''),
sa.Column('vnic_type', sa.String(length=64),
nullable=False, server_default='normal'),
sa.Column('profile', sa.String(length=4095),
nullable=False, server_default=''),
sa.Column('cap_port_filter', sa.Boolean(), nullable=False),
sa.Column('driver', sa.String(length=64), nullable=True),
sa.Column('segment', sa.String(length=36), nullable=True),
sa.Column(u'status', sa.String(16), nullable=False),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['segment'], ['ml2_network_segments.id'],
ondelete='SET NULL'),
sa.PrimaryKeyConstraint('port_id', 'host')
)
op.create_table(
'csnat_l3_agent_bindings',
sa.Column('router_id', sa.String(length=36), nullable=False),
sa.Column('l3_agent_id', sa.String(length=36), nullable=False),
sa.Column('host_id', sa.String(length=255), nullable=True),
sa.Column('csnat_gw_port_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['l3_agent_id'], ['agents.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['csnat_gw_port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('router_id')
)
| apache-2.0 |
areski/django | tests/gis_tests/geogapp/tests.py | 20 | 6033 | """
Tests for geography support in PostGIS
"""
from __future__ import unicode_literals
import os
from unittest import skipUnless
from django.contrib.gis.db.models.functions import Area, Distance
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.measure import D
from django.test import TestCase, ignore_warnings, skipUnlessDBFeature
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango20Warning
from ..utils import oracle, postgis
from .models import City, County, Zipcode
@skipUnlessDBFeature("gis_enabled")
class GeographyTest(TestCase):
fixtures = ['initial']
def test01_fixture_load(self):
"Ensure geography features loaded properly."
self.assertEqual(8, City.objects.count())
@skipUnlessDBFeature("supports_distances_lookups", "supports_distance_geodetic")
def test02_distance_lookup(self):
"Testing GeoQuerySet distance lookup support on non-point geography fields."
z = Zipcode.objects.get(code='77002')
cities1 = list(City.objects
.filter(point__distance_lte=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
cities2 = list(City.objects
.filter(point__dwithin=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
for cities in [cities1, cities2]:
self.assertEqual(['Dallas', 'Houston', 'Oklahoma City'], cities)
@skipUnlessDBFeature("has_distance_method", "supports_distance_geodetic")
@ignore_warnings(category=RemovedInDjango20Warning)
def test03_distance_method(self):
"Testing GeoQuerySet.distance() support on non-point geography fields."
        # `GeoQuerySet.distance` is not limited to geometry fields.
htown = City.objects.get(name='Houston')
Zipcode.objects.distance(htown.point)
@skipUnless(postgis, "This is a PostGIS-specific test")
def test04_invalid_operators_functions(self):
"Ensuring exceptions are raised for operators & functions invalid on geography fields."
# Only a subset of the geometry functions & operator are available
# to PostGIS geography types. For more information, visit:
# http://postgis.refractions.net/documentation/manual-1.5/ch08.html#PostGIS_GeographyFunctions
z = Zipcode.objects.get(code='77002')
# ST_Within not available.
self.assertRaises(ValueError, City.objects.filter(point__within=z.poly).count)
# `@` operator not available.
self.assertRaises(ValueError, City.objects.filter(point__contained=z.poly).count)
# Regression test for #14060, `~=` was never really implemented for PostGIS.
htown = City.objects.get(name='Houston')
self.assertRaises(ValueError, City.objects.get, point__exact=htown.point)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test05_geography_layermapping(self):
"Testing LayerMapping support on models with geography fields."
# There is a similar test in `layermap` that uses the same data set,
# but the County model here is a bit different.
from django.contrib.gis.utils import LayerMapping
# Getting the shapefile and mapping dictionary.
shp_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), '..', 'data'))
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
co_mapping = {'name': 'Name',
'state': 'State',
'mpoly': 'MULTIPOLYGON',
}
# Reference county names, number of polygons, and state names.
names = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
num_polys = [1, 2, 1, 19, 1] # Number of polygons for each.
st_names = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269, unique='name')
lm.save(silent=True, strict=True)
for c, name, num_poly, state in zip(County.objects.order_by('name'), names, num_polys, st_names):
self.assertEqual(4326, c.mpoly.srid)
self.assertEqual(num_poly, len(c.mpoly))
self.assertEqual(name, c.name)
self.assertEqual(state, c.state)
@skipUnlessDBFeature("has_area_method", "supports_distance_geodetic")
@ignore_warnings(category=RemovedInDjango20Warning)
def test06_geography_area(self):
"Testing that Area calculations work on geography columns."
# SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';
ref_area = 5439100.13586914 if oracle else 5439084.70637573
tol = 5
z = Zipcode.objects.area().get(code='77002')
self.assertAlmostEqual(z.area.sq_m, ref_area, tol)
@skipUnlessDBFeature("gis_enabled")
class GeographyFunctionTests(TestCase):
fixtures = ['initial']
@skipUnlessDBFeature("has_Distance_function", "supports_distance_geodetic")
def test_distance_function(self):
"""
Testing Distance() support on non-point geography fields.
"""
if oracle:
ref_dists = [0, 4899.68, 8081.30, 9115.15]
else:
ref_dists = [0, 4891.20, 8071.64, 9123.95]
htown = City.objects.get(name='Houston')
qs = Zipcode.objects.annotate(distance=Distance('poly', htown.point))
for z, ref in zip(qs, ref_dists):
self.assertAlmostEqual(z.distance.m, ref, 2)
@skipUnlessDBFeature("has_Area_function", "supports_distance_geodetic")
def test_geography_area(self):
"""
Testing that Area calculations work on geography columns.
"""
# SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';
ref_area = 5439100.13587 if oracle else 5439084.70637573
tol = 5
z = Zipcode.objects.annotate(area=Area('poly')).get(code='77002')
self.assertAlmostEqual(z.area.sq_m, ref_area, tol)
| bsd-3-clause |
rishiloyola/bedrock | bedrock/mozorg/tests/test_context_processors.py | 29 | 1553 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.test.client import RequestFactory
from bedrock.base.urlresolvers import reverse
from nose.tools import eq_
from bedrock.mozorg.context_processors import funnelcake_param
from bedrock.mozorg.tests import TestCase
class TestFunnelcakeParam(TestCase):
def setUp(self):
self.rf = RequestFactory()
def _funnelcake(self, url='/', **kwargs):
return funnelcake_param(self.rf.get(url, kwargs))
def test_funnelcake_param_noop(self):
"""Should return an empty dict normally."""
eq_(self._funnelcake(), {})
def test_funnelcake_param_f(self):
"""Should inject funnelcake into context."""
eq_(self._funnelcake(f='5'), {'funnelcake_id': '5'})
eq_(self._funnelcake(f='234'), {'funnelcake_id': '234'})
def test_funnelcake_param_bad(self):
"""Should not inject bad funnelcake into context."""
eq_(self._funnelcake(f='5dude'), {})
eq_(self._funnelcake(f='123456'), {})
def test_funnelcake_param_increment_installer_help(self):
"""FC param should be +1 on the firefox/installer-help/ page.
Bug 933852.
"""
url = reverse('firefox.installer-help')
ctx = self._funnelcake(url, f='20')
eq_(ctx['funnelcake_id'], '21')
ctx = self._funnelcake(url, f='10')
eq_(ctx['funnelcake_id'], '11')
| mpl-2.0 |
jdugge/QGIS | python/plugins/processing/script/ScriptUtils.py | 12 | 4991 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ScriptUtils.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
from qgis.processing import alg as algfactory
import os
import inspect
import importlib
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (Qgis,
QgsApplication,
QgsProcessingAlgorithm,
QgsProcessingFeatureBasedAlgorithm,
QgsMessageLog
)
from processing.core.ProcessingConfig import ProcessingConfig
from processing.tools.system import mkdir, userFolder
scriptsRegistry = dict()
SCRIPTS_FOLDERS = "SCRIPTS_FOLDERS"
def defaultScriptsFolder():
folder = str(os.path.join(userFolder(), "scripts"))
mkdir(folder)
return os.path.abspath(folder)
def scriptsFolders():
folder = ProcessingConfig.getSetting(SCRIPTS_FOLDERS)
if folder is not None:
return folder.split(";")
else:
return [defaultScriptsFolder()]
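# Illustrative note (assumption, not from the QGIS source): the SCRIPTS_FOLDERS
# setting is stored as a single ';'-separated string, for example
# "/home/user/scripts;/srv/shared/scripts", which scriptsFolders() splits into
# a list of folder paths; when it is unset the default user scripts folder is used.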
def loadAlgorithm(moduleName, filePath):
global scriptsRegistry
try:
spec = importlib.util.spec_from_file_location(moduleName, filePath)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
try:
alg = algfactory.instances.pop().createInstance()
scriptsRegistry[alg.name()] = filePath
return alg
except IndexError:
for x in dir(module):
obj = getattr(module, x)
if inspect.isclass(obj) and issubclass(obj, (QgsProcessingAlgorithm, QgsProcessingFeatureBasedAlgorithm)) and obj.__name__ not in ("QgsProcessingAlgorithm", "QgsProcessingFeatureBasedAlgorithm"):
o = obj()
scriptsRegistry[o.name()] = filePath
return o
except (ImportError, AttributeError, TypeError) as e:
QgsMessageLog.logMessage(QCoreApplication.translate("ScriptUtils", "Could not import script algorithm '{}' from '{}'\n{}").format(moduleName, filePath, str(e)),
QCoreApplication.translate("ScriptUtils", "Processing"),
Qgis.Critical)
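# Illustrative note (assumption about typical usage, not from the QGIS source):
# a script file can either register an algorithm through the qgis.processing
# @alg decorator (collected via algfactory.instances) or simply define a
# QgsProcessingAlgorithm subclass; in both cases loadAlgorithm() records the
# file path in scriptsRegistry under the algorithm's name so that
# findAlgorithmSource() can locate it later.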
def findAlgorithmSource(name):
global scriptsRegistry
try:
return scriptsRegistry[name]
except:
return None
def resetScriptFolder(folder):
"""Check if script folder exist. If not, notify and try to check if it is absolute to another user setting.
If so, modify folder to change user setting to the current user setting."""
newFolder = folder
if os.path.exists(newFolder):
return newFolder
    QgsMessageLog.logMessage(QgsApplication.translate("loadAlgorithms", "Script folder {} does not exist").format(newFolder),
QgsApplication.translate("loadAlgorithms", "Processing"),
Qgis.Warning)
if not os.path.isabs(newFolder):
return None
# try to check if folder is absolute to other QgsApplication.qgisSettingsDirPath()
# isolate "QGIS3/profiles/"
appIndex = -4
profileIndex = -3
currentSettingPath = QgsApplication.qgisSettingsDirPath()
paths = currentSettingPath.split(os.sep)
commonSettingPath = os.path.join(paths[appIndex], paths[profileIndex])
if commonSettingPath in newFolder:
# strip not common folder part. e.g. preserve the profile path
# stripping the heading part that come from another location
tail = newFolder[newFolder.find(commonSettingPath):]
# tail folder with the actual userSetting path
header = os.path.join(os.sep, os.path.join(*paths[:appIndex]))
newFolder = os.path.join(header, tail)
# skip if it does not exist
if not os.path.exists(newFolder):
return None
    QgsMessageLog.logMessage(QgsApplication.translate("loadAlgorithms", "Script folder changed into {}").format(newFolder),
QgsApplication.translate("loadAlgorithms", "Processing"),
Qgis.Warning)
return newFolder
| gpl-2.0 |
ChameleonCloud/horizon | openstack_dashboard/test/unit/test_error_pages.py | 10 | 1325 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from os import path
from django.conf import settings
from openstack_dashboard.test import helpers as test
class ErrorPageTests(test.TestCase):
"""Tests for error pages."""
urls = 'openstack_dashboard.test.error_pages_urls'
def test_500_error(self):
with self.settings(
TEMPLATES=[{
'DIRS': [path.join(settings.ROOT_PATH, 'templates')],
'BACKEND': ('django.template.backends.django.'
'DjangoTemplates')
}],
ROOT_URLCONF=self.urls):
response = self.client.get('/500/')
self.assertIn(b'Server error', response.content)
| apache-2.0 |
jesseditson/rethinkdb | test/rql_test/connections/http_support/werkzeug/testsuite/security.py | 145 | 4264 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.security
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the security helpers.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import unittest
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.security import check_password_hash, generate_password_hash, \
safe_join, pbkdf2_hex, safe_str_cmp
class SecurityTestCase(WerkzeugTestCase):
def test_safe_str_cmp(self):
assert safe_str_cmp('a', 'a') is True
assert safe_str_cmp(b'a', u'a') is True
assert safe_str_cmp('a', 'b') is False
assert safe_str_cmp(b'aaa', 'aa') is False
assert safe_str_cmp(b'aaa', 'bbb') is False
assert safe_str_cmp(b'aaa', u'aaa') is True
def test_password_hashing(self):
hash0 = generate_password_hash('default')
assert check_password_hash(hash0, 'default')
assert hash0.startswith('pbkdf2:sha1:1000$')
hash1 = generate_password_hash('default', 'sha1')
hash2 = generate_password_hash(u'default', method='sha1')
assert hash1 != hash2
assert check_password_hash(hash1, 'default')
assert check_password_hash(hash2, 'default')
assert hash1.startswith('sha1$')
assert hash2.startswith('sha1$')
fakehash = generate_password_hash('default', method='plain')
assert fakehash == 'plain$$default'
assert check_password_hash(fakehash, 'default')
mhash = generate_password_hash(u'default', method='md5')
assert mhash.startswith('md5$')
assert check_password_hash(mhash, 'default')
legacy = 'md5$$c21f969b5f03d33d43e04f8f136e7682'
assert check_password_hash(legacy, 'default')
legacy = u'md5$$c21f969b5f03d33d43e04f8f136e7682'
assert check_password_hash(legacy, 'default')
def test_safe_join(self):
assert safe_join('foo', 'bar/baz') == os.path.join('foo', 'bar/baz')
assert safe_join('foo', '../bar/baz') is None
if os.name == 'nt':
assert safe_join('foo', 'foo\\bar') is None
def test_pbkdf2(self):
def check(data, salt, iterations, keylen, expected):
rv = pbkdf2_hex(data, salt, iterations, keylen)
self.assert_equal(rv, expected)
# From RFC 6070
check('password', 'salt', 1, None,
'0c60c80f961f0e71f3a9b524af6012062fe037a6')
check('password', 'salt', 1, 20,
'0c60c80f961f0e71f3a9b524af6012062fe037a6')
check('password', 'salt', 2, 20,
'ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957')
check('password', 'salt', 4096, 20,
'4b007901b765489abead49d926f721d065a429c1')
check('passwordPASSWORDpassword', 'saltSALTsaltSALTsaltSALTsaltSALTsalt',
4096, 25, '3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038')
check('pass\x00word', 'sa\x00lt', 4096, 16,
'56fa6aa75548099dcc37d7f03425e0c3')
# This one is from the RFC but it just takes for ages
##check('password', 'salt', 16777216, 20,
## 'eefe3d61cd4da4e4e9945b3d6ba2158c2634e984')
# From Crypt-PBKDF2
check('password', 'ATHENA.MIT.EDUraeburn', 1, 16,
'cdedb5281bb2f801565a1122b2563515')
check('password', 'ATHENA.MIT.EDUraeburn', 1, 32,
'cdedb5281bb2f801565a1122b25635150ad1f7a04bb9f3a333ecc0e2e1f70837')
check('password', 'ATHENA.MIT.EDUraeburn', 2, 16,
'01dbee7f4a9e243e988b62c73cda935d')
check('password', 'ATHENA.MIT.EDUraeburn', 2, 32,
'01dbee7f4a9e243e988b62c73cda935da05378b93244ec8f48a99e61ad799d86')
check('password', 'ATHENA.MIT.EDUraeburn', 1200, 32,
'5c08eb61fdf71e4e4ec3cf6ba1f5512ba7e52ddbc5e5142f708a31e2e62b1e13')
check('X' * 64, 'pass phrase equals block size', 1200, 32,
'139c30c0966bc32ba55fdbf212530ac9c5ec59f1a452f5cc9ad940fea0598ed1')
check('X' * 65, 'pass phrase exceeds block size', 1200, 32,
'9ccad6d468770cd51b10e6a68721be611a8b4d282601db3b36be9246915ec82a')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SecurityTestCase))
return suite
| agpl-3.0 |
vermouthmjl/scikit-learn | sklearn/metrics/classification.py | 1 | 69294 | """Metrics to assess performance on classification task given class prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Jatin Shah <[email protected]>
# Saurabh Jha <[email protected]>
# Bernardo Stein <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from ..exceptions import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
# We can't have more than one value on y_type => The set is no more needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
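# Illustrative behaviour (a sketch, not an official doctest): for plain class
# labels, _check_targets([0, 1, 1], [0, 1, 0]) returns
# ('binary', array([0, 1, 1]), array([0, 1, 0])); multilabel indicator inputs
# are returned as CSR sparse matrices instead.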
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
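# Small illustrative note (assumption): _weighted_sum returns the (weighted)
# mean of the per-sample scores when normalize=True, the weighted sum when a
# sample_weight vector is given, and a plain sum otherwise; e.g.
# _weighted_sum(np.array([1, 0, 1]), None, normalize=True) evaluates to about
# 0.667, the mean of the scores.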
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<https://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if sample_weight is None:
sample_weight = np.ones(y_true.shape[0], dtype=np.int)
else:
sample_weight = np.asarray(sample_weight)
check_consistent_length(sample_weight, y_true, y_pred)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
# also eliminate weights of eliminated items
sample_weight = sample_weight[ind]
CM = coo_matrix((sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistic 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
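# Illustrative example (not an official doctest): for two annotators who agree
# on three of four items, cohen_kappa_score([0, 1, 1, 0], [0, 1, 1, 1]) gives
# p_o = 0.75 and p_e = 0.5, hence kappa = (0.75 - 0.5) / (1 - 0.5) = 0.5.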
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<https://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred, sample_weight=None):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
sample_weight : array-like of shape = [n_samples], default None
Sample weights.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
prediction, 0 an average random prediction and -1 and inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
mean_yt = np.average(y_true, weights=sample_weight)
mean_yp = np.average(y_pred, weights=sample_weight)
y_true_u_cent = y_true - mean_yt
y_pred_u_cent = y_pred - mean_yp
cov_ytyp = np.average(y_true_u_cent * y_pred_u_cent, weights=sample_weight)
var_yt = np.average(y_true_u_cent ** 2, weights=sample_weight)
var_yp = np.average(y_pred_u_cent ** 2, weights=sample_weight)
mcc = cov_ytyp / np.sqrt(var_yt * var_yp)
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score <https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
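# Illustrative sketch only: the scalar F-beta formula for a single
# (precision, recall) pair, mirroring the elementwise computation inside
# precision_recall_fscore_support below; returns 0 when both inputs are 0.
def _fbeta_from_precision_recall(precision, recall, beta):
    beta2 = beta ** 2
    if precision == 0. and recall == 0.:
        return 0.
    return (1 + beta2) * precision * recall / (beta2 * precision + recall)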
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
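# Behavioural sketch (illustration only): called the way
# precision_recall_fscore_support does below, a zero denominator yields a
# 0.0 entry plus an UndefinedMetricWarning rather than an inf or NaN.
def _prf_divide_example():
    numerator = np.array([1., 0.])
    denominator = np.array([2., 0.])
    with np.errstate(divide='ignore', invalid='ignore'):
        # returns array([0.5, 0.]) and warns that precision is ill-defined
        return _prf_divide(numerator, denominator, 'precision', 'predicted',
                           None, ('precision',))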
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision: float (if average is not None) or array of float, shape =\
[n_unique_labels]
recall: float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score: float (if average is not None) or array of float, shape =\
[n_unique_labels]
support: int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<https://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
# Calculate tp_sum, pred_sum, true_sum
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide!
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
# Average the results
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
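# A minimal sketch (illustration only) of the unweighted multiclass counting
# performed above with bincount: per-label true-positive, predicted and true
# counts from labels already integer-encoded into [0, n_labels).
def _multiclass_sufficient_stats(y_true, y_pred, n_labels):
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    tp_sum = np.bincount(y_true[y_true == y_pred], minlength=n_labels)
    pred_sum = np.bincount(y_pred, minlength=n_labels)
    true_sum = np.bincount(y_true, minlength=n_labels)
    return tp_sum, pred_sum, true_sum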
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
target_names = ['%s' % l for l in labels]
name_width = max(len(cn) for cn in target_names)
width = max(name_width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, classes=None, sample_weight=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred``, which is equivalent to the
subset ``zero_one_loss`` function.
In multilabel classification, the Hamming loss is different from the
subset zero-one loss. The zero-one loss considers the entire set of labels
for a given sample incorrect if it does not entirely match the true set of
labels. Hamming loss is more forgiving in that it penalizes only the
individual labels.
The Hamming loss is upper bounded by the subset zero-one loss. When
normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<https://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if sample_weight is None:
weight_average = 1.
else:
weight_average = np.mean(sample_weight)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred,
sample_weight=sample_weight)
return (n_differences /
(y_true.shape[0] * len(classes) * weight_average))
elif y_type in ["binary", "multiclass"]:
return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
else:
raise ValueError("{0} is not supported".format(y_type))
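# Illustration only: the multilabel normalisation used above, written out for
# dense 0/1 indicator arrays without sample weights -- the fraction of all
# (sample, label) cells that disagree.
def _hamming_loss_dense(Y_true, Y_pred):
    Y_true = np.asarray(Y_true)
    Y_pred = np.asarray(Y_pred)
    n_samples, n_labels = Y_true.shape
    return np.count_nonzero(Y_true != Y_pred) / float(n_samples * n_labels)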
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
y_pred = check_array(y_pred, ensure_2d=False)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
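# Illustration only: the binary log loss written out directly for scalar
# probabilities P(y=1), mirroring the clipping and averaging above (the
# renormalisation step is unnecessary in this simplified setting).
def _binary_log_loss(y_true, y_prob, eps=1e-15):
    y_true = np.asarray(y_true, dtype=float)
    y_prob = np.clip(np.asarray(y_prob, dtype=float), eps, 1 - eps)
    return np.mean(-(y_true * np.log(y_prob) +
                     (1 - y_true) * np.log(1 - y_prob)))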
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In the binary case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound on the number of mistakes made by the classifier.
In the multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multiclass margin is calculated according
to the Crammer-Singer method. As in the binary case, the cumulated hinge
loss is an upper bound on the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<https://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
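# Illustration only: the binary branch above written out directly for labels
# already encoded as -1/+1 and real-valued decision scores.
def _binary_hinge_loss(y_true, pred_decision):
    margin = np.asarray(y_true, dtype=float) * np.asarray(pred_decision,
                                                          dtype=float)
    losses = 1 - margin
    losses[losses <= 0] = 0
    return np.mean(losses)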
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
the positive class.
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
https://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
| bsd-3-clause |
pleaseproject/python-for-android | python3-alpha/python3-src/Lib/encodings/utf_32.py | 180 | 5128 | """
Python 'utf-32' Codec
"""
import codecs, sys
### Codec APIs
encode = codecs.utf_32_encode
def decode(input, errors='strict'):
return codecs.utf_32_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict'):
codecs.IncrementalEncoder.__init__(self, errors)
self.encoder = None
def encode(self, input, final=False):
if self.encoder is None:
result = codecs.utf_32_encode(input, self.errors)[0]
if sys.byteorder == 'little':
self.encoder = codecs.utf_32_le_encode
else:
self.encoder = codecs.utf_32_be_encode
return result
return self.encoder(input, self.errors)[0]
def reset(self):
codecs.IncrementalEncoder.reset(self)
self.encoder = None
def getstate(self):
# state info we return to the caller:
# 0: stream is in natural order for this platform
# 2: endianness hasn't been determined yet
# (we're never writing in unnatural order)
return (2 if self.encoder is None else 0)
def setstate(self, state):
if state:
self.encoder = None
else:
if sys.byteorder == 'little':
self.encoder = codecs.utf_32_le_encode
else:
self.encoder = codecs.utf_32_be_encode
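# Illustration only (not part of the codec): the encoder state codes above in
# practice -- 2 until the first chunk (and BOM) has been written, 0 afterwards.
def _encoder_state_example():
    enc = IncrementalEncoder()
    before = enc.getstate()   # 2: endianness not yet fixed, BOM still pending
    enc.encode("a")
    after = enc.getstate()    # 0: stream is now in natural byte order
    return before, after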
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
def __init__(self, errors='strict'):
codecs.BufferedIncrementalDecoder.__init__(self, errors)
self.decoder = None
def _buffer_decode(self, input, errors, final):
if self.decoder is None:
(output, consumed, byteorder) = \
codecs.utf_32_ex_decode(input, errors, 0, final)
if byteorder == -1:
self.decoder = codecs.utf_32_le_decode
elif byteorder == 1:
self.decoder = codecs.utf_32_be_decode
elif consumed >= 4:
raise UnicodeError("UTF-32 stream does not start with BOM")
return (output, consumed)
return self.decoder(input, self.errors, final)
def reset(self):
codecs.BufferedIncrementalDecoder.reset(self)
self.decoder = None
def getstate(self):
# additional state info from the base class must be None here,
# as it isn't passed along to the caller
state = codecs.BufferedIncrementalDecoder.getstate(self)[0]
# additional state info we pass to the caller:
# 0: stream is in natural order for this platform
# 1: stream is in unnatural order
# 2: endianness hasn't been determined yet
if self.decoder is None:
return (state, 2)
addstate = int((sys.byteorder == "big") !=
(self.decoder is codecs.utf_32_be_decode))
return (state, addstate)
def setstate(self, state):
# state[1] will be ignored by BufferedIncrementalDecoder.setstate()
codecs.BufferedIncrementalDecoder.setstate(self, state)
state = state[1]
if state == 0:
self.decoder = (codecs.utf_32_be_decode
if sys.byteorder == "big"
else codecs.utf_32_le_decode)
elif state == 1:
self.decoder = (codecs.utf_32_le_decode
if sys.byteorder == "big"
else codecs.utf_32_be_decode)
else:
self.decoder = None
class StreamWriter(codecs.StreamWriter):
def __init__(self, stream, errors='strict'):
self.encoder = None
codecs.StreamWriter.__init__(self, stream, errors)
def reset(self):
codecs.StreamWriter.reset(self)
self.encoder = None
def encode(self, input, errors='strict'):
if self.encoder is None:
result = codecs.utf_32_encode(input, errors)
if sys.byteorder == 'little':
self.encoder = codecs.utf_32_le_encode
else:
self.encoder = codecs.utf_32_be_encode
return result
else:
return self.encoder(input, errors)
class StreamReader(codecs.StreamReader):
def reset(self):
codecs.StreamReader.reset(self)
try:
del self.decode
except AttributeError:
pass
def decode(self, input, errors='strict'):
(object, consumed, byteorder) = \
codecs.utf_32_ex_decode(input, errors, 0, False)
if byteorder == -1:
self.decode = codecs.utf_32_le_decode
elif byteorder == 1:
self.decode = codecs.utf_32_be_decode
elif consumed >= 4:
raise UnicodeError("UTF-32 stream does not start with BOM")
return (object, consumed)
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-32',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
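# Usage sketch (illustration only, not part of the codec module): encoding
# prepends a BOM, decoding consumes it, and the incremental decoder can
# resume across arbitrary chunk boundaries -- even one splitting the BOM.
def _utf_32_roundtrip_example():
    data = "sample".encode("utf-32")     # 4-byte BOM + native-endian payload
    assert data.decode("utf-32") == "sample"
    dec = IncrementalDecoder()
    # feed the bytes in two chunks, cutting inside the BOM
    text = dec.decode(data[:2]) + dec.decode(data[2:], final=True)
    assert text == "sample"
    return text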
| apache-2.0 |
ianblenke/awsebcli | ebcli/bundled/botocore/vendored/requests/compat.py | 114 | 2601 | # -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
#: Python 3.0.x
is_py30 = (is_py3 and _ver[1] == 0)
#: Python 3.1.x
is_py31 = (is_py3 and _ver[1] == 1)
#: Python 3.2.x
is_py32 = (is_py3 and _ver[1] == 2)
#: Python 3.3.x
is_py33 = (is_py3 and _ver[1] == 3)
#: Python 3.4.x
is_py34 = (is_py3 and _ver[1] == 4)
#: Python 2.7.x
is_py27 = (is_py2 and _ver[1] == 7)
#: Python 2.6.x
is_py26 = (is_py2 and _ver[1] == 6)
#: Python 2.5.x
is_py25 = (is_py2 and _ver[1] == 5)
#: Python 2.4.x
is_py24 = (is_py2 and _ver[1] == 4) # I'm assuming this is not by choice.
# ---------
# Platforms
# ---------
# Syntax sugar.
_ver = sys.version.lower()
is_pypy = ('pypy' in _ver)
is_jython = ('jython' in _ver)
is_ironpython = ('iron' in _ver)
# Assume CPython, if nothing else.
is_cpython = not any((is_pypy, is_jython, is_ironpython))
# Windows-based system.
is_windows = 'win32' in str(sys.platform).lower()
# Standard Linux 2+ system.
is_linux = ('linux' in str(sys.platform).lower())
is_osx = ('darwin' in str(sys.platform).lower())
is_hpux = ('hpux' in str(sys.platform).lower()) # Complete guess.
is_solaris = ('sunos' in str(sys.platform).lower()) # Complete guess.
try:
import simplejson as json
except (ImportError, SyntaxError):
# simplejson does not support Python 3.2, it throws a SyntaxError
# because of u'...' Unicode literals.
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
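# Usage sketch (illustration only, not part of the vendored module): code that
# imports these re-exported names works unchanged on Python 2 and Python 3.
def _compat_usage_example(url="http://example.com/a b?q=1"):
    parts = urlparse(url)                # same call on both major versions
    return builtin_str(quote(parts.path)), OrderedDict(q=parts.query)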
| apache-2.0 |