| repo_name (stringlengths 6–100) | path (stringlengths 4–294) | copies (stringlengths 1–5) | size (stringlengths 4–6) | content (stringlengths 606–896k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
AlexanderFabisch/scikit-learn | sklearn/metrics/pairwise.py | 9 | 45248 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If the dtype of both X and Y is float32, then dtype float32 is returned.
2. Otherwise, dtype float (i.e. float64) is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal, or the equivalent
check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vectors x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances between pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes, for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
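# Illustrative usage (editorial sketch, not part of the original module).
# Each row of X below is nearest to the corresponding row of Y, at a
# Euclidean distance of sqrt(2) ~= 1.414:
#
#   >>> import numpy as np
#   >>> from sklearn.metrics.pairwise import pairwise_distances_argmin_min
#   >>> X = np.array([[0., 0.], [3., 3.]])
#   >>> Y = np.array([[1., 1.], [4., 4.]])
#   >>> argmin, dist = pairwise_distances_argmin_min(X, Y)
#   >>> list(argmin), np.round(dist, 3).tolist()
#   ([0, 1], [1.414, 1.414])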
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes, for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Array containing points, shape (n_samples1, n_features).
Y : array-like
Array containing points, shape (n_samples2, n_features).
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (i.e. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
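# Illustrative usage (editorial sketch, not part of the original module).
# Orthogonal vectors are at cosine distance 1, parallel ones at 0:
#
#   >>> from sklearn.metrics.pairwise import cosine_distances
#   >>> cosine_distances([[1, 0]], [[0, 1]])[0, 0]
#   1.0
#   >>> cosine_distances([[1, 0]], [[2, 0]])[0, 0]
#   0.0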
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
-----
The cosine distance is equivalent to half the squared
euclidean distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
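# Illustrative check of the note above (editorial sketch, not part of the
# original module): for unit-norm samples, the paired cosine distance
# equals half the squared euclidean distance:
#
#   >>> from sklearn.metrics.pairwise import paired_cosine_distances
#   >>> paired_cosine_distances([[1., 0.]], [[0., 1.]])[0]
#   1.0
#   # 0.5 * ||(1, 0) - (0, 1)||^2 = 0.5 * 2 = 1.0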
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
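# Illustrative usage (editorial sketch, not part of the original module):
# the linear kernel is just the Gram matrix X Y^T.
#
#   >>> from sklearn.metrics.pairwise import linear_kernel
#   >>> linear_kernel([[1, 2], [3, 4]]).tolist()
#   [[5.0, 11.0], [11.0, 25.0]]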
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
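# Illustrative usage (editorial sketch, not part of the original module).
# With the default gamma = 1/n_features = 0.5, coef0 = 1 and degree = 3:
# (0.5 * <(1, 2), (3, 4)> + 1)^3 = (0.5 * 11 + 1)^3 = 6.5^3 = 274.625.
#
#   >>> from sklearn.metrics.pairwise import polynomial_kernel
#   >>> polynomial_kernel([[1, 2]], [[3, 4]])[0, 0]
#   274.625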
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
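# Illustrative usage (editorial sketch, not part of the original module).
# With gamma=1, K[i, j] = exp(-||x_i - x_j||^2), so the diagonal is 1 and
# points at unit distance give exp(-1) ~= 0.368:
#
#   >>> import numpy as np
#   >>> from sklearn.metrics.pairwise import rbf_kernel
#   >>> np.round(rbf_kernel([[0, 0], [1, 0]], gamma=1), 3).tolist()
#   [[1.0, 0.368], [0.368, 1.0]]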
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
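# Illustrative usage (editorial sketch, not part of the original module).
# With gamma=0.5, the L1 distance between (0, 0) and (1, 1) is 2, so the
# kernel value is exp(-1) ~= 0.368:
#
#   >>> import numpy as np
#   >>> from sklearn.metrics.pairwise import laplacian_kernel
#   >>> np.round(laplacian_kernel([[0., 0.]], [[1., 1.]], gamma=0.5), 3)[0, 0]
#   0.368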
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter *dense_output* for sparse output.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
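# Illustrative usage (editorial sketch, not part of the original module).
# The similarity of a vector with itself is 1; (1, 0) and (1, 1) meet at
# 45 degrees, giving cos(pi/4) = 1/sqrt(2) ~= 0.707:
#
#   >>> import numpy as np
#   >>> from sklearn.metrics.pairwise import cosine_similarity
#   >>> np.round(cosine_similarity([[1, 0], [1, 1]]), 3).tolist()
#   [[1.0, 0.707], [0.707, 1.0]]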
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
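# Illustrative usage (editorial sketch, not part of the original module).
# For x = (1, 0) and y = (0, 1), the chi-squared statistic is
# 1^2/1 + 1^2/1 = 2, so with the default gamma=1 the kernel value is
# exp(-2) ~= 0.135:
#
#   >>> import numpy as np
#   >>> from sklearn.metrics.pairwise import chi2_kernel
#   >>> np.round(chi2_kernel([[1., 0.]], [[0., 1.]]), 3)[0, 0]
#   0.135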
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distance matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.metrics.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
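# Illustrative usage (editorial sketch, not part of the original module):
#
#   >>> from sklearn.metrics.pairwise import pairwise_distances
#   >>> pairwise_distances([[0, 1], [2, 3]], metric='manhattan').tolist()
#   [[0.0, 4.0], [4.0, 0.0]]
#   >>> pairwise_distances([[0, 1], [2, 3]], [[0, 0]], metric='manhattan').tolist()
#   [[1.0], [5.0]]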
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
# If updating this dictionary, update the doc in both kernel_metrics()
# and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise kernel functions.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
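# Illustrative usage (editorial sketch, not part of the original module).
# Keyword arguments such as gamma are forwarded to the kernel function, so
# this is equivalent to rbf_kernel(X, gamma=1):
#
#   >>> import numpy as np
#   >>> from sklearn.metrics.pairwise import pairwise_kernels, rbf_kernel
#   >>> X = np.array([[0., 0.], [1., 0.]])
#   >>> np.allclose(pairwise_kernels(X, metric='rbf', gamma=1),
#   ...             rbf_kernel(X, gamma=1))
#   True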
| bsd-3-clause |
jschultz/Gooey | gooey/python_bindings/config_generator.py | 1 | 2477 | import os
import sys
from gooey.gui.windows import layouts
from gooey.python_bindings import argparse_to_json
from gooey.gui.util.quoting import quote
def create_from_parser(parser, source_path, cmd_args, **kwargs):
auto_start = kwargs.get('auto_start', False)
if hasattr(sys, 'frozen'):
run_cmd = quote(source_path)
else:
run_cmd = '{} -u {}'.format(quote(sys.executable), quote(source_path))
build_spec = {
'language': kwargs.get('language', 'english'),
'target': run_cmd,
'program_name': kwargs.get('program_name') or os.path.basename(sys.argv[0]).replace('.py', ''),
'program_description': kwargs.get('program_description', ''),
'auto_start': auto_start,
'show_advanced': kwargs.get('advanced', True),
'default_size': kwargs.get('default_size', (610, 530)),
'manual_start': False,
'layout_type': 'flat',
'monospace_display': kwargs.get('monospace_display', False),
'image_dir': kwargs.get('image_dir'),
'language_dir': kwargs.get('language_dir'),
'progress_regex': kwargs.get('progress_regex'),
'progress_expr': kwargs.get('progress_expr'),
'disable_progress_bar_animation': kwargs.get('disable_progress_bar_animation'),
'disable_stop_button': kwargs.get('disable_stop_button'),
'group_by_type': kwargs.get('group_by_type', True),
'ignore_command': kwargs.get('ignore_command', None),
'force_command': kwargs.get('force_command', None),
'use_argparse_groups': kwargs.get('use_argparse_groups', False),
'use_tabs': kwargs.get('use_tabs', False)
}
if build_spec['use_argparse_groups']:
build_spec['num_default_cols'] = kwargs.get('default_cols', 2)
build_spec['num_cols_dict'] = kwargs.get('cols_dict', {})
else:
build_spec['num_cols_dict'] = {"required arguments": kwargs.get('required_cols', 1),
"optional arguments": kwargs.get('optional_cols', 3)}
if not auto_start:
build_spec['program_description'] = parser.description or build_spec['program_description']
layout_data = argparse_to_json.convert(parser, build_spec['use_argparse_groups'], cmd_args=cmd_args) if build_spec['show_advanced'] else layouts.basic_config.items()
build_spec.update(layout_data)
return build_spec
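# Illustrative usage sketch (editorial addition, not part of the original
# module; the parser and path below are made up for the example):
#
#   >>> import argparse
#   >>> parser = argparse.ArgumentParser(description='Demo tool')
#   >>> _ = parser.add_argument('infile')
#   >>> spec = create_from_parser(parser, '/path/to/script.py', cmd_args=[],
#   ...                           program_name='Demo')
#   >>> spec['program_name']
#   'Demo'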
| mit |
timokoola/hslbot | docutils/parsers/rst/languages/fi.py | 128 | 3661 | # -*- coding: utf-8 -*-
# $Id: fi.py 7119 2011-09-02 13:00:23Z milde $
# Author: Asko Soukka <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Finnish-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
u'huomio': u'attention',
u'varo': u'caution',
u'code (translation required)': 'code',
u'vaara': u'danger',
u'virhe': u'error',
u'vihje': u'hint',
u't\u00e4rke\u00e4\u00e4': u'important',
u'huomautus': u'note',
u'neuvo': u'tip',
u'varoitus': u'warning',
u'kehotus': u'admonition',
u'sivupalkki': u'sidebar',
u'aihe': u'topic',
u'rivi': u'line-block',
u'tasalevyinen': u'parsed-literal',
u'ohje': u'rubric',
u'epigraafi': u'epigraph',
u'kohokohdat': u'highlights',
u'lainaus': u'pull-quote',
u'taulukko': u'table',
u'csv-taulukko': u'csv-table',
u'list-table (translation required)': 'list-table',
u'compound (translation required)': 'compound',
u'container (translation required)': 'container',
#u'kysymykset': u'questions',
u'meta': u'meta',
'math (translation required)': 'math',
#u'kuvakartta': u'imagemap',
u'kuva': u'image',
u'kaavio': u'figure',
u'sis\u00e4llyt\u00e4': u'include',
u'raaka': u'raw',
u'korvaa': u'replace',
u'unicode': u'unicode',
u'p\u00e4iv\u00e4ys': u'date',
u'luokka': u'class',
u'rooli': u'role',
u'default-role (translation required)': 'default-role',
u'title (translation required)': 'title',
u'sis\u00e4llys': u'contents',
u'kappale': u'sectnum',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',
#u'alaviitteet': u'footnotes',
#u'viitaukset': u'citations',
u'target-notes (translation required)': u'target-notes'}
"""Finnish name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
# language-dependent: fixed
u'lyhennys': u'abbreviation',
u'akronyymi': u'acronym',
u'kirjainsana': u'acronym',
u'code (translation required)': 'code',
u'hakemisto': u'index',
u'luettelo': u'index',
u'alaindeksi': u'subscript',
u'indeksi': u'subscript',
u'yl\u00e4indeksi': u'superscript',
u'title-reference (translation required)': u'title-reference',
u'title (translation required)': u'title-reference',
u'pep-reference (translation required)': u'pep-reference',
u'rfc-reference (translation required)': u'rfc-reference',
u'korostus': u'emphasis',
u'vahvistus': u'strong',
u'tasalevyinen': u'literal',
'math (translation required)': 'math',
u'named-reference (translation required)': u'named-reference',
u'anonymous-reference (translation required)': u'anonymous-reference',
u'footnote-reference (translation required)': u'footnote-reference',
u'citation-reference (translation required)': u'citation-reference',
u'substitution-reference (translation required)': u'substitution-reference',
u'kohde': u'target',
u'uri-reference (translation required)': u'uri-reference',
u'raw (translation required)': 'raw',}
"""Mapping of Finnish role names to canonical role names for interpreted text.
"""
| apache-2.0 |
bgxavier/neutron | neutron/db/metering/metering_db.py | 17 | 11434 | # Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_log import log as logging
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy import sql
from neutron.api.rpc.agentnotifiers import metering_rpc_agent_api
from neutron.common import constants
from neutron.db import common_db_mixin as base_db
from neutron.db import l3_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import metering
from neutron.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
class MeteringLabelRule(model_base.BASEV2, models_v2.HasId):
direction = sa.Column(sa.Enum('ingress', 'egress',
name='meteringlabels_direction'))
remote_ip_prefix = sa.Column(sa.String(64))
metering_label_id = sa.Column(sa.String(36),
sa.ForeignKey("meteringlabels.id",
ondelete="CASCADE"),
nullable=False)
excluded = sa.Column(sa.Boolean, default=False, server_default=sql.false())
class MeteringLabel(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(1024))
rules = orm.relationship(MeteringLabelRule, backref="label",
cascade="delete", lazy="joined")
routers = orm.relationship(
l3_db.Router,
primaryjoin="MeteringLabel.tenant_id==Router.tenant_id",
foreign_keys='MeteringLabel.tenant_id',
uselist=True)
shared = sa.Column(sa.Boolean, default=False, server_default=sql.false())
class MeteringDbMixin(metering.MeteringPluginBase,
base_db.CommonDbMixin):
def __init__(self):
self.meter_rpc = metering_rpc_agent_api.MeteringAgentNotifyAPI()
def _make_metering_label_dict(self, metering_label, fields=None):
res = {'id': metering_label['id'],
'name': metering_label['name'],
'description': metering_label['description'],
'shared': metering_label['shared'],
'tenant_id': metering_label['tenant_id']}
return self._fields(res, fields)
def create_metering_label(self, context, metering_label):
m = metering_label['metering_label']
tenant_id = self._get_tenant_id_for_create(context, m)
with context.session.begin(subtransactions=True):
metering_db = MeteringLabel(id=uuidutils.generate_uuid(),
description=m['description'],
tenant_id=tenant_id,
name=m['name'],
shared=m['shared'])
context.session.add(metering_db)
return self._make_metering_label_dict(metering_db)
def delete_metering_label(self, context, label_id):
with context.session.begin(subtransactions=True):
try:
label = self._get_by_id(context, MeteringLabel, label_id)
except orm.exc.NoResultFound:
raise metering.MeteringLabelNotFound(label_id=label_id)
context.session.delete(label)
def get_metering_label(self, context, label_id, fields=None):
try:
metering_label = self._get_by_id(context, MeteringLabel, label_id)
except orm.exc.NoResultFound:
raise metering.MeteringLabelNotFound(label_id=label_id)
return self._make_metering_label_dict(metering_label, fields)
def get_metering_labels(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'metering_labels', limit,
marker)
return self._get_collection(context, MeteringLabel,
self._make_metering_label_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
def _make_metering_label_rule_dict(self, metering_label_rule, fields=None):
res = {'id': metering_label_rule['id'],
'metering_label_id': metering_label_rule['metering_label_id'],
'direction': metering_label_rule['direction'],
'remote_ip_prefix': metering_label_rule['remote_ip_prefix'],
'excluded': metering_label_rule['excluded']}
return self._fields(res, fields)
def get_metering_label_rules(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'metering_label_rules',
limit, marker)
return self._get_collection(context, MeteringLabelRule,
self._make_metering_label_rule_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
def get_metering_label_rule(self, context, rule_id, fields=None):
try:
metering_label_rule = self._get_by_id(context,
MeteringLabelRule, rule_id)
except orm.exc.NoResultFound:
raise metering.MeteringLabelRuleNotFound(rule_id=rule_id)
return self._make_metering_label_rule_dict(metering_label_rule, fields)
def _validate_cidr(self, context, label_id, remote_ip_prefix,
direction, excluded):
r_ips = self.get_metering_label_rules(context,
filters={'metering_label_id':
[label_id],
'direction':
[direction],
'excluded':
[excluded]},
fields=['remote_ip_prefix'])
cidrs = [r['remote_ip_prefix'] for r in r_ips]
new_cidr_ipset = netaddr.IPSet([remote_ip_prefix])
if (netaddr.IPSet(cidrs) & new_cidr_ipset):
raise metering.MeteringLabelRuleOverlaps(
remote_ip_prefix=remote_ip_prefix)
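# Overlap-detection sketch (editorial addition, not part of the original
# module): _validate_cidr above rejects a new prefix when its IPSet
# intersects the IPSet of the already-stored prefixes, e.g.:
#
#   >>> import netaddr
#   >>> bool(netaddr.IPSet(['10.0.0.0/24']) & netaddr.IPSet(['10.0.0.128/25']))
#   True
#   >>> bool(netaddr.IPSet(['10.0.0.0/24']) & netaddr.IPSet(['10.0.1.0/24']))
#   False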
def create_metering_label_rule(self, context, metering_label_rule):
m = metering_label_rule['metering_label_rule']
with context.session.begin(subtransactions=True):
label_id = m['metering_label_id']
ip_prefix = m['remote_ip_prefix']
direction = m['direction']
excluded = m['excluded']
self._validate_cidr(context, label_id, ip_prefix, direction,
excluded)
metering_db = MeteringLabelRule(id=uuidutils.generate_uuid(),
metering_label_id=label_id,
direction=direction,
excluded=m['excluded'],
remote_ip_prefix=ip_prefix)
context.session.add(metering_db)
return self._make_metering_label_rule_dict(metering_db)
def delete_metering_label_rule(self, context, rule_id):
with context.session.begin(subtransactions=True):
try:
rule = self._get_by_id(context, MeteringLabelRule, rule_id)
except orm.exc.NoResultFound:
raise metering.MeteringLabelRuleNotFound(rule_id=rule_id)
context.session.delete(rule)
return self._make_metering_label_rule_dict(rule)
def _get_metering_rules_dict(self, metering_label):
rules = []
for rule in metering_label.rules:
rule_dict = self._make_metering_label_rule_dict(rule)
rules.append(rule_dict)
return rules
def _make_router_dict(self, router):
res = {'id': router['id'],
'name': router['name'],
'tenant_id': router['tenant_id'],
'admin_state_up': router['admin_state_up'],
'status': router['status'],
'gw_port_id': router['gw_port_id'],
constants.METERING_LABEL_KEY: []}
return res
def _process_sync_metering_data(self, context, labels):
all_routers = None
routers_dict = {}
for label in labels:
if label.shared:
if not all_routers:
all_routers = self._get_collection_query(context,
l3_db.Router)
routers = all_routers
else:
routers = label.routers
for router in routers:
router_dict = routers_dict.get(
router['id'],
self._make_router_dict(router))
rules = self._get_metering_rules_dict(label)
data = {'id': label['id'], 'rules': rules}
router_dict[constants.METERING_LABEL_KEY].append(data)
routers_dict[router['id']] = router_dict
return routers_dict.values()
def get_sync_data_for_rule(self, context, rule):
label = context.session.query(MeteringLabel).get(
rule['metering_label_id'])
if label.shared:
routers = self._get_collection_query(context, l3_db.Router)
else:
routers = label.routers
routers_dict = {}
for router in routers:
router_dict = routers_dict.get(router['id'],
self._make_router_dict(router))
data = {'id': label['id'], 'rule': rule}
router_dict[constants.METERING_LABEL_KEY].append(data)
routers_dict[router['id']] = router_dict
return routers_dict.values()
def get_sync_data_metering(self, context, label_id=None, router_ids=None):
labels = context.session.query(MeteringLabel)
if label_id:
labels = labels.filter(MeteringLabel.id == label_id)
elif router_ids:
labels = (labels.join(MeteringLabel.routers).
filter(l3_db.Router.id.in_(router_ids)))
return self._process_sync_metering_data(context, labels)
| apache-2.0 |
abdulbaqi/quranf | venv/lib/python2.7/site-packages/pip/vcs/bazaar.py | 393 | 4943 | import os
import tempfile
import re
from pip.backwardcompat import urlparse
from pip.log import logger
from pip.util import rmtree, display_path, call_subprocess
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
class Bazaar(VersionControl):
name = 'bzr'
dirname = '.bzr'
repo_name = 'branch'
bundle_file = 'bzr-branch.txt'
schemes = ('bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp', 'bzr+lp')
guide = ('# This was a Bazaar branch; to make it a branch again run:\n'
'bzr branch -r %(rev)s %(url)s .\n')
def __init__(self, url=None, *args, **kwargs):
super(Bazaar, self).__init__(url, *args, **kwargs)
# Python >= 2.7.4, 3.3 doesn't have uses_fragment or non_hierarchical
# Register lp but do not expose as a scheme to support bzr+lp.
if getattr(urlparse, 'uses_fragment', None):
urlparse.uses_fragment.extend(['lp'])
urlparse.non_hierarchical.extend(['lp'])
def parse_vcs_bundle_file(self, content):
url = rev = None
for line in content.splitlines():
if not line.strip() or line.strip().startswith('#'):
continue
match = re.search(r'^bzr\s*branch\s*-r\s*(\d*)', line)
if match:
rev = match.group(1).strip()
url = line[match.end():].strip().split(None, 1)[0]
if url and rev:
return url, rev
return None, None
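# Example of the bundle-file format parsed above (editorial addition, not
# part of the original module). Given the line
#   bzr branch -r 42 http://example.com/branch .
# the method returns ('http://example.com/branch', '42'); blank lines and
# '#' comment lines are skipped.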
def export(self, location):
"""Export the Bazaar repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
if os.path.exists(location):
# Remove the location to make sure Bazaar can export it correctly
rmtree(location)
try:
call_subprocess([self.cmd, 'export', location], cwd=temp_dir,
filter_stdout=self._filter, show_stdout=False)
finally:
rmtree(temp_dir)
def switch(self, dest, url, rev_options):
call_subprocess([self.cmd, 'switch', url], cwd=dest)
def update(self, dest, rev_options):
call_subprocess(
[self.cmd, 'pull', '-q'] + rev_options, cwd=dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = ['-r', rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.notify('Checking out %s%s to %s'
% (url, rev_display, display_path(dest)))
call_subprocess(
[self.cmd, 'branch', '-q'] + rev_options + [url, dest])
def get_url_rev(self):
# hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it
url, rev = super(Bazaar, self).get_url_rev()
if url.startswith('ssh://'):
url = 'bzr+' + url
return url, rev
def get_url(self, location):
urls = call_subprocess(
[self.cmd, 'info'], show_stdout=False, cwd=location)
for line in urls.splitlines():
line = line.strip()
for x in ('checkout of branch: ',
'parent branch: '):
if line.startswith(x):
repo = line.split(x)[1]
if self._is_local_repository(repo):
return path_to_url(repo)
return repo
return None
def get_revision(self, location):
revision = call_subprocess(
[self.cmd, 'revno'], show_stdout=False, cwd=location)
return revision.splitlines()[-1]
def get_tag_revs(self, location):
tags = call_subprocess(
[self.cmd, 'tags'], show_stdout=False, cwd=location)
tag_revs = []
for line in tags.splitlines():
tags_match = re.search(r'([.\w-]+)\s*(.*)$', line)
if tags_match:
tag = tags_match.group(1)
rev = tags_match.group(2)
tag_revs.append((rev.strip(), tag.strip()))
return dict(tag_revs)
def get_src_requirement(self, dist, location, find_tags):
repo = self.get_url(location)
if not repo.lower().startswith('bzr:'):
repo = 'bzr+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
tag_revs = self.get_tag_revs(location)
if current_rev in tag_revs:
# It's a tag
full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev])
else:
full_egg_name = '%s-dev_r%s' % (dist.egg_name(), current_rev)
return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)
vcs.register(Bazaar)
| mit |
Juniper/neutron | neutron/tests/unit/ml2/test_type_vxlan.py | 5 | 9439 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Kyle Mestery, Cisco Systems, Inc.
from oslo.config import cfg
import testtools
from testtools import matchers
from neutron.common import exceptions as exc
from neutron.db import api as db
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_vxlan
from neutron.tests import base
TUNNEL_IP_ONE = "10.10.10.10"
TUNNEL_IP_TWO = "10.10.10.20"
TUN_MIN = 100
TUN_MAX = 109
TUNNEL_RANGES = [(TUN_MIN, TUN_MAX)]
UPDATED_TUNNEL_RANGES = [(TUN_MIN + 5, TUN_MAX + 5)]
INVALID_VXLAN_VNI = 7337
MULTICAST_GROUP = "239.1.1.1"
VXLAN_UDP_PORT_ONE = 9999
VXLAN_UDP_PORT_TWO = 8888
class VxlanTypeTest(base.BaseTestCase):
def setUp(self):
super(VxlanTypeTest, self).setUp()
db.configure_db()
cfg.CONF.set_override('vni_ranges', [TUNNEL_RANGES],
group='ml2_type_vxlan')
cfg.CONF.set_override('vxlan_group', MULTICAST_GROUP,
group='ml2_type_vxlan')
self.driver = type_vxlan.VxlanTypeDriver()
self.driver.vxlan_vni_ranges = TUNNEL_RANGES
self.driver._sync_vxlan_allocations()
self.session = db.get_session()
self.addCleanup(cfg.CONF.reset)
self.addCleanup(db.clear_db)
def test_vxlan_tunnel_type(self):
self.assertEqual(self.driver.get_type(), p_const.TYPE_VXLAN)
def test_validate_provider_segment(self):
segment = {api.NETWORK_TYPE: 'vxlan',
api.PHYSICAL_NETWORK: 'phys_net',
api.SEGMENTATION_ID: None}
with testtools.ExpectedException(exc.InvalidInput):
self.driver.validate_provider_segment(segment)
segment[api.PHYSICAL_NETWORK] = None
with testtools.ExpectedException(exc.InvalidInput):
self.driver.validate_provider_segment(segment)
def test_sync_tunnel_allocations(self):
self.assertIsNone(
self.driver.get_vxlan_allocation(self.session,
(TUN_MIN - 1))
)
self.assertFalse(
self.driver.get_vxlan_allocation(self.session,
(TUN_MIN)).allocated
)
self.assertFalse(
self.driver.get_vxlan_allocation(self.session,
(TUN_MIN + 1)).allocated
)
self.assertFalse(
self.driver.get_vxlan_allocation(self.session,
(TUN_MAX - 1)).allocated
)
self.assertFalse(
self.driver.get_vxlan_allocation(self.session,
(TUN_MAX)).allocated
)
self.assertIsNone(
self.driver.get_vxlan_allocation(self.session,
(TUN_MAX + 1))
)
self.driver.vxlan_vni_ranges = UPDATED_TUNNEL_RANGES
self.driver._sync_vxlan_allocations()
self.assertIsNone(self.driver.
get_vxlan_allocation(self.session,
(TUN_MIN + 5 - 1)))
self.assertFalse(self.driver.
get_vxlan_allocation(self.session, (TUN_MIN + 5)).
allocated)
self.assertFalse(self.driver.
get_vxlan_allocation(self.session, (TUN_MIN + 5 + 1)).
allocated)
self.assertFalse(self.driver.
get_vxlan_allocation(self.session, (TUN_MAX + 5 - 1)).
allocated)
self.assertFalse(self.driver.
get_vxlan_allocation(self.session, (TUN_MAX + 5)).
allocated)
self.assertIsNone(self.driver.
get_vxlan_allocation(self.session,
(TUN_MAX + 5 + 1)))
def test_reserve_provider_segment(self):
segment = {api.NETWORK_TYPE: 'vxlan',
api.PHYSICAL_NETWORK: 'None',
api.SEGMENTATION_ID: 101}
self.driver.reserve_provider_segment(self.session, segment)
alloc = self.driver.get_vxlan_allocation(self.session,
segment[api.SEGMENTATION_ID])
self.assertTrue(alloc.allocated)
with testtools.ExpectedException(exc.TunnelIdInUse):
self.driver.reserve_provider_segment(self.session, segment)
self.driver.release_segment(self.session, segment)
alloc = self.driver.get_vxlan_allocation(self.session,
segment[api.SEGMENTATION_ID])
self.assertFalse(alloc.allocated)
segment[api.SEGMENTATION_ID] = 1000
self.driver.reserve_provider_segment(self.session, segment)
alloc = self.driver.get_vxlan_allocation(self.session,
segment[api.SEGMENTATION_ID])
self.assertTrue(alloc.allocated)
self.driver.release_segment(self.session, segment)
alloc = self.driver.get_vxlan_allocation(self.session,
segment[api.SEGMENTATION_ID])
self.assertIsNone(alloc)
def test_allocate_tenant_segment(self):
tunnel_ids = set()
for x in xrange(TUN_MIN, TUN_MAX + 1):
segment = self.driver.allocate_tenant_segment(self.session)
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.GreaterThan(TUN_MIN - 1))
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.LessThan(TUN_MAX + 1))
tunnel_ids.add(segment[api.SEGMENTATION_ID])
segment = self.driver.allocate_tenant_segment(self.session)
self.assertIsNone(segment)
segment = {api.NETWORK_TYPE: 'vxlan',
api.PHYSICAL_NETWORK: 'None',
api.SEGMENTATION_ID: tunnel_ids.pop()}
self.driver.release_segment(self.session, segment)
segment = self.driver.allocate_tenant_segment(self.session)
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.GreaterThan(TUN_MIN - 1))
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.LessThan(TUN_MAX + 1))
tunnel_ids.add(segment[api.SEGMENTATION_ID])
for tunnel_id in tunnel_ids:
segment[api.SEGMENTATION_ID] = tunnel_id
self.driver.release_segment(self.session, segment)
def test_vxlan_endpoints(self):
"""Test VXLAN allocation/de-allocation."""
# Set first endpoint, verify it gets VXLAN VNI 1
vxlan1_endpoint = self.driver.add_endpoint(TUNNEL_IP_ONE,
VXLAN_UDP_PORT_ONE)
self.assertEqual(TUNNEL_IP_ONE, vxlan1_endpoint.ip_address)
self.assertEqual(VXLAN_UDP_PORT_ONE, vxlan1_endpoint.udp_port)
# Set second endpoint, verify it gets VXLAN VNI 2
vxlan2_endpoint = self.driver.add_endpoint(TUNNEL_IP_TWO,
VXLAN_UDP_PORT_TWO)
self.assertEqual(TUNNEL_IP_TWO, vxlan2_endpoint.ip_address)
self.assertEqual(VXLAN_UDP_PORT_TWO, vxlan2_endpoint.udp_port)
# Get all the endpoints
endpoints = self.driver.get_endpoints()
for endpoint in endpoints:
if endpoint['ip_address'] == TUNNEL_IP_ONE:
self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint['udp_port'])
elif endpoint['ip_address'] == TUNNEL_IP_TWO:
self.assertEqual(VXLAN_UDP_PORT_TWO, endpoint['udp_port'])
class VxlanTypeMultiRangeTest(base.BaseTestCase):
TUN_MIN0 = 100
TUN_MAX0 = 101
TUN_MIN1 = 200
TUN_MAX1 = 201
TUNNEL_MULTI_RANGES = [(TUN_MIN0, TUN_MAX0), (TUN_MIN1, TUN_MAX1)]
def setUp(self):
super(VxlanTypeMultiRangeTest, self).setUp()
db.configure_db()
self.driver = type_vxlan.VxlanTypeDriver()
self.driver.vxlan_vni_ranges = self.TUNNEL_MULTI_RANGES
self.driver._sync_vxlan_allocations()
self.session = db.get_session()
self.addCleanup(db.clear_db)
def test_release_segment(self):
segments = [self.driver.allocate_tenant_segment(self.session)
for i in range(4)]
# Release them in random order. No special meaning.
for i in (0, 2, 1, 3):
self.driver.release_segment(self.session, segments[i])
for key in (self.TUN_MIN0, self.TUN_MAX0,
self.TUN_MIN1, self.TUN_MAX1):
alloc = self.driver.get_vxlan_allocation(self.session, key)
self.assertFalse(alloc.allocated)
| apache-2.0 |
dingocuster/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 254 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two South American
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
   Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
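        # Note: the coverage rasters are stored north-to-south while ygrid
        # ascends south-to-north, hence the negative row index -iy below.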
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
mdj2/django | django/contrib/auth/tests/test_forms.py | 6 | 16393 | from __future__ import unicode_literals
import os
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from django.contrib.auth.forms import (UserCreationForm, AuthenticationForm,
PasswordChangeForm, SetPasswordForm, UserChangeForm, PasswordResetForm,
ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget)
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.core import mail
from django.forms.fields import Field, CharField
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.encoding import force_text
from django.utils._os import upath
from django.utils import translation
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class UserCreationFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_user_already_exists(self):
data = {
'username': 'testclient',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["username"].errors,
[force_text(form.error_messages['duplicate_username'])])
def test_invalid_data(self):
data = {
'username': 'jsmith!',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["username"].errors,
[force_text(form.fields['username'].error_messages['invalid'])])
def test_password_verification(self):
# The verification password is incorrect.
data = {
'username': 'jsmith',
'password1': 'test123',
'password2': 'test',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_both_passwords(self):
# One (or both) passwords weren't given
data = {'username': 'jsmith'}
form = UserCreationForm(data)
required_error = [force_text(Field.default_error_messages['required'])]
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, required_error)
data['password2'] = 'test123'
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, [])
def test_success(self):
# The success case.
data = {
'username': '[email protected]',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
u = form.save()
self.assertEqual(repr(u), '<User: [email protected]>')
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AuthenticationFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_invalid_username(self):
# The user submits an invalid username.
data = {
'username': 'jsmith_does_not_exist',
'password': 'test123',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
})])
def test_inactive_user(self):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['inactive'])])
def test_inactive_user_i18n(self):
with self.settings(USE_I18N=True):
with translation.override('pt-br', deactivate=True):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['inactive'])])
def test_success(self):
# The success case
data = {
'username': 'testclient',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
def test_username_field_label(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label="Name", max_length=75)
form = CustomAuthenticationForm()
self.assertEqual(form['username'].label, "Name")
def test_username_field_label_not_set(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField()
form = CustomAuthenticationForm()
UserModel = get_user_model()
username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))
def test_username_field_label_empty_string(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label='')
form = CustomAuthenticationForm()
self.assertEqual(form.fields['username'].label, "")
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class SetPasswordFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_success(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class PasswordChangeFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_incorrect_password(self):
user = User.objects.get(username='testclient')
data = {
'old_password': 'test',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["old_password"].errors,
[force_text(form.error_messages['password_incorrect'])])
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_success(self):
# The success case.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
def test_field_order(self):
# Regression test - check the order of fields:
user = User.objects.get(username='testclient')
self.assertEqual(list(PasswordChangeForm(user, {}).fields),
['old_password', 'new_password1', 'new_password2'])
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class UserChangeFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_username_validity(self):
user = User.objects.get(username='testclient')
data = {'username': 'not valid'}
form = UserChangeForm(data, instance=user)
self.assertFalse(form.is_valid())
self.assertEqual(form['username'].errors,
[force_text(form.fields['username'].error_messages['invalid'])])
def test_bug_14242(self):
        # A regression test, introduced by adding an optimization for the
# UserChangeForm.
class MyUserForm(UserChangeForm):
def __init__(self, *args, **kwargs):
super(MyUserForm, self).__init__(*args, **kwargs)
self.fields['groups'].help_text = 'These groups give users different permissions'
class Meta(UserChangeForm.Meta):
fields = ('groups',)
# Just check we can create it
form = MyUserForm({})
    def test_unusable_password(self):
user = User.objects.get(username='empty_password')
user.set_unusable_password()
user.save()
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_empty_password(self):
user = User.objects.get(username='empty_password')
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_unmanageable_password(self):
user = User.objects.get(username='unmanageable_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."),
form.as_table())
def test_bug_17944_unknown_password_algorithm(self):
user = User.objects.get(username='unknown_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."),
form.as_table())
def test_bug_19133(self):
"The change form does not return the password value"
# Use the form to construct the POST data
user = User.objects.get(username='testclient')
form_for_data = UserChangeForm(instance=user)
post_data = form_for_data.initial
# The password field should be readonly, so anything
# posted here should be ignored; the form will be
# valid, and give back the 'initial' value for the
# password field.
post_data['password'] = 'new password'
form = UserChangeForm(instance=user, data=post_data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password'], 'sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161')
def test_bug_19349_bound_password_field(self):
user = User.objects.get(username='testclient')
form = UserChangeForm(data={}, instance=user)
# When rendering the bound password field,
# ReadOnlyPasswordHashWidget needs the initial
# value to render correctly
self.assertEqual(form.initial['password'], form['password'].value())
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class PasswordResetFormTest(TestCase):
fixtures = ['authtestdata.json']
def create_dummy_user(self):
"""creates a user and returns a tuple
(user_object, username, email)
"""
username = 'jsmith'
email = '[email protected]'
user = User.objects.create_user(username, email, 'test123')
return (user, username, email)
def test_invalid_email(self):
data = {'email': 'not valid'}
form = PasswordResetForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])
    def test_nonexistent_email(self):
        # Test a nonexistent email address. This should not fail, since
        # failing would expose information about registered users.
data = {'email': '[email protected]'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(len(mail.outbox), 0)
@override_settings(
TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader',),
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
),
)
def test_cleaned_data(self):
# Regression test
(user, username, email) = self.create_dummy_user()
data = {'email': email}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
form.save(domain_override='example.com')
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
@override_settings(
TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader',),
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
),
)
def test_custom_email_subject(self):
data = {'email': '[email protected]'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')
def test_bug_5605(self):
# bug #5605, preserve the case of the user name (before the @ in the
# email address) when creating a user.
user = User.objects.create_user('forms_test2', '[email protected]', 'test')
self.assertEqual(user.email, '[email protected]')
user = User.objects.create_user('forms_test3', 'tesT', 'test')
self.assertEqual(user.email, 'tesT')
def test_inactive_user(self):
        # Tests that an inactive user cannot receive a password reset email.
(user, username, email) = self.create_dummy_user()
user.is_active = False
user.save()
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
self.assertEqual(len(mail.outbox), 0)
def test_unusable_password(self):
user = User.objects.create_user('testuser', '[email protected]', 'test')
data = {"email": "[email protected]"}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
user.set_unusable_password()
user.save()
form = PasswordResetForm(data)
# The form itself is valid, but no email is sent
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
class ReadOnlyPasswordHashTest(TestCase):
def test_bug_19349_render_with_none_value(self):
# Rendering the widget with value set to None
# mustn't raise an exception.
widget = ReadOnlyPasswordHashWidget()
html = widget.render(name='password', value=None, attrs={})
self.assertIn(_("No password set."), html)
def test_readonly_field_has_changed(self):
field = ReadOnlyPasswordHashField()
self.assertFalse(field._has_changed('aaa', 'bbb'))
| bsd-3-clause |
TemplateVoid/mapnik | scons/scons-local-2.3.1/SCons/Tool/suncc.py | 8 | 1998 | """SCons.Tool.suncc
Tool-specific initialization for Sun Solaris (Forte) CC and cc.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/suncc.py 2014/03/02 14:18:15 garyo"
import SCons.Util
import cc
def generate(env):
"""
Add Builders and construction variables for Forte C and C++ compilers
to an Environment.
"""
cc.generate(env)
env['CXX'] = 'CC'
env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS -KPIC')
env['SHOBJPREFIX'] = 'so_'
env['SHOBJSUFFIX'] = '.o'
def exists(env):
return env.Detect('CC')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-2.1 |
motion2015/a3 | common/lib/xmodule/xmodule/library_tools.py | 5 | 6842 | """
XBlock runtime services for LibraryContentModule
"""
from django.core.exceptions import PermissionDenied
from opaque_keys.edx.locator import LibraryLocator
from xmodule.library_content_module import ANY_CAPA_TYPE_VALUE
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.capa_module import CapaDescriptor
class LibraryToolsService(object):
"""
Service that allows LibraryContentModule to interact with libraries in the
modulestore.
"""
def __init__(self, modulestore):
self.store = modulestore
def _get_library(self, library_key):
"""
Given a library key like "library-v1:ProblemX+PR0B", return the
'library' XBlock with meta-information about the library.
A specific version may be specified.
Returns None on error.
"""
if not isinstance(library_key, LibraryLocator):
library_key = LibraryLocator.from_string(library_key)
try:
return self.store.get_library(
library_key, remove_version=False, remove_branch=False, head_validation=False
)
except ItemNotFoundError:
return None
def get_library_version(self, lib_key):
"""
Get the version (an ObjectID) of the given library.
Returns None if the library does not exist.
"""
library = self._get_library(lib_key)
if library:
# We need to know the library's version so ensure it's set in library.location.library_key.version_guid
assert library.location.library_key.version_guid is not None
return library.location.library_key.version_guid
return None
def create_block_analytics_summary(self, course_key, block_keys):
"""
Given a CourseKey and a list of (block_type, block_id) pairs,
prepare the JSON-ready metadata needed for analytics logging.
This is [
{"usage_key": x, "original_usage_key": y, "original_usage_version": z, "descendants": [...]}
]
where the main list contains all top-level blocks, and descendants contains a *flat* list of all
descendants of the top level blocks, if any.
"""
def summarize_block(usage_key):
""" Basic information about the given block """
orig_key, orig_version = self.store.get_block_original_usage(usage_key)
return {
"usage_key": unicode(usage_key),
"original_usage_key": unicode(orig_key) if orig_key else None,
"original_usage_version": unicode(orig_version) if orig_version else None,
}
result_json = []
for block_key in block_keys:
key = course_key.make_usage_key(*block_key)
info = summarize_block(key)
info['descendants'] = []
try:
block = self.store.get_item(key, depth=None) # Load the item and all descendants
children = list(getattr(block, "children", []))
while children:
child_key = children.pop()
child = self.store.get_item(child_key)
info['descendants'].append(summarize_block(child_key))
children.extend(getattr(child, "children", []))
except ItemNotFoundError:
pass # The block has been deleted
result_json.append(info)
return result_json
def _filter_child(self, usage_key, capa_type):
"""
Filters children by CAPA problem type, if configured
"""
if capa_type == ANY_CAPA_TYPE_VALUE:
return True
if usage_key.block_type != "problem":
return False
descriptor = self.store.get_item(usage_key, depth=0)
assert isinstance(descriptor, CapaDescriptor)
return capa_type in descriptor.problem_types
def can_use_library_content(self, block):
"""
Determines whether a modulestore holding a course_id supports libraries.
"""
return self.store.check_supports(block.location.course_key, 'copy_from_template')
def update_children(self, dest_block, user_id, user_perms=None, version=None):
"""
This method is to be used when the library that a LibraryContentModule
references has been updated. It will re-fetch all matching blocks from
the libraries, and copy them as children of dest_block. The children
will be given new block_ids, but the definition ID used should be the
exact same definition ID used in the library.
This method will update dest_block's 'source_library_version' field to
store the version number of the libraries used, so we easily determine
if dest_block is up to date or not.
"""
if user_perms and not user_perms.can_write(dest_block.location.course_key):
raise PermissionDenied()
if not dest_block.source_library_id:
dest_block.source_library_version = ""
return
source_blocks = []
library_key = dest_block.source_library_key
if version:
library_key = library_key.replace(branch=ModuleStoreEnum.BranchName.library, version_guid=version)
library = self._get_library(library_key)
if library is None:
raise ValueError("Requested library not found.")
if user_perms and not user_perms.can_read(library_key):
raise PermissionDenied()
filter_children = (dest_block.capa_type != ANY_CAPA_TYPE_VALUE)
if filter_children:
# Apply simple filtering based on CAPA problem types:
source_blocks.extend([key for key in library.children if self._filter_child(key, dest_block.capa_type)])
else:
source_blocks.extend(library.children)
with self.store.bulk_operations(dest_block.location.course_key):
dest_block.source_library_version = unicode(library.location.library_key.version_guid)
self.store.update_item(dest_block, user_id)
head_validation = not version
dest_block.children = self.store.copy_from_template(
source_blocks, dest_block.location, user_id, head_validation=head_validation
)
# ^-- copy_from_template updates the children in the DB
# but we must also set .children here to avoid overwriting the DB again
def list_available_libraries(self):
"""
List all known libraries.
Returns tuples of (LibraryLocator, display_name)
"""
return [
(lib.location.library_key.replace(version_guid=None, branch=None), lib.display_name)
for lib in self.store.get_libraries()
]
| agpl-3.0 |
andresailer/DIRAC | tests/Integration/DataManagementSystem/Test_FileCatalogDB.py | 1 | 56699 | """ This is a test of the FileCatalogDB
    It assumes that the DB is already present.
"""
# pylint: disable=invalid-name,wrong-import-position
import unittest
import itertools
import os
import sys
from DIRAC.Core.Base import Script
Script.parseCommandLine()
from DIRAC.DataManagementSystem.DB.FileCatalogDB import FileCatalogDB
from DIRAC.Core.Security.Properties import FC_MANAGEMENT
seName = "mySE"
testUser = 'atsareg'
testGroup = 'dirac_user'
testDir = '/vo.formation.idgrilles.fr/user/a/atsareg/testdir'
parentDir = '/vo.formation.idgrilles.fr/user/a/atsareg'
nonExistingDir = "/I/Dont/exist/dir"
testFile = '/vo.formation.idgrilles.fr/user/a/atsareg/testdir/testfile'
nonExistingFile = "/I/Dont/exist"
x509Chain = "<X509Chain 3 certs [/DC=ch/DC=cern/OU=computers/CN=volhcb12.cern.ch]"
x509Chain += "[/DC=ch/DC=cern/CN=CERN Trusted Certification Authority][/DC=ch/DC=cern/CN=CERN Root CA]>"
credDict = {
'DN': '/DC=ch/DC=cern/OU=computers/CN=volhcb12.cern.ch',
'extraCredentials': 'hosts',
'group': 'visitor',
'CN': 'volhcb12.cern.ch',
'x509Chain': x509Chain,
'username': 'anonymous',
'isLimitedProxy': False,
'properties': [FC_MANAGEMENT],
'isProxy': False}
isAdmin = False
proxyUser = 'anonymous'
proxyGroup = 'visitor'
# TESTS WERE DESIGNED WITH THIS CONFIGURATION
# DATABASE_CONFIG = { 'UserGroupManager' : 'UserAndGroupManagerDB',
# 'SEManager' : 'SEManagerDB',
# 'SecurityManager' : 'NoSecurityManager',
# 'DirectoryManager' : 'DirectoryLevelTree',
# 'FileManager' : 'FileManager',
# 'DirectoryMetadata' : 'DirectoryMetadata',
# 'FileMetadata' : 'FileMetadata',
# 'DatasetManager' : 'DatasetManager',
# 'UniqueGUID' : False,
# 'GlobalReadAccess' : True,
# 'LFNPFNConvention' : 'Strong',
# 'ResolvePFN' : True,
# 'DefaultUmask' : 0775,
# 'ValidFileStatus' : ['AprioriGood', 'Trash', 'Removing', 'Probing'],
# 'ValidReplicaStatus' : ['AprioriGood', 'Trash', 'Removing', 'Probing'],
# 'VisibleFileStatus' : ['AprioriGood'],
# 'VisibleReplicaStatus': ['AprioriGood'] }
DATABASE_CONFIG = {
'UserGroupManager': 'UserAndGroupManagerDB', # UserAndGroupManagerDB, UserAndGroupManagerCS
'SEManager': 'SEManagerDB', # SEManagerDB, SEManagerCS
# NoSecurityManager, DirectorySecurityManager, FullSecurityManager
'SecurityManager': 'NoSecurityManager',
# DirectorySimpleTree, DirectoryFlatTree, DirectoryNodeTree, DirectoryLevelTree
'DirectoryManager': 'DirectoryLevelTree',
'FileManager': 'FileManager', # FileManagerFlat, FileManager
'DirectoryMetadata': 'DirectoryMetadata',
'FileMetadata': 'FileMetadata',
'DatasetManager': 'DatasetManager',
'UniqueGUID': True,
'GlobalReadAccess': True,
'LFNPFNConvention': 'Strong',
'ResolvePFN': True,
'DefaultUmask': 0o775,
'ValidFileStatus': ['AprioriGood', 'Trash', 'Removing', 'Probing'],
'ValidReplicaStatus': ['AprioriGood', 'Trash', 'Removing', 'Probing'],
'VisibleFileStatus': ['AprioriGood'],
'VisibleReplicaStatus': ['AprioriGood']}
ALL_MANAGERS = {
    "UserGroupManager": ["UserAndGroupManagerDB", "UserAndGroupManagerCS"],
    "SEManager": ["SEManagerDB", "SEManagerCS"],
    "SecurityManager": ["NoSecurityManager", "DirectorySecurityManager", "FullSecurityManager"],
    "DirectoryManager": ["DirectorySimpleTree", "DirectoryFlatTree", "DirectoryNodeTree", "DirectoryLevelTree"],
    "FileManager": ["FileManagerFlat", "FileManager"],
}
ALL_MANAGERS_NO_CS = {
    "UserGroupManager": ["UserAndGroupManagerDB"],
    "SEManager": ["SEManagerDB"],
    "SecurityManager": ["NoSecurityManager", "DirectorySecurityManager", "FullSecurityManager"],
    "DirectoryManager": ["DirectorySimpleTree", "DirectoryFlatTree", "DirectoryNodeTree", "DirectoryLevelTree"],
    "FileManager": ["FileManagerFlat", "FileManager"],
}
DEFAULT_MANAGER = {"UserGroupManager": ["UserAndGroupManagerDB"],
"SEManager": ["SEManagerDB"],
"SecurityManager": ["DirectorySecurityManagerWithDelete"],
"DirectoryManager": ["DirectoryClosure"],
"FileManager": ["FileManagerPs"],
}
DEFAULT_MANAGER_2 = {"UserGroupManager": ["UserAndGroupManagerDB"],
"SEManager": ["SEManagerDB"],
"SecurityManager": ["NoSecurityManager"],
"DirectoryManager": ["DirectoryLevelTree"],
"FileManager": ["FileManager"],
}
MANAGER_TO_TEST = DEFAULT_MANAGER
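# Hedged sketch (not part of the original test): to exercise every manager
# combination rather than only the default, a runner could iterate the
# cartesian product of the configured manager lists, e.g.
#   for combo in itertools.product(*MANAGER_TO_TEST.values()):
#       DATABASE_CONFIG.update(dict(zip(MANAGER_TO_TEST.keys(), combo)))
# before each FileCatalogDB instantiation.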
class FileCatalogDBTestCase(unittest.TestCase):
""" Base class for the FileCatalogDB test cases
"""
def setUp(self):
self.db = FileCatalogDB()
# for table in self.db._query( "Show tables;" )["Value"]:
# self.db.deleteEntries( table[0] )
self.db.setConfig(DATABASE_CONFIG)
def tearDown(self):
pass
# for table in self.db._query( "Show tables;" )["Value"]:
# self.db.deleteEntries( table[0] )
class SECase (FileCatalogDBTestCase):
def test_seOperations(self):
"""Testing SE related operation"""
# create SE
ret = self.db.addSE(seName, credDict)
if isAdmin:
self.assertTrue(ret["OK"], "addSE failed when adding new SE: %s" % ret)
seId = ret["Value"]
# create it again
ret = self.db.addSE(seName, credDict)
self.assertEqual(ret["Value"], seId, "addSE failed when adding existing SE: %s" % ret)
else:
self.assertEqual(
ret["OK"],
False,
"addSE should fail when adding new SE as non admin: %s" %
ret)
# remove it
ret = self.db.deleteSE(seName, credDict)
self.assertEqual(ret["OK"], True if isAdmin else False, "deleteE failed %s" % ret)
class UserGroupCase(FileCatalogDBTestCase):
def test_userOperations(self):
"""Testing the user related operations"""
expectedRes = None
if isAdmin:
print "Running UserTest in admin mode"
expectedRes = True
else:
print "Running UserTest in non admin mode"
expectedRes = False
# Add the user
result = self.db.addUser(testUser, credDict)
self.assertEqual(result['OK'], expectedRes, "AddUser failed when adding new user: %s" % result)
# Add an existing user
result = self.db.addUser(testUser, credDict)
self.assertEqual(
result['OK'],
expectedRes,
"AddUser failed when adding existing user: %s" %
result)
# Fetch the list of user
result = self.db.getUsers(credDict)
self.assertEqual(result['OK'], expectedRes, "getUsers failed: %s" % result)
if isAdmin:
# Check if our user is present
self.assertEqual(testUser in result['Value'], expectedRes, "getUsers failed: %s" % result)
# remove the user we created
result = self.db.deleteUser(testUser, credDict)
self.assertEqual(result['OK'], expectedRes, "deleteUser failed: %s" % result)
def test_groupOperations(self):
"""Testing the group related operations"""
expectedRes = None
if isAdmin:
print "Running UserTest in admin mode"
expectedRes = True
else:
print "Running UserTest in non admin mode"
expectedRes = False
# Create new group
result = self.db.addGroup(testGroup, credDict)
self.assertEqual(result['OK'], expectedRes, "AddGroup failed when adding new user: %s" % result)
result = self.db.addGroup(testGroup, credDict)
self.assertEqual(
result['OK'],
expectedRes,
"AddGroup failed when adding existing user: %s" %
result)
result = self.db.getGroups(credDict)
self.assertEqual(result['OK'], expectedRes, "getGroups failed: %s" % result)
if isAdmin:
self.assertEqual(testGroup in result['Value'], expectedRes)
result = self.db.deleteGroup(testGroup, credDict)
self.assertEqual(result['OK'], expectedRes, "deleteGroup failed: %s" % result)
class FileCase(FileCatalogDBTestCase):
def test_fileOperations(self):
"""
        Tests the File related operations.
        Note: this test does not require the SE to be defined in the CS.
"""
# Adding a new file
result = self.db.addFile({testFile: {'PFN': 'testfile',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '0'}}, credDict)
self.assertTrue(result['OK'], "addFile failed when adding new file %s" % result)
result = self.db.exists(testFile, credDict)
self.assertTrue(result['OK'])
self.assertEqual(result['Value'].get('Successful', {}).get(testFile),
testFile, "exists( testFile) should be the same lfn %s" % result)
result = self.db.exists({testFile: '1000'}, credDict)
self.assertTrue(result['OK'])
self.assertEqual(result['Value'].get('Successful', {}).get(testFile),
testFile, "exists( testFile : 1000) should be the same lfn %s" % result)
result = self.db.exists({testFile: {'GUID': '1000', 'PFN': 'blabla'}}, credDict)
self.assertTrue(result['OK'])
self.assertEqual(result['Value'].get('Successful', {}).get(testFile),
testFile, "exists( testFile : 1000) should be the same lfn %s" % result)
# In fact, we don't check if the GUID is correct...
result = self.db.exists({testFile: '1001'}, credDict)
self.assertTrue(result['OK'])
self.assertEqual(result['Value'].get('Successful', {}).get(testFile),
testFile, "exists( testFile : 1001) should be the same lfn %s" % result)
result = self.db.exists({testFile + '2': '1000'}, credDict)
self.assertTrue(result['OK'])
self.assertEqual(result['Value'].get('Successful', {}).get(testFile + '2'),
testFile, "exists( testFile2 : 1000) should return testFile %s" % result)
# Re-adding the same file
result = self.db.addFile({testFile: {'PFN': 'testfile',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '0'}}, credDict)
self.assertTrue(
result["OK"],
"addFile failed when adding existing file with same param %s" %
result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"addFile failed: it should be possible to add an existing lfn with same param %s" %
result)
# Adding same file with different param
result = self.db.addFile({testFile: {'PFN': 'testfile',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '1'}}, credDict)
self.assertTrue(
result["OK"],
"addFile failed when adding existing file with different parem %s" %
result)
self.assertTrue(
testFile in result["Value"]["Failed"],
"addFile failed: it should not be possible to add an existing lfn with different param %s" %
result)
result = self.db.addFile({testFile + '2': {'PFN': 'testfile',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '0'}}, credDict)
self.assertTrue(result["OK"], "addFile failed when adding existing file %s" % result)
self.assertTrue(
testFile +
'2' in result["Value"]["Failed"],
"addFile failed: it should not be possible to add a new lfn with existing GUID %s" %
result)
##################################################################################
# Setting existing status of existing file
result = self.db.setFileStatus({testFile: "AprioriGood"}, credDict)
self.assertTrue(
result["OK"],
"setFileStatus failed when setting existing status of existing file %s" %
result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"setFileStatus failed: %s should be in successful (%s)" %
(testFile,
result))
# Setting unexisting status of existing file
result = self.db.setFileStatus({testFile: "Happy"}, credDict)
self.assertTrue(
result["OK"],
"setFileStatus failed when setting un-existing status of existing file %s" %
result)
self.assertTrue(
testFile in result["Value"]["Failed"],
"setFileStatus should have failed %s" %
result)
# Setting existing status of unexisting file
result = self.db.setFileStatus({nonExistingFile: "Trash"}, credDict)
self.assertTrue(
result["OK"],
"setFileStatus failed when setting existing status of non-existing file %s" %
result)
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"setFileStatus failed: %s should be in failed (%s)" %
(nonExistingFile,
result))
##################################################################################
result = self.db.isFile([testFile, nonExistingFile], credDict)
self.assertTrue(result["OK"], "isFile failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"isFile : %s should be in Successful %s" %
(testFile,
result))
self.assertTrue(
result["Value"]["Successful"][testFile],
"isFile : %s should be seen as a file %s" %
(testFile,
result))
self.assertTrue(
nonExistingFile in result["Value"]["Successful"],
"isFile : %s should be in Successful %s" %
(nonExistingFile,
result))
self.assertTrue(result["Value"]["Successful"][nonExistingFile] is False,
"isFile : %s should be seen as a file %s" % (nonExistingFile, result))
result = self.db.changePathOwner({testFile: "toto", nonExistingFile: "tata"}, credDict)
self.assertTrue(result["OK"], "changePathOwner failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"changePathOwner : %s should be in Successful %s" %
(testFile,
result))
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"changePathOwner : %s should be in Failed %s" %
(nonExistingFile,
result))
result = self.db.changePathGroup({testFile: "toto", nonExistingFile: "tata"}, credDict)
self.assertTrue(result["OK"], "changePathGroup failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"changePathGroup : %s should be in Successful %s" %
(testFile,
result))
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"changePathGroup : %s should be in Failed %s" %
(nonExistingFile,
result))
result = self.db.changePathMode({testFile: 0o44, nonExistingFile: 0o44}, credDict)
self.assertTrue(result["OK"], "changePathMode failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"changePathMode : %s should be in Successful %s" %
(testFile,
result))
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"changePathMode : %s should be in Failed %s" %
(nonExistingFile,
result))
result = self.db.getFileSize([testFile, nonExistingFile], credDict)
self.assertTrue(result["OK"], "getFileSize failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"getFileSize : %s should be in Successful %s" %
(testFile,
result))
self.assertEqual(
result["Value"]["Successful"][testFile],
123,
"getFileSize got incorrect file size %s" %
result)
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"getFileSize : %s should be in Failed %s" %
(nonExistingFile,
result))
result = self.db.getFileMetadata([testFile, nonExistingFile], credDict)
self.assertTrue(result["OK"], "getFileMetadata failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"getFileMetadata : %s should be in Successful %s" %
(testFile,
result))
self.assertEqual(
result["Value"]["Successful"][testFile]["Owner"],
"toto",
"getFileMetadata got incorrect Owner %s" %
result)
self.assertEqual(
result["Value"]["Successful"][testFile]["Status"],
"AprioriGood",
"getFileMetadata got incorrect status %s" %
result)
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"getFileMetadata : %s should be in Failed %s" %
(nonExistingFile,
result))
# DOES NOT FOLLOW THE SUCCESSFUL/FAILED CONVENTION
# result = self.db.getFileDetails([testFile, nonExistingFile], credDict)
# self.assertTrue(result["OK"], "getFileDetails failed: %s" % result)
# self.assertTrue(
# testFile in result["Value"]["Successful"],
# "getFileDetails : %s should be in Successful %s" %
# (testFile,
# result))
# self.assertEqual(
# result["Value"]["Successful"][testFile]["Owner"],
# "toto",
# "getFileDetails got incorrect Owner %s" %
# result)
# self.assertTrue(
# nonExistingFile in result["Value"]["Failed"],
# "getFileDetails : %s should be in Failed %s" %
# (nonExistingFile,
# result))
        # ADD SOMETHING ABOUT FILE ANCESTORS AND DESCENDANTS
result = self.db.getSEDump('testSE')
self.assertTrue(result['OK'], "Error when getting SE dump %s" % result)
self.assertEqual(result['Value'], ((testFile, '0', 123),),
"Did not get the expected SE Dump %s" % result['Value'])
result = self.db.removeFile([testFile, nonExistingFile], credDict)
self.assertTrue(result["OK"], "removeFile failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"removeFile : %s should be in Successful %s" %
(testFile,
result))
self.assertTrue(
result["Value"]["Successful"][testFile],
"removeFile : %s should be in True %s" %
(testFile,
result))
self.assertTrue(
result["Value"]["Successful"][nonExistingFile],
"removeFile : %s should be in True %s" %
(nonExistingFile,
result))
class ReplicaCase(FileCatalogDBTestCase):
def test_replicaOperations(self):
"""
        Tests the Replica related operations.
        Note: this test does not require the SE to be defined in the CS.
"""
# Adding a new file
result = self.db.addFile({testFile: {'PFN': 'testfile',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '0'}}, credDict)
self.assertTrue(result['OK'], "addFile failed when adding new file %s" % result)
# Adding new replica
result = self.db.addReplica({testFile: {"PFN": "testFile", "SE": "otherSE"}}, credDict)
self.assertTrue(result['OK'], "addReplica failed when adding new Replica %s" % result)
self.assertTrue(
testFile in result['Value']["Successful"],
"addReplica failed when adding new Replica %s" %
result)
# Adding the same replica
result = self.db.addReplica({testFile: {"PFN": "testFile", "SE": "otherSE"}}, credDict)
self.assertTrue(result['OK'], "addReplica failed when adding new Replica %s" % result)
self.assertTrue(
testFile in result['Value']["Successful"],
"addReplica failed when adding new Replica %s" %
result)
# Adding replica of a non existing file
result = self.db.addReplica({nonExistingFile: {"PFN": "Idontexist", "SE": "otherSE"}}, credDict)
self.assertTrue(
result['OK'],
"addReplica failed when adding Replica to non existing Replica %s" %
result)
self.assertTrue(
nonExistingFile in result['Value']["Failed"],
"addReplica for non existing file should go in Failed %s" %
result)
# Setting existing status of existing Replica
result = self.db.setReplicaStatus({testFile: {"Status": "Trash", "SE": "otherSE"}}, credDict)
self.assertTrue(
result["OK"],
"setReplicaStatus failed when setting existing status of existing Replica %s" %
result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"setReplicaStatus failed: %s should be in successful (%s)" %
(testFile,
result))
# Setting non existing status of existing Replica
result = self.db.setReplicaStatus(
{testFile: {"Status": "randomStatus", "SE": "otherSE"}}, credDict)
self.assertTrue(
result["OK"],
"setReplicaStatus failed when setting non-existing status of existing Replica %s" %
result)
self.assertTrue(
testFile in result["Value"]["Failed"],
"setReplicaStatus failed: %s should be in Failed (%s)" %
(testFile,
result))
# Setting existing status of non-existing Replica
result = self.db.setReplicaStatus(
{testFile: {"Status": "Trash", "SE": "nonExistingSe"}}, credDict)
self.assertTrue(
result["OK"],
"setReplicaStatus failed when setting existing status of non-existing Replica %s" %
result)
self.assertTrue(
testFile in result["Value"]["Failed"],
"setReplicaStatus failed: %s should be in Failed (%s)" %
(testFile,
result))
# Setting existing status of non-existing File
result = self.db.setReplicaStatus(
{nonExistingFile: {"Status": "Trash", "SE": "nonExistingSe"}}, credDict)
self.assertTrue(
result["OK"],
"setReplicaStatus failed when setting existing status of non-existing File %s" %
result)
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"setReplicaStatus failed: %s should be in Failed (%s)" %
(nonExistingFile,
result))
# Getting existing status of existing Replica but not visible
result = self.db.getReplicaStatus({testFile: "testSE"}, credDict)
self.assertTrue(
result["OK"],
"getReplicaStatus failed when getting existing status of existing Replica %s" %
result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"getReplicaStatus failed: %s should be in Successful (%s)" %
(testFile,
result))
# Getting existing status of existing Replica but not visible
result = self.db.getReplicaStatus({testFile: "otherSE"}, credDict)
self.assertTrue(
result["OK"],
"getReplicaStatus failed when getting existing status of existing Replica but not visible %s" %
result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"getReplicaStatus failed: %s should be in Successful (%s)" %
(testFile,
result))
# Getting status of non-existing File but not visible
result = self.db.getReplicaStatus({nonExistingFile: "testSE"}, credDict)
self.assertTrue(
result["OK"],
"getReplicaStatus failed when getting status of non existing File %s" %
result)
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"getReplicaStatus failed: %s should be in failed (%s)" %
(nonExistingFile,
result))
# Getting replicas of existing File and non existing file, seeing all replicas
result = self.db.getReplicas([testFile, nonExistingFile], allStatus=True, credDict=credDict)
self.assertTrue(result["OK"], "getReplicas failed %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"getReplicas failed, %s should be in Successful %s" %
(testFile,
result))
self.assertEqual(
result["Value"]["Successful"][testFile], {
"otherSE": "", "testSE": ""}, "getReplicas failed, %s should be in Successful %s" %
(testFile, result))
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"getReplicas failed, %s should be in Failed %s" %
(nonExistingFile,
result))
# removing master replica
result = self.db.removeReplica({testFile: {"SE": "testSE"}}, credDict)
self.assertTrue(result['OK'], "removeReplica failed when removing master Replica %s" % result)
self.assertTrue(
testFile in result['Value']["Successful"],
"removeReplica failed when removing master Replica %s" %
result)
# removing non existing replica of existing File
result = self.db.removeReplica({testFile: {"SE": "nonExistingSe2"}}, credDict)
self.assertTrue(
result['OK'],
"removeReplica failed when removing non existing Replica %s" %
result)
self.assertTrue(
testFile in result['Value']["Successful"],
"removeReplica failed when removing new Replica %s" %
result)
# removing non existing replica of non existing file
result = self.db.removeReplica({nonExistingFile: {"SE": "nonExistingSe3"}}, credDict)
self.assertTrue(
result['OK'],
"removeReplica failed when removing replica of non existing File %s" %
result)
self.assertTrue(
nonExistingFile in result['Value']["Successful"],
"removeReplica of non existing file, %s should be in Successful %s" %
(nonExistingFile,
result))
# removing last replica
result = self.db.removeReplica({testFile: {"SE": "otherSE"}}, credDict)
self.assertTrue(result['OK'], "removeReplica failed when removing last Replica %s" % result)
self.assertTrue(
testFile in result['Value']["Successful"],
"removeReplica failed when removing last Replica %s" %
result)
# Cleaning after us
result = self.db.removeFile(testFile, credDict)
self.assertTrue(result["OK"], "removeFile failed: %s" % result)
class DirectoryCase(FileCatalogDBTestCase):
def test_directoryOperations(self):
"""
        Tests the Directory related operations.
        Note: this test does not require the SE to be defined in the CS.
"""
# Adding a new directory
result = self.db.createDirectory(testDir, credDict)
self.assertTrue(result['OK'], "addDirectory failed when adding new directory %s" % result)
result = self.db.addFile({testFile: {'PFN': 'testfile',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '0'}}, credDict)
self.assertTrue(result['OK'], "addFile failed when adding new file %s" % result)
# Re-adding the same directory (CAUTION, different from addFile)
result = self.db.createDirectory(testDir, credDict)
self.assertTrue(result["OK"], "addDirectory failed when adding existing directory %s" % result)
self.assertTrue(
testDir in result["Value"]["Successful"],
"addDirectory failed: it should be possible to add an existing lfn %s" %
result)
result = self.db.isDirectory([testDir, nonExistingDir], credDict)
self.assertTrue(result["OK"], "isDirectory failed: %s" % result)
self.assertTrue(
testDir in result["Value"]["Successful"],
"isDirectory : %s should be in Successful %s" %
(testDir,
result))
self.assertTrue(
result["Value"]["Successful"][testDir],
"isDirectory : %s should be seen as a directory %s" %
(testDir,
result))
self.assertTrue(
nonExistingDir in result["Value"]["Successful"],
"isDirectory : %s should be in Successful %s" %
(nonExistingDir,
result))
self.assertTrue(
result["Value"]["Successful"][nonExistingDir] is False,
"isDirectory : %s should be seen as a directory %s" %
(nonExistingDir,
result))
result = self.db.getDirectorySize([testDir, nonExistingDir], False, False, credDict)
self.assertTrue(result["OK"], "getDirectorySize failed: %s" % result)
self.assertTrue(
testDir in result["Value"]["Successful"],
"getDirectorySize : %s should be in Successful %s" %
(testDir,
result))
self.assertEqual(result["Value"]["Successful"][testDir],
{'LogicalFiles': 1,
'LogicalDirectories': 0,
'LogicalSize': 123},
"getDirectorySize got incorrect directory size %s" % result)
self.assertTrue(
nonExistingDir in result["Value"]["Failed"],
"getDirectorySize : %s should be in Failed %s" %
(nonExistingDir,
result))
result = self.db.getDirectorySize([testDir, nonExistingDir], False, True, credDict)
self.assertTrue(result["OK"], "getDirectorySize (calc) failed: %s" % result)
self.assertTrue(
testDir in result["Value"]["Successful"],
"getDirectorySize (calc): %s should be in Successful %s" %
(testDir,
result))
self.assertEqual(result["Value"]["Successful"][testDir],
{'LogicalFiles': 1,
'LogicalDirectories': 0,
'LogicalSize': 123},
"getDirectorySize got incorrect directory size %s" % result)
self.assertTrue(
nonExistingDir in result["Value"]["Failed"],
"getDirectorySize (calc) : %s should be in Failed %s" %
(nonExistingDir,
result))
result = self.db.listDirectory([parentDir, testDir, nonExistingDir], credDict)
self.assertTrue(result["OK"], "listDirectory failed: %s" % result)
self.assertTrue(
parentDir in result["Value"]["Successful"],
"listDirectory : %s should be in Successful %s" %
(parentDir,
result))
self.assertEqual(result["Value"]["Successful"][parentDir]["SubDirs"].keys(), [testDir],
"listDir : incorrect content for %s (%s)" % (parentDir, result))
self.assertTrue(
testDir in result["Value"]["Successful"],
"listDirectory : %s should be in Successful %s" %
(testDir,
result))
self.assertEqual(result["Value"]["Successful"][testDir]["Files"].keys(), [testFile.split("/")[-1]],
"listDir : incorrect content for %s (%s)" % (testDir, result))
self.assertTrue(
nonExistingDir in result["Value"]["Failed"],
"listDirectory : %s should be in Failed %s" %
(nonExistingDir,
result))
# We do it twice to make sure that updating something to the value it
# already has still returns a success when the operation is allowed
for attempt in xrange(2):
print "Attempt %s" % (attempt + 1)
# Only admin can change path group
resultM = self.db.changePathMode({parentDir: 0o777}, credDict)
result = self.db.changePathOwner({parentDir: "toto"}, credDict)
resultG = self.db.changePathGroup({parentDir: "toto"}, credDict)
result2 = self.db.getDirectoryMetadata([parentDir, testDir], credDict)
self.assertTrue(result["OK"], "changePathOwner failed: %s" % result)
self.assertTrue(resultG["OK"], "changePathOwner failed: %s" % result)
self.assertTrue(resultM["OK"], "changePathMode failed: %s" % result)
self.assertTrue(result2["OK"], "getDirectoryMetadata failed: %s" % result)
# Since we were the owner we should have been able to do it in any case, admin or not
self.assertTrue(
parentDir in resultM["Value"]["Successful"],
"changePathMode : %s should be in Successful %s" %
(parentDir,
resultM))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
parentDir,
{}).get('Mode'),
0o777,
"parentDir should have mode %s %s" %
(0o777,
result2))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
testDir,
{}).get('Mode'),
0o775,
"testDir should not have changed %s" %
result2)
if isAdmin:
self.assertTrue(
parentDir in result["Value"]["Successful"],
"changePathOwner : %s should be in Successful %s" %
(parentDir,
result))
self.assertEqual(
result2['Value'].get('Successful', {}).get(parentDir, {}).get('Owner'),
'toto',
"parentDir should belong to toto %s" % result2)
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
testDir,
{}).get('Owner'),
proxyUser,
"testDir should not have changed %s" %
result2)
self.assertTrue(
parentDir in resultG["Value"]["Successful"],
"changePathGroup : %s should be in Successful %s" %
(parentDir,
resultG))
self.assertEqual(
result2['Value'].get('Successful', {}).get(parentDir, {}).get('OwnerGroup'),
'toto',
"parentDir should belong to group toto %s" % result2)
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
testDir,
{}).get('OwnerGroup'),
proxyGroup,
"testDir should not have changed %s" %
result2)
else:
# depends on the policy manager, so these checks are commented out
# self.assertTrue( parentDir in result["Value"]["Failed"], "changePathOwner : \
# %s should be in Failed %s" % ( parentDir, result ) )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( parentDir, {} ).get( 'Owner' ), \
# proxyUser, "parentDir should not have changed %s" % result2 )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( testDir, {} ).get( 'Owner' ), \
# proxyUser, "testDir should not have changed %s" % result2 )
# self.assertTrue( parentDir in resultG["Value"]["Failed"], \
# "changePathGroup : %s should be in Failed %s" % ( parentDir, resultG ) )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( parentDir, {} ).get( 'OwnerGroup' ), \
# proxyGroup, "parentDir should not have changed %s" % result2 )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( testDir, {} ).get( 'OwnerGroup' ), \
# proxyGroup, "testDir should not have changed %s" % result2 )
pass
# Only admin can change path group
resultM = self.db.changePathMode({parentDir: 0o777}, credDict, True)
result = self.db.changePathOwner({parentDir: "toto"}, credDict, True)
resultG = self.db.changePathGroup({parentDir: "toto"}, credDict, True)
result2 = self.db.getDirectoryMetadata([parentDir, testDir], credDict)
result3 = self.db.getFileMetadata(testFile, credDict)
self.assertTrue(result["OK"], "changePathOwner failed: %s" % result)
self.assertTrue(resultG["OK"], "changePathOwner failed: %s" % result)
self.assertTrue(resultM["OK"], "changePathMode failed: %s" % result)
self.assertTrue(result2["OK"], "getDirectoryMetadata failed: %s" % result)
self.assertTrue(result3["OK"], "getFileMetadata failed: %s" % result)
# Since we were the owner we should have been able to do it in any case, admin or not
self.assertTrue(
parentDir in resultM["Value"]["Successful"],
"changePathGroup : %s should be in Successful %s" %
(parentDir,
resultM))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
parentDir,
{}).get('Mode'),
0o777,
"parentDir should have mode %s %s" %
(0o777,
result2))
self.assertEqual(
result2['Value'].get(
'Successful', {}).get(
testDir, {}).get('Mode'), 0o777, "testDir should have mode %s %s" %
(0o777, result2))
self.assertEqual(
result3['Value'].get(
'Successful', {}).get(
testFile, {}).get('Mode'), 0o777, "testFile should have mode %s %s" %
(0o777, result3))
if isAdmin:
self.assertTrue(
parentDir in result["Value"]["Successful"],
"changePathOwner : %s should be in Successful %s" %
(parentDir,
result))
self.assertEqual(
result2['Value'].get('Successful', {}).get(parentDir, {}).get('Owner'),
'toto',
"parentDir should belong to toto %s" % result2)
self.assertEqual(
result2['Value'].get('Successful', {}).get(testDir, {}).get('Owner'),
'toto',
"testDir should belong to toto %s" % result2)
self.assertEqual(
result3['Value'].get('Successful', {}).get(testFile, {}).get('Owner'),
'toto',
"testFile should belong to toto %s" % result3)
self.assertTrue(
parentDir in resultG["Value"]["Successful"],
"changePathGroup : %s should be in Successful %s" %
(parentDir,
resultG))
self.assertEqual(
result2['Value'].get('Successful', {}).get(parentDir, {}).get('OwnerGroup'),
'toto',
"parentDir should belong to group toto %s" % result2)
self.assertEqual(
result2['Value'].get('Successful', {}).get(testDir, {}).get('OwnerGroup'),
'toto',
"testDir should belong to group toto %s" % result2)
self.assertEqual(
result3['Value'].get('Successful', {}).get(testFile, {}).get('OwnerGroup'),
'toto',
"testFile should belong to group toto %s" % result3)
else:
# depends on the policy manager, so these checks are commented out
# self.assertTrue( parentDir in result["Value"]["Failed"], \
# "changePathOwner : %s should be in Failed %s" % ( parentDir, result ) )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( parentDir, {} ).get( 'Owner' ), \
# proxyUser, "parentDir should not have changed %s" % result2 )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( testDir, {} ).get( 'Owner' ), \
# proxyUser, "testDir should not have changed %s" % result2 )
# self.assertEqual( result3['Value'].get( 'Successful', {} ).get( testFile, {} ).get( 'Owner' ), \
# proxyUser, "testFile should not have changed %s" % result3 )
#
# self.assertTrue( parentDir in resultG["Value"]["Failed"], \
# "changePathGroup : %s should be in Failed %s" % ( parentDir, resultG ) )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( parentDir, {} ).get( 'OwnerGroup' ), \
# proxyGroup, "parentDir should not have changed %s" % result2 )
# self.assertEqual( result2['Value'].get( 'Successful', {} ).get( testDir, {} ).get( 'OwnerGroup' ), \
# proxyGroup, "testDir should not have changed %s" % result2 )
# self.assertEqual( result3['Value'].get( 'Successful', {} ).get( testFile, {} ).get( 'OwnerGroup' ), \
# proxyGroup, "testFile should not have changed %s" % result3 )
pass
# Cleaning after us
result = self.db.removeFile(testFile, credDict)
self.assertTrue(result["OK"], "removeFile failed: %s" % result)
pathParts = testDir.split('/')[1:]
startDir = '/'
pathToRemove = []
for part in pathParts:
startDir = os.path.join(startDir, part)
pathToRemove.append(startDir)
pathToRemove.reverse()
for toRemove in pathToRemove:
result = self.db.removeDirectory(toRemove, credDict)
self.assertTrue(result["OK"], "removeDirectory failed: %s" % result)
class DirectoryUsageCase(FileCatalogDBTestCase):
def getPhysicalSize(self, sizeDict, dirName, seName):
""" Extract the information from a ret dictionary
and return the tuple (files, size) for a given
directory and an SE
"""
val = sizeDict[dirName]['PhysicalSize'][seName]
files = val['Files']
size = val['Size']
return (files, size)
def getLogicalSize(self, sizeDict, dirName):
""" Extract the information from a ret dictionary
and return the tuple (files, size) for a given
directory
"""
files = sizeDict[dirName]['LogicalFiles']
size = sizeDict[dirName]['LogicalSize']
return (files, size)
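# A hedged sketch (not taken from an actual run) of the dictionary shape
# the two helpers above assume; the field names come from the accesses they
# make, and the real getDirectorySize payload may carry additional keys:
#
#   sizeDict = {
#       '/sizeTest/d1': {
#           'LogicalFiles': 2,
#           'LogicalSize': 6000000001,
#           'PhysicalSize': {
#               'se1': {'Files': 1, 'Size': 3000000000},
#               'se2': {'Files': 1, 'Size': 3000000001},
#           },
#       },
#   }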
def getAndCompareDirectorySize(self, dirList):
""" Fetch the directory size from the DirectoryUsage table
and calculate it, compare the results, and then return
the values
"""
retTable = self.db.getDirectorySize(dirList, True, False, credDict)
retCalc = self.db.getDirectorySize(dirList, True, True, credDict)
self.assertTrue(retTable["OK"])
self.assertTrue(retCalc["OK"])
succTable = retTable['Value']['Successful']
succCalc = retCalc['Value']['Successful']
# Since the values are simple types, == compares the dicts recursively :-)
retEquals = (succTable == succCalc)
self.assertTrue(retEquals, "Calc and table results different %s %s" % (succTable, succCalc))
return retTable
def test_directoryUsage(self):
"""Testing DirectoryUsage related operation"""
# create SE
# Only admin can run that
if not isAdmin:
return
d1 = '/sizeTest/d1'
d2 = '/sizeTest/d2'
f1 = d1 + '/f1'
f2 = d1 + '/f2'
f3 = d2 + '/f3'
f1Size = 3000000000
f2Size = 3000000001
f3Size = 3000000002
# f1Size = 1
# f2Size = 2
# f3Size = 5
for sen in ['se1', 'se2', 'se3']:
ret = self.db.addSE(sen, credDict)
self.assertTrue(ret["OK"])
for din in [d1, d2]:
ret = self.db.createDirectory(din, credDict)
self.assertTrue(ret["OK"])
ret = self.db.addFile({f1: {'PFN': 'f1se1',
'SE': 'se1',
'Size': f1Size,
'GUID': '1002',
'Checksum': '1'},
f2: {'PFN': 'f2se2',
'SE': 'se2',
'Size': f2Size,
'GUID': '1001',
'Checksum': '2'}}, credDict)
self.assertTrue(ret["OK"])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
d1s1 = self.getPhysicalSize(val, d1, 'se1')
d1s2 = self.getPhysicalSize(val, d1, 'se2')
d1l = self.getLogicalSize(val, d1)
self.assertEqual(d1s1, (1, f1Size), "Unexpected size %s, expected %s" % (d1s1, (1, f1Size)))
self.assertEqual(d1s2, (1, f2Size), "Unexpected size %s, expected %s" % (d1s2, (1, f2Size)))
self.assertEqual(
d1l, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
(d1l, (2, f1Size + f2Size)))
ret = self.db.addReplica({f1: {"PFN": "f1se2", "SE": "se2"},
f2: {"PFN": "f1se3", "SE": "se3"}},
credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
d1s1 = self.getPhysicalSize(val, d1, 'se1')
d1s2 = self.getPhysicalSize(val, d1, 'se2')
d1s3 = self.getPhysicalSize(val, d1, 'se3')
d1l = self.getLogicalSize(val, d1)
self.assertEqual(d1s1, (1, f1Size), "Unexpected size %s, expected %s" % (d1s1, (1, f1Size)))
self.assertEqual(
d1s2, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
(d1s2, (2, f1Size + f2Size)))
self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
self.assertEqual(
d1l, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
(d1l, (2, f1Size + f2Size)))
ret = self.db.removeFile([f1], credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
# Here we should have the KeyError, since there are no files left on se1 in principle
try:
d1s1 = self.getPhysicalSize(val, d1, 'se1')
except KeyError:
d1s1 = (0, 0)
d1s2 = self.getPhysicalSize(val, d1, 'se2')
d1s3 = self.getPhysicalSize(val, d1, 'se3')
d1l = self.getLogicalSize(val, d1)
self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
self.assertEqual(d1s2, (1, f2Size), "Unexpected size %s, expected %s" % (d1s2, (1, f2Size)))
self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
self.assertEqual(d1l, (1, f2Size), "Unexpected size %s, expected %s" % (d1l, (1, f2Size)))
ret = self.db.removeReplica({f2: {"SE": "se2"}}, credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
# Here we should have the KeyError, since there are no files left on se2 in principle
try:
d1s2 = self.getPhysicalSize(val, d1, 'se2')
except KeyError:
d1s2 = (0, 0)
d1s3 = self.getPhysicalSize(val, d1, 'se3')
d1l = self.getLogicalSize(val, d1)
self.assertEqual(d1s2, (0, 0), "Unexpected size %s, expected %s" % (d1s2, (0, 0)))
self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
self.assertEqual(d1l, (1, f2Size), "Unexpected size %s, expected %s" % (d1l, (1, f2Size)))
ret = self.db.addFile({f1: {'PFN': 'f1se1',
'SE': 'se1',
'Size': f1Size,
'GUID': '1002',
'Checksum': '1'},
f3: {'PFN': 'f3se3',
'SE': 'se3',
'Size': f3Size,
'GUID': '1003',
'Checksum': '3'}}, credDict)
self.assertTrue(ret["OK"])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
d1s1 = self.getPhysicalSize(val, d1, 'se1')
d1s3 = self.getPhysicalSize(val, d1, 'se3')
d2s3 = self.getPhysicalSize(val, d2, 'se3')
d1l = self.getLogicalSize(val, d1)
d2l = self.getLogicalSize(val, d2)
self.assertEqual(d1s1, (1, f1Size), "Unexpected size %s, expected %s" % (d1s1, (1, f1Size)))
self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
self.assertEqual(d2s3, (1, f3Size), "Unexpected size %s, expected %s" % (d2s3, (1, f3Size)))
self.assertEqual(
d1l, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
(d1l, (2, f1Size + f2Size)))
self.assertEqual(d2l, (1, f3Size), "Unexpected size %s, expected %s" % (d2l, (1, f3Size)))
ret = self.db.removeReplica({f1: {"SE": "se1"}}, credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
try:
d1s1 = self.getPhysicalSize(val, d1, 'se1')
except KeyError:
d1s1 = (0, 0)
d1s3 = self.getPhysicalSize(val, d1, 'se3')
d2s3 = self.getPhysicalSize(val, d2, 'se3')
d1l = self.getLogicalSize(val, d1)
d2l = self.getLogicalSize(val, d2)
self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
self.assertEqual(d2s3, (1, f3Size), "Unexpected size %s, expected %s" % (d2s3, (1, f3Size)))
# This one is silly... there are no replicas of f1, but since the file is still there,
# the logical size does not change
self.assertEqual(
d1l, (2, f1Size + f2Size), "Unexpected size %s, expected %s" %
(d1l, (2, f1Size + f2Size)))
self.assertEqual(d2l, (1, f3Size), "Unexpected size %s, expected %s" % (d2l, (1, f3Size)))
ret = self.db.removeFile([f1], credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
try:
d1s1 = self.getPhysicalSize(val, d1, 'se1')
except KeyError:
d1s1 = (0, 0)
d1s3 = self.getPhysicalSize(val, d1, 'se3')
d2s3 = self.getPhysicalSize(val, d2, 'se3')
d1l = self.getLogicalSize(val, d1)
d2l = self.getLogicalSize(val, d2)
self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
self.assertEqual(d1s3, (1, f2Size), "Unexpected size %s, expected %s" % (d1s3, (1, f2Size)))
self.assertEqual(d2s3, (1, f3Size), "Unexpected size %s, expected %s" % (d2s3, (1, f3Size)))
self.assertEqual(d1l, (1, f2Size), "Unexpected size %s, expected %s" % (d1l, (1, f2Size)))
self.assertEqual(d2l, (1, f3Size), "Unexpected size %s, expected %s" % (d2l, (1, f3Size)))
ret = self.db.removeReplica({f2: {"SE": "se3"},
f3: {"SE": "se3"}}, credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
try:
d1s1 = self.getPhysicalSize(val, d1, 'se1')
except KeyError:
d1s1 = (0, 0)
try:
d1s3 = self.getPhysicalSize(val, d1, 'se3')
except KeyError:
d1s3 = (0, 0)
try:
d2s3 = self.getPhysicalSize(val, d2, 'se3')
except KeyError:
d2s3 = (0, 0)
d1l = self.getLogicalSize(val, d1)
d2l = self.getLogicalSize(val, d2)
self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
self.assertEqual(d1s3, (0, 0), "Unexpected size %s, expected %s" % (d1s3, (0, 0)))
self.assertEqual(d2s3, (0, 0), "Unexpected size %s, expected %s" % (d2s3, (0, 0)))
# This one is silly... f2 and f3 have no replicas left, but since the
# files themselves are still there, the logical sizes do not change
self.assertEqual(d1l, (1, f2Size), "Unexpected size %s, expected %s" % (d1l, (1, f2Size)))
self.assertEqual(d2l, (1, f3Size), "Unexpected size %s, expected %s" % (d2l, (1, f3Size)))
ret = self.db.removeFile([f2, f3], credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1, d2])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
try:
d1s1 = self.getPhysicalSize(val, d1, 'se1')
except KeyError:
d1s1 = (0, 0)
try:
d1s3 = self.getPhysicalSize(val, d1, 'se3')
except KeyError:
d1s3 = (0, 0)
try:
d2s3 = self.getPhysicalSize(val, d2, 'se3')
except KeyError:
d2s3 = (0, 0)
d1l = self.getLogicalSize(val, d1)
d2l = self.getLogicalSize(val, d2)
self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
self.assertEqual(d1s3, (0, 0), "Unexpected size %s, expected %s" % (d1s3, (0, 0)))
self.assertEqual(d2s3, (0, 0), "Unexpected size %s, expected %s" % (d2s3, (0, 0)))
# Now the files themselves are gone as well,
# so the logical sizes drop to zero
self.assertEqual(d1l, (0, 0), "Unexpected size %s, expected %s" % (d1l, (0, 0)))
self.assertEqual(d2l, (0, 0), "Unexpected size %s, expected %s" % (d2l, (0, 0)))
# Removing Replicas and Files from the same directory
ret = self.db.addFile({f1: {'PFN': 'f1se1',
'SE': 'se1',
'Size': f1Size,
'GUID': '1002',
'Checksum': '1'},
f2: {'PFN': 'f2se2',
'SE': 'se1',
'Size': f2Size,
'GUID': '1001',
'Checksum': '2'}}, credDict)
ret = self.db.removeReplica({f1: {"SE": "se1"},
f2: {"SE": "se1"}}, credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
try:
d1s1 = self.getPhysicalSize(val, d1, 'se1')
except KeyError:
d1s1 = (0, 0)
self.assertEqual(d1s1, (0, 0), "Unexpected size %s, expected %s" % (d1s1, (0, 0)))
ret = self.db.removeFile([f1, f2], credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
d1l = self.getLogicalSize(val, d1)
self.assertEqual(d1l, (0, 0), "Unexpected size %s, expected %s" % (d1l, (0, 0)))
# Try removing a replica from an SE on which the file has no replica
ret = self.db.addFile({f1: {'PFN': 'f1se1',
'SE': 'se1',
'Size': f1Size,
'GUID': '1002',
'Checksum': '1'}}, credDict)
ret = self.db.removeReplica({f1: {"SE": "se2"}}, credDict)
self.assertTrue(ret['OK'])
ret = self.getAndCompareDirectorySize([d1])
self.assertTrue(ret["OK"])
val = ret['Value']['Successful']
try:
d1s2 = self.getPhysicalSize(val, d1, 'se2')
except KeyError:
d1s2 = (0, 0)
self.assertEqual(d1s2, (0, 0), "Unexpected size %s, expected %s" % (d1s2, (0, 0)))
if __name__ == '__main__':
managerTypes = MANAGER_TO_TEST.keys()
all_combinations = list(itertools.product(*MANAGER_TO_TEST.values()))
numberOfManager = len(managerTypes)
for setup in all_combinations:
print "Running with:"
print ("".join(["\t %s : %s\n" % (managerTypes[i], setup[i]) for i in xrange(numberOfManager)]))
for i in xrange(numberOfManager):
DATABASE_CONFIG[managerTypes[i]] = setup[i]
suite = unittest.defaultTestLoader.loadTestsFromTestCase(SECase)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(UserGroupCase))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(FileCase))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ReplicaCase))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(DirectoryCase))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(DirectoryUsageCase))
# First run without admin privileges:
isAdmin = False
if FC_MANAGEMENT in credDict['properties']:
credDict['properties'].remove(FC_MANAGEMENT)
print "Running test without admin privileges"
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
# Then run with admin privileges:
isAdmin = True
if FC_MANAGEMENT not in credDict['properties']:
credDict['properties'].append(FC_MANAGEMENT)
print "Running test with admin privileges"
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not testResult.wasSuccessful())
| gpl-3.0 |
Eric89GXL/mne-python | mne/datasets/sleep_physionet/temazepam.py | 8 | 4230 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Joan Massich <[email protected]>
#
# License: BSD Style.
import numpy as np
from ...utils import verbose
from ._utils import _fetch_one, _data_path, TEMAZEPAM_SLEEP_RECORDS
from ._utils import _check_subjects
data_path = _data_path # expose _data_path(..) as data_path(..)
BASE_URL = 'https://physionet.org/physiobank/database/sleep-edfx/sleep-telemetry/' # noqa: E501
@verbose
def fetch_data(subjects, recording=[b'Placebo', 'temazepam'],
path=None, force_update=False,
update_path=None, base_url=BASE_URL, verbose=None):
"""Get paths to local copies of PhysioNet Polysomnography dataset files.
This will fetch data from the publicly available subjects from PhysioNet's
study of Temazepam effects on sleep [1]_. This corresponds to
a set of 22 subjects. Subjects had mild difficulty falling asleep
but were otherwise healthy.
See more details in the `physionet website
<https://physionet.org/physiobank/database/sleep-edfx/>`_.
Parameters
----------
subjects : list of int
The subjects to use. Can be in the range of 0-21 (inclusive).
path : None | str
Location of where to look for the PhysioNet data storing location.
If None, the environment variable or config parameter
``MNE_DATASETS_PHYSIONET_SLEEP_PATH`` is used. If it doesn't exist,
the "~/mne_data" directory is used. If the Polysomnography dataset
is not found under the given path, the data
will be automatically downloaded to the specified folder.
force_update : bool
Force update of the dataset even if a local copy exists.
update_path : bool | None
If True, set the MNE_DATASETS_PHYSIONET_SLEEP_PATH in mne-python
config to the given path. If None, the user is prompted.
%(verbose)s
Returns
-------
paths : list
List of local data paths of the given type.
Notes
-----
For example, one could do:
>>> from mne.datasets import sleep_physionet
>>> sleep_physionet.temazepam.fetch_data(subjects=[1]) # doctest: +SKIP
This would download data for subject 1 if it isn't there already.
References
----------
.. [1] B Kemp, AH Zwinderman, B Tuk, HAC Kamphuisen, JJL Oberyé. Analysis
of a sleep-dependent neuronal feedback loop: the slow-wave
microcontinuity of the EEG. IEEE-BME 47(9):1185-1194 (2000).
.. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000)
PhysioBank, PhysioToolkit, and PhysioNet: Components of a New
Research Resource for Complex Physiologic Signals.
Circulation 101(23):e215-e220
See Also
--------
:func:`mne.datasets.sleep_physionet.age.fetch_data`
"""
records = np.loadtxt(TEMAZEPAM_SLEEP_RECORDS,
skiprows=1,
delimiter=',',
usecols=(0, 3, 6, 7, 8, 9),
dtype={'names': ('subject', 'record', 'hyp sha',
'psg sha', 'hyp fname', 'psg fname'),
'formats': ('<i2', '<S15', 'S40', 'S40',
'<S22', '<S16')}
)
_check_subjects(subjects, 22)
path = data_path(path=path, update_path=update_path)
params = [path, force_update, base_url]
fnames = []
for subject in subjects: # all the subjects are present at this point
for idx in np.where(records['subject'] == subject)[0]:
if records['record'][idx] == b'Placebo':
psg_fname = _fetch_one(records['psg fname'][idx].decode(),
records['psg sha'][idx].decode(),
*params)
hyp_fname = _fetch_one(records['hyp fname'][idx].decode(),
records['hyp sha'][idx].decode(),
*params)
fnames.append([psg_fname, hyp_fname])
return fnames
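# A minimal usage sketch, not part of the module: read the first fetched
# PSG/hypnogram pair with the standard MNE readers. ``read_raw_edf``,
# ``read_annotations`` and ``Raw.set_annotations`` are assumed to be
# available in the installed MNE version; the subject index is illustrative.
#
# from mne.datasets.sleep_physionet import temazepam
# from mne.io import read_raw_edf
# from mne import read_annotations
#
# [(psg_file, hyp_file)] = temazepam.fetch_data(subjects=[0])
# raw = read_raw_edf(psg_file)
# raw.set_annotations(read_annotations(hyp_file))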
| bsd-3-clause |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/virtual_network_peering_py3.py | 1 | 4752 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class VirtualNetworkPeering(SubResource):
"""Peerings in a virtual network resource.
:param id: Resource ID.
:type id: str
:param allow_virtual_network_access: Whether the VMs in the linked virtual
network space would be able to access all the VMs in local Virtual network
space.
:type allow_virtual_network_access: bool
:param allow_forwarded_traffic: Whether the forwarded traffic from the VMs
in the remote virtual network will be allowed/disallowed.
:type allow_forwarded_traffic: bool
:param allow_gateway_transit: If gateway links can be used in remote
virtual networking to link to this virtual network.
:type allow_gateway_transit: bool
:param use_remote_gateways: If remote gateways can be used on this virtual
network. If the flag is set to true, and allowGatewayTransit on remote
peering is also true, virtual network will use gateways of remote virtual
network for transit. Only one peering can have this flag set to true. This
flag cannot be set if virtual network already has a gateway.
:type use_remote_gateways: bool
:param remote_virtual_network: The reference of the remote virtual
network. The remote virtual network can be in the same or different region
(preview). See here to register for the preview and learn more
(https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-create-peering).
:type remote_virtual_network:
~azure.mgmt.network.v2017_10_01.models.SubResource
:param remote_address_space: The reference of the remote virtual network
address space.
:type remote_address_space:
~azure.mgmt.network.v2017_10_01.models.AddressSpace
:param peering_state: The status of the virtual network peering. Possible
values are 'Initiated', 'Connected', and 'Disconnected'. Possible values
include: 'Initiated', 'Connected', 'Disconnected'
:type peering_state: str or
~azure.mgmt.network.v2017_10_01.models.VirtualNetworkPeeringState
:param provisioning_state: The provisioning state of the resource.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'allow_virtual_network_access': {'key': 'properties.allowVirtualNetworkAccess', 'type': 'bool'},
'allow_forwarded_traffic': {'key': 'properties.allowForwardedTraffic', 'type': 'bool'},
'allow_gateway_transit': {'key': 'properties.allowGatewayTransit', 'type': 'bool'},
'use_remote_gateways': {'key': 'properties.useRemoteGateways', 'type': 'bool'},
'remote_virtual_network': {'key': 'properties.remoteVirtualNetwork', 'type': 'SubResource'},
'remote_address_space': {'key': 'properties.remoteAddressSpace', 'type': 'AddressSpace'},
'peering_state': {'key': 'properties.peeringState', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, allow_virtual_network_access: bool=None, allow_forwarded_traffic: bool=None, allow_gateway_transit: bool=None, use_remote_gateways: bool=None, remote_virtual_network=None, remote_address_space=None, peering_state=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
super(VirtualNetworkPeering, self).__init__(id=id, **kwargs)
self.allow_virtual_network_access = allow_virtual_network_access
self.allow_forwarded_traffic = allow_forwarded_traffic
self.allow_gateway_transit = allow_gateway_transit
self.use_remote_gateways = use_remote_gateways
self.remote_virtual_network = remote_virtual_network
self.remote_address_space = remote_address_space
self.peering_state = peering_state
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
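# A brief construction sketch (illustrative only, not part of the generated
# SDK file): peer a local virtual network with a remote one referenced by
# its ARM resource ID. The resource ID below is a placeholder.
#
# from azure.mgmt.network.v2017_10_01.models import SubResource
#
# peering = VirtualNetworkPeering(
#     allow_virtual_network_access=True,
#     allow_forwarded_traffic=False,
#     allow_gateway_transit=False,
#     use_remote_gateways=False,
#     remote_virtual_network=SubResource(
#         id='/subscriptions/<sub>/resourceGroups/<rg>/providers/'
#            'Microsoft.Network/virtualNetworks/<remote-vnet>'))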
| mit |
justnom/ansible-modules-core | cloud/openstack/os_image.py | 109 | 6129 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#TODO(mordred): we need to support "location"(v1) and "locations"(v2)
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_image
short_description: Add/Delete images from OpenStack Cloud
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add or Remove images from the OpenStack Image Repository
options:
name:
description:
- Name that has to be given to the image
required: true
default: None
disk_format:
description:
- The format of the disk that is getting uploaded
required: false
default: qcow2
container_format:
description:
- The format of the container
required: false
default: bare
owner:
description:
- The owner of the image
required: false
default: None
min_disk:
description:
- The minimum disk space required to deploy this image
required: false
default: None
min_ram:
description:
- The minimum ram required to deploy this image
required: false
default: None
is_public:
description:
- Whether the image can be accessed publicly. Note that publicizing an image requires admin role by default.
required: false
default: 'no'
filename:
description:
- The path to the file which has to be uploaded
required: false
default: None
ramdisk:
description:
- The name of an existing ramdisk image that will be associated with this image
required: false
default: None
kernel:
description:
- The name of an existing kernel image that will be associated with this image
required: false
default: None
properties:
description:
- Additional properties to be associated with this image
required: false
default: {}
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
requirements: ["shade"]
'''
EXAMPLES = '''
# Upload an image from a local file named cirros-0.3.0-x86_64-disk.img
- os_image:
auth:
auth_url: http://localhost/auth/v2.0
username: admin
password: passme
project_name: admin
name: cirros
container_format: bare
disk_format: qcow2
state: present
filename: cirros-0.3.0-x86_64-disk.img
kernel: cirros-vmlinuz
ramdisk: cirros-initrd
properties:
cpu_arch: x86_64
distro: ubuntu
'''
def main():
argument_spec = openstack_full_argument_spec(
name = dict(required=True),
disk_format = dict(default='qcow2', choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso']),
container_format = dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova']),
owner = dict(default=None),
min_disk = dict(default=None),
min_ram = dict(default=None),
is_public = dict(default=False),
filename = dict(default=None),
ramdisk = dict(default=None),
kernel = dict(default=None),
properties = dict(default={}),
state = dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
changed = False
image = cloud.get_image(name_or_id=module.params['name'])
if module.params['state'] == 'present':
if not image:
image = cloud.create_image(
name=module.params['name'],
filename=module.params['filename'],
disk_format=module.params['disk_format'],
container_format=module.params['container_format'],
wait=module.params['wait'],
timeout=module.params['timeout']
)
changed = True
if not module.params['wait']:
module.exit_json(changed=changed, image=image, id=image.id)
cloud.update_image_properties(
image=image,
kernel=module.params['kernel'],
ramdisk=module.params['ramdisk'],
**module.params['properties'])
image = cloud.get_image(name_or_id=image.id)
module.exit_json(changed=changed, image=image, id=image.id)
elif module.params['state'] == 'absent':
if not image:
changed = False
else:
cloud.delete_image(
name_or_id=module.params['name'],
wait=module.params['wait'],
timeout=module.params['timeout'])
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message, extra_data=e.extra_data)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
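# A hedged companion example (not shipped with the module): deleting the
# image created in the EXAMPLES above is a matter of flipping ``state``
# to ``absent``.
#
# - os_image:
#     auth:
#       auth_url: http://localhost/auth/v2.0
#       username: admin
#       password: passme
#       project_name: admin
#     name: cirros
#     state: absent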
| gpl-3.0 |
jarvys/django-1.7-jdb | django/contrib/gis/geos/prototypes/errcheck.py | 66 | 3563 | """
Error checking functions for GEOS ctypes prototype functions.
"""
import os
from ctypes import c_void_p, string_at, CDLL
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.libgeos import GEOS_VERSION
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
# Getting the `free` routine used to free the memory allocated for
# string pointers returned by GEOS.
if GEOS_VERSION >= (3, 1, 1):
# In versions 3.1.1 and above, `GEOSFree` was added to the C API
# because `free` isn't always available on all platforms.
free = GEOSFunc('GEOSFree')
free.argtypes = [c_void_p]
free.restype = None
else:
# Getting the `free` routine from the C library of the platform.
if os.name == 'nt':
# On NT, use the MS C library.
libc = CDLL('msvcrt')
else:
# On POSIX platforms C library is obtained by passing None into `CDLL`.
libc = CDLL(None)
free = libc.free
### ctypes error checking routines ###
def last_arg_byref(args):
"Returns the last C argument's value by reference."
return args[-1]._obj.value
def check_dbl(result, func, cargs):
"Checks the status code and returns the double value passed in by reference."
# Checking the status code
if result != 1:
return None
# Double passed in by reference, return its value.
return last_arg_byref(cargs)
def check_geom(result, func, cargs):
"Error checking on routines that return Geometries."
if not result:
raise GEOSException('Error encountered checking Geometry returned from GEOS C function "%s".' % func.__name__)
return result
def check_minus_one(result, func, cargs):
"Error checking on routines that should not return -1."
if result == -1:
raise GEOSException('Error encountered in GEOS C function "%s".' % func.__name__)
else:
return result
def check_predicate(result, func, cargs):
"Error checking for unary/binary predicate functions."
val = ord(result) # getting the ordinal from the character
if val == 1:
return True
elif val == 0:
return False
else:
raise GEOSException('Error encountered on GEOS C predicate function "%s".' % func.__name__)
def check_sized_string(result, func, cargs):
"""
Error checking for routines that return explicitly sized strings.
This frees the memory allocated by GEOS at the result pointer.
"""
if not result:
raise GEOSException('Invalid string pointer returned by GEOS C function "%s"' % func.__name__)
# A c_size_t object is passed in by reference for the second
# argument on these routines, and it's needed to determine the
# correct size.
s = string_at(result, last_arg_byref(cargs))
# Freeing the memory allocated within GEOS
free(result)
return s
def check_string(result, func, cargs):
"""
Error checking for routines that return strings.
This frees the memory allocated by GEOS at the result pointer.
"""
if not result:
raise GEOSException('Error encountered checking string return value in GEOS C function "%s".' % func.__name__)
# Getting the string value at the pointer address.
s = string_at(result)
# Freeing the memory allocated within GEOS
free(result)
return s
def check_zero(result, func, cargs):
"Error checking on routines that should not return 0."
if result == 0:
raise GEOSException('Error encountered in GEOS C function "%s".' % func.__name__)
else:
return result
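# A hedged illustration of how these checkers are wired up (the actual
# assignments live in the sibling prototype modules): ctypes invokes the
# ``errcheck`` attribute with (result, func, cargs) after every foreign
# call, so each prototype only needs it set once.
#
# from ctypes import c_char, c_void_p
#
# geos_equals = GEOSFunc('GEOSEquals')  # binary predicate in the GEOS C API
# geos_equals.argtypes = [c_void_p, c_void_p]
# geos_equals.restype = c_char
# geos_equals.errcheck = check_predicate  # -> True/False or GEOSException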
| bsd-3-clause |
cynngah/uofthacksIV | generate-jobs/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/shutil.py | 395 | 25647 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
from os.path import abspath
import fnmatch
import collections
import errno
from . import tarfile
try:
import bz2
_BZ2_SUPPORTED = True
except ImportError:
_BZ2_SUPPORTED = False
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
"register_archive_format", "unregister_archive_format",
"get_unpack_formats", "register_unpack_format",
"unregister_unpack_format", "unpack_archive", "ignore_patterns"]
class Error(EnvironmentError):
pass
class SpecialFileError(EnvironmentError):
"""Raised when trying to do a kind of operation (e.g. copying) which is
not supported on a special file (e.g. a named pipe)"""
class ExecError(EnvironmentError):
"""Raised when a command could not be executed"""
class ReadError(EnvironmentError):
"""Raised when an archive cannot be read"""
class RegistryError(Exception):
"""Raised when a registry operation with the archiving
and unpacking registries fails"""
try:
WindowsError
except NameError:
WindowsError = None
def copyfileobj(fsrc, fdst, length=16*1024):
"""copy data from file-like object fsrc to file-like object fdst"""
while 1:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst):
"""Copy data from src to dst"""
if _samefile(src, dst):
raise Error("`%s` and `%s` are the same file" % (src, dst))
for fn in [src, dst]:
try:
st = os.stat(fn)
except OSError:
# File most likely does not exist
pass
else:
# XXX What about other special files? (sockets, devices...)
if stat.S_ISFIFO(st.st_mode):
raise SpecialFileError("`%s` is a named pipe" % fn)
with open(src, 'rb') as fsrc:
with open(dst, 'wb') as fdst:
copyfileobj(fsrc, fdst)
def copymode(src, dst):
"""Copy mode bits from src to dst"""
if hasattr(os, 'chmod'):
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
os.chmod(dst, mode)
def copystat(src, dst):
"""Copy all stat info (mode bits, atime, mtime, flags) from src to dst"""
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
if hasattr(os, 'utime'):
os.utime(dst, (st.st_atime, st.st_mtime))
if hasattr(os, 'chmod'):
os.chmod(dst, mode)
if hasattr(os, 'chflags') and hasattr(st, 'st_flags'):
try:
os.chflags(dst, st.st_flags)
except OSError as why:
if (not hasattr(errno, 'EOPNOTSUPP') or
why.errno != errno.EOPNOTSUPP):
raise
def copy(src, dst):
"""Copy data and mode bits ("cp src dst").
The destination may be a directory.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copymode(src, dst)
def copy2(src, dst):
"""Copy data and all stat info ("cp -p src dst").
The destination may be a directory.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copystat(src, dst)
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
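# A small, hedged usage sketch (paths illustrative): copy a tree while
# skipping compiled files and VCS metadata.
#
# copytree('project', '/tmp/project-copy',
#          ignore=ignore_patterns('*.pyc', '.git', '*~'))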
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
ignore_dangling_symlinks=False):
"""Recursively copy a directory tree.
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied. If the file pointed by the symlink doesn't
exist, an exception will be added in the list of errors raised in
an Error exception at the end of the copy process.
You can set the optional ignore_dangling_symlinks flag to true if you
want to silence this exception. Notice that this has no effect on
platforms that don't support os.symlink.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
The optional copy_function argument is a callable that will be used
to copy each file. It will be called with the source path and the
destination path as arguments. By default, copy2() is used, but any
function that supports the same signature (like copy()) can be used.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
if symlinks:
os.symlink(linkto, dstname)
else:
# ignore dangling symlink if the flag is on
if not os.path.exists(linkto) and ignore_dangling_symlinks:
continue
# otherwise let the copy occurs. copy2 will raise an error
copy_function(srcname, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore, copy_function)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except EnvironmentError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError as why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.append((src, dst, str(why)))
if errors:
raise Error(errors)
def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
try:
if os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = os.listdir(path)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
rmtree(fullname, ignore_errors, onerror)
else:
try:
os.remove(fullname)
except os.error:
onerror(os.remove, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
return os.path.basename(path.rstrip(os.path.sep))
def move(src, dst):
"""Recursively move a file or directory to another location. This is
similar to the Unix "mv" command.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed.
A lot more could be done here... A look at a mv.c shows a lot of
the issues this implementation glosses over.
"""
real_dst = dst
if os.path.isdir(dst):
if _samefile(src, dst):
# We might be on a case insensitive filesystem,
# perform the rename anyway.
os.rename(src, dst)
return
real_dst = os.path.join(dst, _basename(src))
if os.path.exists(real_dst):
raise Error("Destination path '%s' already exists" % real_dst)
try:
os.rename(src, real_dst)
except OSError:
if os.path.isdir(src):
if _destinsrc(src, dst):
raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
copytree(src, real_dst, symlinks=True)
rmtree(src)
else:
copy2(src, real_dst)
os.unlink(src)
def _destinsrc(src, dst):
src = abspath(src)
dst = abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
def _get_gid(name):
"""Returns a gid, given a group name."""
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _get_uid(name):
"""Returns an uid, given a user name."""
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
owner=None, group=None, logger=None):
"""Create a (possibly compressed) tar file from all the files under
'base_dir'.
'compress' must be "gzip" (the default), "bzip2", or None.
'owner' and 'group' can be used to define an owner and a group for the
archive that is being built. If not provided, the current owner and group
will be used.
The output tar file will be named 'base_name' + ".tar", possibly plus
the appropriate compression extension (".gz", or ".bz2").
Returns the output filename.
"""
tar_compression = {'gzip': 'gz', None: ''}
compress_ext = {'gzip': '.gz'}
if _BZ2_SUPPORTED:
tar_compression['bzip2'] = 'bz2'
compress_ext['bzip2'] = '.bz2'
# flags for compression program, each element of list will be an argument
if compress is not None and compress not in compress_ext:
raise ValueError("bad value for 'compress', or compression format not "
"supported : {0}".format(compress))
archive_name = base_name + '.tar' + compress_ext.get(compress, '')
archive_dir = os.path.dirname(archive_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# creating the tarball
if logger is not None:
logger.info('Creating tar archive')
uid = _get_uid(owner)
gid = _get_gid(group)
def _set_uid_gid(tarinfo):
if gid is not None:
tarinfo.gid = gid
tarinfo.gname = group
if uid is not None:
tarinfo.uid = uid
tarinfo.uname = owner
return tarinfo
if not dry_run:
tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
try:
tar.add(base_dir, filter=_set_uid_gid)
finally:
tar.close()
return archive_name
def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
# XXX see if we want to keep an external call here
if verbose:
zipoptions = "-r"
else:
zipoptions = "-rq"
from distutils.errors import DistutilsExecError
from distutils.spawn import spawn
try:
spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
except DistutilsExecError:
# XXX really should distinguish between "couldn't find
# external 'zip' command" and "zip failed".
raise ExecError("unable to create zip file '%s': "
"could neither import the 'zipfile' module nor "
"find a standalone zip utility") % zip_filename
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
"""Create a zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Uses either the
"zipfile" Python module (if available) or the InfoZIP "zip" utility
(if installed and found on the default search path). If neither tool is
available, raises ExecError. Returns the name of the output zip
file.
"""
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# If zipfile module is not available, try spawning an external 'zip'
# command.
try:
import zipfile
except ImportError:
zipfile = None
if zipfile is None:
_call_external_zip(base_dir, zip_filename, verbose, dry_run)
else:
if logger is not None:
logger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zip.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
zip.close()
return zip_filename
_ARCHIVE_FORMATS = {
'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
'zip': (_make_zipfile, [], "ZIP file"),
}
if _BZ2_SUPPORTED:
_ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
"bzip2'ed tar-file")
def get_archive_formats():
"""Returns a list of supported formats for archiving and unarchiving.
Each element of the returned sequence is a tuple (name, description)
"""
formats = [(name, registry[2]) for name, registry in
_ARCHIVE_FORMATS.items()]
formats.sort()
return formats
def register_archive_format(name, function, extra_args=None, description=''):
"""Registers an archive format.
name is the name of the format. function is the callable that will be
used to create archives. If provided, extra_args is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_archive_formats() function.
"""
if extra_args is None:
extra_args = []
if not isinstance(function, collections.Callable):
raise TypeError('The %s object is not callable' % function)
if not isinstance(extra_args, (tuple, list)):
raise TypeError('extra_args needs to be a sequence')
for element in extra_args:
if not isinstance(element, (tuple, list)) or len(element) != 2:
raise TypeError('extra_args elements are: (arg_name, value)')
_ARCHIVE_FORMATS[name] = (function, extra_args, description)
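# A hedged sketch of registering a custom format; ``_make_tarball`` is
# reused purely for illustration, with its ``compress`` argument pinned.
# The format name 'plaintar' is made up for this example.
#
# register_archive_format('plaintar', _make_tarball,
#                         extra_args=[('compress', None)],
#                         description='uncompressed tar (alias)')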
def unregister_archive_format(name):
del _ARCHIVE_FORMATS[name]
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "bztar"
or "gztar".
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
save_cwd = os.getcwd()
if root_dir is not None:
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError("unknown archive format '%s'" % format)
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
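# Typical (hedged) usage sketch: pack the contents of ``data/`` into
# ``backup.tar.gz``; note that ``root_dir`` is chdir'ed into only for the
# duration of the call, and the previous cwd is restored afterwards.
#
# archive_path = make_archive('backup', 'gztar', root_dir='data')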
def get_unpack_formats():
"""Returns a list of supported formats for unpacking.
Each element of the returned sequence is a tuple
(name, extensions, description)
"""
formats = [(name, info[0], info[3]) for name, info in
_UNPACK_FORMATS.items()]
formats.sort()
return formats
def _check_unpack_options(extensions, function, extra_args):
"""Checks what gets registered as an unpacker."""
# first make sure no other unpacker is registered for this extension
existing_extensions = {}
for name, info in _UNPACK_FORMATS.items():
for ext in info[0]:
existing_extensions[ext] = name
for extension in extensions:
if extension in existing_extensions:
msg = '%s is already registered for "%s"'
raise RegistryError(msg % (extension,
existing_extensions[extension]))
if not isinstance(function, collections.Callable):
raise TypeError('The registered function must be a callable')
def register_unpack_format(name, extensions, function, extra_args=None,
description=''):
"""Registers an unpack format.
`name` is the name of the format. `extensions` is a list of extensions
corresponding to the format.
`function` is the callable that will be
used to unpack archives. The callable will receive archives to unpack.
If it's unable to handle an archive, it needs to raise a ReadError
exception.
If provided, `extra_args` is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_unpack_formats() function.
"""
if extra_args is None:
extra_args = []
_check_unpack_options(extensions, function, extra_args)
_UNPACK_FORMATS[name] = extensions, function, extra_args, description
def unregister_unpack_format(name):
"""Removes the pack format from the registry."""
del _UNPACK_FORMATS[name]
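# Illustrative registration of a custom unpacker (names are made up; a
# real callable must accept (filename, extract_dir) positionally and
# raise ReadError for archives it cannot handle):
#
#     def _unpack_foo(filename, extract_dir):
#         ...  # extract `filename` into `extract_dir`
#     register_unpack_format('foo', ['.foo'], _unpack_foo,
#                            description='example format')
#     unregister_unpack_format('foo')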
def _ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _unpack_zipfile(filename, extract_dir):
"""Unpack zip `filename` to `extract_dir`
"""
try:
import zipfile
except ImportError:
raise ReadError('zlib not supported, cannot unpack this archive.')
if not zipfile.is_zipfile(filename):
raise ReadError("%s is not a zip file" % filename)
zip = zipfile.ZipFile(filename)
try:
for info in zip.infolist():
name = info.filename
# don't extract absolute paths or ones with .. in them
if name.startswith('/') or '..' in name:
continue
target = os.path.join(extract_dir, *name.split('/'))
if not target:
continue
_ensure_directory(target)
if not name.endswith('/'):
# file
data = zip.read(info.filename)
f = open(target, 'wb')
try:
f.write(data)
finally:
f.close()
del data
finally:
zip.close()
def _unpack_tarfile(filename, extract_dir):
"""Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
"""
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise ReadError(
"%s is not a compressed or uncompressed tar file" % filename)
try:
tarobj.extractall(extract_dir)
finally:
tarobj.close()
_UNPACK_FORMATS = {
'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
'zip': (['.zip'], _unpack_zipfile, [], "ZIP file")
}
if _BZ2_SUPPORTED:
_UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [],
"bzip2'ed tar-file")
def _find_unpack_format(filename):
for name, info in _UNPACK_FORMATS.items():
for extension in info[0]:
if filename.endswith(extension):
return name
return None
def unpack_archive(filename, extract_dir=None, format=None):
"""Unpack an archive.
`filename` is the name of the archive.
`extract_dir` is the name of the target directory, where the archive
is unpacked. If not provided, the current working directory is used.
    `format` is the archive format: one of "zip", "tar", "gztar", or any
    other registered format. If not provided, unpack_archive will use the
    filename extension and see if an unpacker was registered for that
    extension; if none is found, a ReadError is raised. An explicitly
    given but unrecognized `format` raises ValueError.
"""
if extract_dir is None:
extract_dir = os.getcwd()
if format is not None:
try:
format_info = _UNPACK_FORMATS[format]
except KeyError:
raise ValueError("Unknown unpack format '{0}'".format(format))
func = format_info[1]
func(filename, extract_dir, **dict(format_info[2]))
else:
# we need to look at the registered unpackers supported extensions
format = _find_unpack_format(filename)
if format is None:
raise ReadError("Unknown archive format '{0}'".format(filename))
func = _UNPACK_FORMATS[format][1]
kwargs = dict(_UNPACK_FORMATS[format][2])
func(filename, extract_dir, **kwargs)
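# Usage sketch (hypothetical paths): the format is inferred from the
# '.tar.gz' extension via _find_unpack_format.
#
#     unpack_archive('/tmp/project.tar.gz', extract_dir='/tmp/out')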
| mit |
NelleV/pyconfr-test | symposion_project/settings.py | 1 | 6854 | # -*- coding: utf-8 -*-
# Django settings for account project
import os.path
import posixpath
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# tells Pinax to serve media through the staticfiles app.
SERVE_MEDIA = DEBUG
INTERNAL_IPS = [
"127.0.0.1",
]
ADMINS = [
# ("Your Name", "[email protected]"),
]
MANAGERS = ADMINS
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3", # Add "postgresql_psycopg2", "postgresql", "mysql", "sqlite3" or "oracle".
"NAME": "dev.db", # Or path to database file if using sqlite3.
"USER": "", # Not used with sqlite3.
"PASSWORD": "", # Not used with sqlite3.
"HOST": "", # Set to empty string for localhost. Not used with sqlite3.
"PORT": "", # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "US/Eastern"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PACKAGE_ROOT, "site_media", "media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = "/site_media/media/"
# Absolute path to the directory that holds static files like app media.
# Example: "/home/media/media.lawrence.com/apps/"
STATIC_ROOT = os.path.join(PACKAGE_ROOT, "site_media", "static")
# URL that handles the static files like app media.
# Example: "http://media.lawrence.com"
STATIC_URL = "/site_media/static/"
# Additional directories which hold static files
STATICFILES_DIRS = [
os.path.join(PACKAGE_ROOT, "static"),
]
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = posixpath.join(STATIC_URL, "admin/")
# Make this unique, and don't share it with anybody.
SECRET_KEY = "8*br)9@fs!4nzg-imfrsst&oa2udy6z-fqtdk0*e5c1=wn)(t3"
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
]
MIDDLEWARE_CLASSES = [
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.transaction.TransactionMiddleware",
"reversion.middleware.RevisionMiddleware",
"debug_toolbar.middleware.DebugToolbarMiddleware",
]
ROOT_URLCONF = "symposion_project.urls"
TEMPLATE_DIRS = [
os.path.join(PACKAGE_ROOT, "templates"),
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"pinax_utils.context_processors.settings",
"account.context_processors.account",
"symposion.reviews.context_processors.reviews",
]
INSTALLED_APPS = [
# Django
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.humanize",
# theme
"pinax_theme_bootstrap_account",
"pinax_theme_bootstrap",
"django_forms_bootstrap",
# external
"debug_toolbar",
"mailer",
"timezones",
"metron",
"markitup",
"taggit",
"reversion",
"easy_thumbnails",
"sitetree",
"account",
# symposion
"symposion",
"symposion.sponsorship",
"symposion.conference",
"symposion.cms",
"symposion.boxes",
"symposion.proposals",
"symposion.speakers",
"symposion.teams",
"symposion.reviews",
"symposion.schedule",
# project
"symposion_project.proposals",
]
FIXTURE_DIRS = [
os.path.join(PROJECT_ROOT, "fixtures"),
]
MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage"
EMAIL_BACKEND = "mailer.backend.DbBackend"
ACCOUNT_OPEN_SIGNUP = True
ACCOUNT_USE_OPENID = False
ACCOUNT_REQUIRED_EMAIL = False
ACCOUNT_EMAIL_VERIFICATION = False
ACCOUNT_EMAIL_AUTHENTICATION = False
ACCOUNT_UNIQUE_EMAIL = EMAIL_CONFIRMATION_UNIQUE_EMAIL = False
ACCOUNT_SIGNUP_REDIRECT_URL = "dashboard"
ACCOUNT_LOGIN_REDIRECT_URL = "dashboard"
ACCOUNT_LOGOUT_REDIRECT_URL = "home"
ACCOUNT_USER_DISPLAY = lambda user: user.email
AUTHENTICATION_BACKENDS = [
# Permissions Backends
"symposion.teams.backends.TeamPermissionsBackend",
# Auth backends
"account.auth_backends.EmailAuthenticationBackend",
]
LOGIN_URL = "/account/login/" # @@@ any way this can be a url name?
EMAIL_CONFIRMATION_DAYS = 2
EMAIL_DEBUG = DEBUG
DEBUG_TOOLBAR_CONFIG = {
"INTERCEPT_REDIRECTS": False,
}
MARKITUP_FILTER = ("markdown.markdown", {"safe_mode": True})
MARKITUP_SET = "markitup/sets/markdown"
MARKITUP_SKIN = "markitup/skins/simple"
CONFERENCE_ID = 1
SYMPOSION_PAGE_REGEX = r"(([\w-]{1,})(/[\w-]{1,})*)/"
PROPOSAL_FORMS = {
"tutorial": "symposion_project.proposals.forms.TutorialProposalForm",
"talk": "symposion_project.proposals.forms.TalkProposalForm",
"poster": "symposion_project.proposals.forms.PosterProposalForm",
}
# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
try:
from local_settings import *
except ImportError:
pass
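# A hypothetical local_settings.py for production could contain, e.g.:
#
#     DEBUG = False
#     TEMPLATE_DEBUG = False
#     SECRET_KEY = "<a fresh value generated per deployment>"
#     EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"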
| bsd-3-clause |
muffinresearch/olympia | apps/zadmin/tests/test_tasks.py | 10 | 6460 | # -*- coding: utf-8 -*-
from django.conf import settings
import mock
import urlparse
import amo
import amo.tests
from addons.models import Addon
from applications.models import AppVersion
from files.utils import make_xpi
from versions.compare import version_int
from zadmin import tasks
def RequestMock(response='', headers=None):
"""Mocks the request objects of urllib2 and requests modules."""
res = mock.Mock()
res.read.return_value = response
res.contents = response
res.text = response
    res.iter_content.side_effect = lambda chunk_size=1: (response,).__iter__()
    def lines():
        return [l + '\n' for l in response.split('\n')[:-1]]
    res.readlines.side_effect = lines
    # Accept the optional chunk_size argument so both call styles work.
    res.iter_lines.side_effect = lambda chunk_size=1: lines().__iter__()
res.headers = headers or {}
res.headers['content-length'] = len(response)
return res
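# Sketch of how the mock behaves (illustrative response value):
#
#     res = RequestMock('a\nb\n')
#     res.text                        # -> 'a\nb\n'
#     res.readlines()                 # -> ['a\n', 'b\n']
#     res.headers['content-length']   # -> 4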
def make_langpack(version):
versions = (version, '%s.*' % version)
for version in versions:
AppVersion.objects.get_or_create(application=amo.FIREFOX.id,
version=version,
version_int=version_int(version))
return make_xpi({
'install.rdf': """<?xml version="1.0"?>
<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:em="http://www.mozilla.org/2004/em-rdf#">
<Description about="urn:mozilla:install-manifest"
em:id="[email protected]"
em:name="Foo Language Pack"
em:version="{0}"
em:type="8"
em:creator="mozilla.org">
<em:targetApplication>
<Description>
<em:id>{{ec8030f7-c20a-464f-9b0e-13a3a9e97384}}</em:id>
<em:minVersion>{0}</em:minVersion>
<em:maxVersion>{1}</em:maxVersion>
</Description>
</em:targetApplication>
</Description>
</RDF>
""".format(*versions)
}).read()
class TestLangpackFetcher(amo.tests.TestCase):
fixtures = ['zadmin/users']
LISTING = 'pretend-this-is-a-sha256-sum win32/xpi/de-DE.xpi\n'
def setUp(self):
super(TestLangpackFetcher, self).setUp()
request_patch = mock.patch('zadmin.tasks.requests.get')
self.mock_request = request_patch.start()
self.addCleanup(request_patch.stop)
def get_langpacks(self):
return (Addon.objects.no_cache()
.filter(addonuser__user__email=settings.LANGPACK_OWNER_EMAIL,
type=amo.ADDON_LPAPP))
def fetch_langpacks(self, version):
path = settings.LANGPACK_PATH_DEFAULT % ('firefox', version)
base_url = urlparse.urljoin(settings.LANGPACK_DOWNLOAD_BASE, path)
list_url = urlparse.urljoin(base_url, settings.LANGPACK_MANIFEST_PATH)
langpack_url = urlparse.urljoin(base_url, 'de-DE.xpi')
responses = {list_url: RequestMock(self.LISTING),
langpack_url: RequestMock(make_langpack(version))}
self.mock_request.reset_mock()
self.mock_request.side_effect = lambda url, **kw: responses.get(url)
tasks.fetch_langpacks(path)
self.mock_request.assert_has_calls(
[mock.call(list_url, verify=settings.CA_CERT_BUNDLE_PATH),
mock.call(langpack_url, verify=settings.CA_CERT_BUNDLE_PATH)])
def test_fetch_new_langpack(self):
assert self.get_langpacks().count() == 0
self.fetch_langpacks(amo.FIREFOX.latest_version)
langpacks = self.get_langpacks()
assert langpacks.count() == 1
a = langpacks[0]
assert a.default_locale == 'de-DE'
assert a.target_locale == 'de-DE'
assert a._current_version
assert a.current_version.version == amo.FIREFOX.latest_version
assert a.status == amo.STATUS_PUBLIC
assert a.current_version.files.all()[0].status == amo.STATUS_PUBLIC
def test_fetch_updated_langpack(self):
versions = ('16.0', '17.0')
self.fetch_langpacks(versions[0])
assert self.get_langpacks().count() == 1
self.fetch_langpacks(versions[1])
langpacks = self.get_langpacks()
assert langpacks.count() == 1
a = langpacks[0]
assert a.versions.count() == 2
v = a.versions.get(version=versions[1])
assert v.files.all()[0].status == amo.STATUS_PUBLIC
def test_fetch_duplicate_langpack(self):
self.fetch_langpacks(amo.FIREFOX.latest_version)
langpacks = self.get_langpacks()
assert langpacks.count() == 1
assert langpacks[0].versions.count() == 1
assert (langpacks[0].versions.all()[0].version ==
amo.FIREFOX.latest_version)
self.fetch_langpacks(amo.FIREFOX.latest_version)
langpacks = self.get_langpacks()
assert langpacks.count() == 1
assert langpacks[0].versions.count() == 1
assert (langpacks[0].versions.all()[0].version ==
amo.FIREFOX.latest_version)
def test_fetch_updated_langpack_beta(self):
versions = ('16.0', '16.0a2')
self.fetch_langpacks(versions[0])
assert self.get_langpacks().count() == 1
self.fetch_langpacks(versions[1])
langpacks = self.get_langpacks()
assert langpacks.count() == 1
a = langpacks[0]
assert a.versions.count() == 2
v = a.versions.get(version=versions[1])
assert v.files.all()[0].status == amo.STATUS_BETA
def test_fetch_new_langpack_beta(self):
self.fetch_langpacks('16.0a2')
assert self.get_langpacks().count() == 0
def test_fetch_langpack_wrong_owner(self):
Addon.objects.create(guid='[email protected]',
type=amo.ADDON_LPAPP)
self.fetch_langpacks(amo.FIREFOX.latest_version)
assert self.get_langpacks().count() == 0
def test_fetch_langpack_invalid_path_fails(self):
self.mock_request.return_value = None
with self.assertRaises(ValueError) as exc:
tasks.fetch_langpacks('../foo/')
assert str(exc.exception) == 'Invalid path'
| bsd-3-clause |
javipalanca/Django-facebook | django_facebook/auth_urls.py | 24 | 2822 | """
URL patterns for the views included in ``django.contrib.auth``.
Including these URLs (via the ``include()`` directive) will set up the
following patterns based at whatever URL prefix they are included
under:
* User login at ``login/``.
* User logout at ``logout/``.
* The two-step password change at ``password/change/`` and
``password/change/done/``.
* The four-step password reset at ``password/reset/``,
``password/reset/confirm/``, ``password/reset/complete/`` and
``password/reset/done/``.
The default registration backend already has an ``include()`` for
these URLs, so under the default setup it is not necessary to manually
include these views. Other backends may or may not include them;
consult a specific backend's documentation for details.
"""
try:
from django.conf.urls import patterns, url
except ImportError:
from django.conf.urls.defaults import patterns, url
from django.contrib.auth import views as auth_views
from django_facebook import registration_views
from django_facebook.utils import replication_safe
urlpatterns = patterns('',
url(r'^login/$',
replication_safe(auth_views.login),
{'template_name': 'registration/login.html'},
name='auth_login'),
url(r'^logout/$',
replication_safe(auth_views.logout),
{'template_name': 'registration/logout.html'},
name='auth_logout'),
url(r'^password/change/$',
auth_views.password_change,
name='auth_password_change'),
url(r'^password/change/done/$',
auth_views.password_change_done,
name='auth_password_change_done'),
url(r'^password/reset/$',
auth_views.password_reset,
name='auth_password_reset'),
url(
r'^password/reset/confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$',
auth_views.password_reset_confirm,
name='auth_password_reset_confirm'),
url(r'^password/reset/complete/$',
auth_views.password_reset_complete,
name='auth_password_reset_complete'),
url(r'^password/reset/done/$',
auth_views.password_reset_done,
name='auth_password_reset_done'),
url(r'^register/$',
registration_views.register,
name='registration_register'),
)
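# Example root URLconf wiring (the 'accounts/' prefix is an assumption):
#
#     from django.conf.urls import include, patterns, url
#     urlpatterns = patterns('',
#         url(r'^accounts/', include('django_facebook.auth_urls')),
#     )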
| bsd-3-clause |
atlashealth/ansible | lib/ansible/plugins/connections/jail.py | 131 | 7291 | # Based on local.py (c) 2012, Michael DeHaan <[email protected]>
# and chroot.py (c) 2013, Maykel Moya <[email protected]>
# (c) 2013, Michael Scherer <[email protected]>
# (c) 2015, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
import traceback
import os
import shlex
import subprocess
from ansible import errors
from ansible.utils.unicode import to_bytes
from ansible.callbacks import vvv
import ansible.constants as C
BUFSIZE = 65536
class Connection(object):
''' Local BSD Jail based connections '''
def _search_executable(self, executable):
cmd = distutils.spawn.find_executable(executable)
if not cmd:
            raise errors.AnsibleError("%s command not found in PATH" % executable)
return cmd
def list_jails(self):
p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
cwd=self.runner.basedir,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return stdout.split()
def get_jail_path(self):
p = subprocess.Popen([self.jls_cmd, '-j', self.jail, '-q', 'path'],
cwd=self.runner.basedir,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
# remove \n
return stdout[:-1]
def __init__(self, runner, host, port, *args, **kwargs):
self.jail = host
self.runner = runner
self.host = host
self.has_pipelining = False
self.become_methods_supported=C.BECOME_METHODS
if os.geteuid() != 0:
raise errors.AnsibleError("jail connection requires running as root")
self.jls_cmd = self._search_executable('jls')
self.jexec_cmd = self._search_executable('jexec')
        if self.jail not in self.list_jails():
raise errors.AnsibleError("incorrect jail name %s" % self.jail)
self.host = host
# port is unused, since this is local
self.port = port
def connect(self, port=None):
''' connect to the jail; nothing to do here '''
vvv("THIS IS A LOCAL JAIL DIR", host=self.jail)
return self
# a modifier
def _generate_cmd(self, executable, cmd):
if executable:
local_cmd = [self.jexec_cmd, self.jail, executable, '-c', cmd]
else:
# Prev to python2.7.3, shlex couldn't handle unicode type strings
cmd = to_bytes(cmd)
cmd = shlex.split(cmd)
local_cmd = [self.jexec_cmd, self.jail]
local_cmd += cmd
return local_cmd
def _buffered_exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None, stdin=subprocess.PIPE):
''' run a command on the jail. This is only needed for implementing
put_file() get_file() so that we don't have to read the whole file
into memory.
        compared to exec_command() it loses some niceties like being able to
        return the process's exit code immediately.
'''
if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
        # We enter the jail as root so we ignore privilege escalation (probably needs fixing in case we have to become a specific user [e.g. postgres admin])
local_cmd = self._generate_cmd(executable, cmd)
vvv("EXEC %s" % (local_cmd), host=self.jail)
p = subprocess.Popen(local_cmd, shell=False,
cwd=self.runner.basedir,
stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return p
def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
''' run a command on the jail '''
p = self._buffered_exec_command(cmd, tmp_path, become_user, sudoable, executable, in_data)
stdout, stderr = p.communicate()
return (p.returncode, '', stdout, stderr)
def put_file(self, in_path, out_path):
''' transfer a file from local to jail '''
vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)
try:
with open(in_path, 'rb') as in_file:
try:
p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), None, stdin=in_file)
except OSError:
raise errors.AnsibleError("jail connection requires dd command in the jail")
try:
stdout, stderr = p.communicate()
except:
traceback.print_exc()
raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
if p.returncode != 0:
raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
except IOError:
raise errors.AnsibleError("file or module does not exist at: %s" % in_path)
def fetch_file(self, in_path, out_path):
''' fetch a file from jail to local '''
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)
try:
p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), None)
except OSError:
raise errors.AnsibleError("jail connection requires dd command in the jail")
with open(out_path, 'wb+') as out_file:
try:
chunk = p.stdout.read(BUFSIZE)
while chunk:
out_file.write(chunk)
chunk = p.stdout.read(BUFSIZE)
except:
traceback.print_exc()
raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
stdout, stderr = p.communicate()
if p.returncode != 0:
raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
def close(self):
''' terminate the connection; nothing to do here '''
pass
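# Rough sketch of driving the plugin by hand (illustrative only: 'runner'
# stands for a configured ansible Runner and 'myjail' for an existing jail):
#
#     conn = Connection(runner, 'myjail', None)
#     conn.connect()
#     rc, _, out, err = conn.exec_command('/bin/ls /', '/tmp')
#     conn.put_file('/tmp/local.txt', '/tmp/remote.txt')
#     conn.close()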
| gpl-3.0 |
vinayvenu/orthanc | Resources/Samples/Python/ChangesLoop.py | 9 | 2347 | #!/usr/bin/python
# Orthanc - A Lightweight, RESTful DICOM Store
# Copyright (C) 2012-2015 Sebastien Jodogne, Medical Physics
# Department, University Hospital of Liege, Belgium
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import sys
import RestToolbox
##
## Print help message
##
if len(sys.argv) != 3:
print("""
Sample script that continuously monitors the arrival of new DICOM
images into Orthanc (through the Changes API).
Usage: %s [hostname] [HTTP port]
For instance: %s localhost 8042
""" % (sys.argv[0], sys.argv[0]))
exit(-1)
URL = 'http://%s:%d' % (sys.argv[1], int(sys.argv[2]))
##
## The following function is called each time a new instance is
## received.
##
def NewInstanceReceived(path):
global URL
patientName = RestToolbox.DoGet(URL + path + '/content/PatientName')
# Remove the possible trailing characters due to DICOM padding
patientName = patientName.strip()
print('New instance received for patient "%s": "%s"' % (patientName, path))
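# For reference, each entry of r['Changes'] consulted below looks roughly
# like the following (only the fields used here are shown; values made up):
#
#     {'ChangeType': 'NewInstance', 'Path': '/instances/0a9b3c...'}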
##
## Main loop that listens to the changes API.
##
current = 0
while True:
r = RestToolbox.DoGet(URL + '/changes', {
'since' : current,
'limit' : 4 # Retrieve at most 4 changes at once
})
for change in r['Changes']:
# We are only interested interested in the arrival of new instances
if change['ChangeType'] == 'NewInstance':
# Call the callback function
path = change['Path']
NewInstanceReceived(path)
# Delete the instance once it has been discovered
RestToolbox.DoDelete(URL + path)
current = r['Last']
if r['Done']:
print('Everything has been processed: Waiting...')
time.sleep(1)
| gpl-3.0 |
akalipetis/django-rest-framework | rest_framework/permissions.py | 71 | 6444 | """
Provides a set of pluggable permission policies.
"""
from __future__ import unicode_literals
from django.http import Http404
from rest_framework.compat import get_model_name
SAFE_METHODS = ('GET', 'HEAD', 'OPTIONS')
class BasePermission(object):
"""
A base class from which all permission classes should inherit.
"""
def has_permission(self, request, view):
"""
Return `True` if permission is granted, `False` otherwise.
"""
return True
def has_object_permission(self, request, view, obj):
"""
Return `True` if permission is granted, `False` otherwise.
"""
return True
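# A minimal custom permission built on this base class (illustrative; the
# `owner` attribute is an assumption about the model being protected):
#
#     class IsOwner(BasePermission):
#         def has_object_permission(self, request, view, obj):
#             return obj.owner == request.user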
class AllowAny(BasePermission):
"""
Allow any access.
This isn't strictly required, since you could use an empty
permission_classes list, but it's useful because it makes the intention
more explicit.
"""
def has_permission(self, request, view):
return True
class IsAuthenticated(BasePermission):
"""
Allows access only to authenticated users.
"""
def has_permission(self, request, view):
return request.user and request.user.is_authenticated()
class IsAdminUser(BasePermission):
"""
Allows access only to admin users.
"""
def has_permission(self, request, view):
return request.user and request.user.is_staff
class IsAuthenticatedOrReadOnly(BasePermission):
"""
The request is authenticated as a user, or is a read-only request.
"""
def has_permission(self, request, view):
return (
request.method in SAFE_METHODS or
request.user and
request.user.is_authenticated()
)
class DjangoModelPermissions(BasePermission):
"""
The request is authenticated using `django.contrib.auth` permissions.
See: https://docs.djangoproject.com/en/dev/topics/auth/#permissions
It ensures that the user is authenticated, and has the appropriate
`add`/`change`/`delete` permissions on the model.
This permission can only be applied against view classes that
provide a `.queryset` attribute.
"""
# Map methods into required permission codes.
# Override this if you need to also provide 'view' permissions,
# or if you want to provide custom permission codes.
perms_map = {
'GET': [],
'OPTIONS': [],
'HEAD': [],
'POST': ['%(app_label)s.add_%(model_name)s'],
'PUT': ['%(app_label)s.change_%(model_name)s'],
'PATCH': ['%(app_label)s.change_%(model_name)s'],
'DELETE': ['%(app_label)s.delete_%(model_name)s'],
}
authenticated_users_only = True
def get_required_permissions(self, method, model_cls):
"""
Given a model and an HTTP method, return the list of permission
codes that the user is required to have.
"""
kwargs = {
'app_label': model_cls._meta.app_label,
'model_name': get_model_name(model_cls)
}
return [perm % kwargs for perm in self.perms_map[method]]
def has_permission(self, request, view):
# Workaround to ensure DjangoModelPermissions are not applied
# to the root view when using DefaultRouter.
if getattr(view, '_ignore_model_permissions', False):
return True
try:
queryset = view.get_queryset()
except AttributeError:
queryset = getattr(view, 'queryset', None)
assert queryset is not None, (
'Cannot apply DjangoModelPermissions on a view that '
'does not have `.queryset` property or overrides the '
'`.get_queryset()` method.')
perms = self.get_required_permissions(request.method, queryset.model)
return (
request.user and
(request.user.is_authenticated() or not self.authenticated_users_only) and
request.user.has_perms(perms)
)
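# To also demand a 'view' permission for read access, override the map in
# a subclass (illustrative; assumes such a permission is defined):
#
#     class StrictModelPermissions(DjangoModelPermissions):
#         perms_map = dict(DjangoModelPermissions.perms_map,
#                          GET=['%(app_label)s.view_%(model_name)s'])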
class DjangoModelPermissionsOrAnonReadOnly(DjangoModelPermissions):
"""
Similar to DjangoModelPermissions, except that anonymous users are
allowed read-only access.
"""
authenticated_users_only = False
class DjangoObjectPermissions(DjangoModelPermissions):
"""
The request is authenticated using Django's object-level permissions.
It requires an object-permissions-enabled backend, such as Django Guardian.
It ensures that the user is authenticated, and has the appropriate
`add`/`change`/`delete` permissions on the object using .has_perms.
This permission can only be applied against view classes that
provide a `.queryset` attribute.
"""
perms_map = {
'GET': [],
'OPTIONS': [],
'HEAD': [],
'POST': ['%(app_label)s.add_%(model_name)s'],
'PUT': ['%(app_label)s.change_%(model_name)s'],
'PATCH': ['%(app_label)s.change_%(model_name)s'],
'DELETE': ['%(app_label)s.delete_%(model_name)s'],
}
def get_required_object_permissions(self, method, model_cls):
kwargs = {
'app_label': model_cls._meta.app_label,
'model_name': get_model_name(model_cls)
}
return [perm % kwargs for perm in self.perms_map[method]]
def has_object_permission(self, request, view, obj):
try:
queryset = view.get_queryset()
except AttributeError:
queryset = getattr(view, 'queryset', None)
assert queryset is not None, (
'Cannot apply DjangoObjectPermissions on a view that '
'does not have `.queryset` property or overrides the '
'`.get_queryset()` method.')
model_cls = queryset.model
user = request.user
perms = self.get_required_object_permissions(request.method, model_cls)
if not user.has_perms(perms, obj):
# If the user does not have permissions we need to determine if
# they have read permissions to see 403, or not, and simply see
# a 404 response.
if request.method in SAFE_METHODS:
# Read permissions already checked and failed, no need
# to make another lookup.
raise Http404
read_perms = self.get_required_object_permissions('GET', model_cls)
if not user.has_perms(read_perms, obj):
raise Http404
# Has read permissions.
return False
return True
| bsd-2-clause |
itmanagerro/tresting | contrib/devtools/symbol-check.py | 52 | 6191 | #!/usr/bin/python2
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the (Linux) executables produced by gitian only contain
allowed gcc, glibc and libstdc++ version symbols. This makes sure they are
still compatible with the minimum supported Linux distribution versions.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py
'''
from __future__ import division, print_function, unicode_literals
import subprocess
import re
import sys
import os
# Debian 6.0.9 (Squeeze) has:
#
# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.11.3 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libc6)
# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libstdc%2B%2B6)
#
# Ubuntu 10.04.4 (Lucid Lynx) has:
#
# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid§ion=all)
# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid§ion=all)
# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid§ion=all&arch=any&keywords=libstdc%2B%2B&searchon=names)
#
# Taking the minimum of these as our target.
#
# According to the GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.4.0: GCC_4.4.0
# GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3
# (glibc) GLIBC_2_11
#
MAX_VERSIONS = {
'GCC': (4,4,0),
'CXXABI': (1,3,3),
'GLIBCXX': (3,4,13),
'GLIBC': (2,11)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
b'_edata', b'_end', b'_init', b'__bss_start', b'_fini', b'_IO_stdin_used'
}
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
# Allowed NEEDED libraries
ALLOWED_LIBRARIES = {
# bitcoind and bitcoin-qt
b'libgcc_s.so.1', # GCC base support
b'libc.so.6', # C library
b'libpthread.so.0', # threading
b'libanl.so.1', # DNS resolve
b'libm.so.6', # math library
b'librt.so.1', # real-time (clock)
b'ld-linux-x86-64.so.2', # 64-bit dynamic linker
b'ld-linux.so.2', # 32-bit dynamic linker
# bitcoin-qt only
b'libX11-xcb.so.1', # part of X11
b'libX11.so.6', # part of X11
b'libxcb.so.1', # part of X11
b'libfontconfig.so.1', # font support
b'libfreetype.so.6', # font parsing
b'libdl.so.2' # programming interface to dynamic linker
}
class CPPFilt(object):
'''
Demangle C++ symbol names.
Use a pipe to the 'c++filt' command.
'''
def __init__(self):
self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def __call__(self, mangled):
self.proc.stdin.write(mangled + b'\n')
self.proc.stdin.flush()
return self.proc.stdout.readline().rstrip()
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.wait()
def read_symbols(executable, imports=True):
'''
    Parse an ELF executable and return a list of (symbol, version) tuples
    for dynamic imported symbols, or exported symbols if imports=False.
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
syms = []
for line in stdout.split(b'\n'):
line = line.split()
if len(line)>7 and re.match(b'[0-9]+:$', line[0]):
(sym, _, version) = line[7].partition(b'@')
is_import = line[6] == b'UND'
if version.startswith(b'@'):
version = version[1:]
if is_import == imports:
syms.append((sym, version))
return syms
def check_version(max_versions, version):
if b'_' in version:
(lib, _, ver) = version.rpartition(b'_')
else:
lib = version
ver = '0'
ver = tuple([int(x) for x in ver.split(b'.')])
if not lib in max_versions:
return False
return ver <= max_versions[lib]
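# For instance, with the MAX_VERSIONS table above:
#     check_version(MAX_VERSIONS, b'GLIBC_2.4')   # -> True  (2.4 <= 2.11)
#     check_version(MAX_VERSIONS, b'GLIBC_2.15')  # -> False (2.15 > 2.11)
#     check_version(MAX_VERSIONS, b'FOO_1.0')     # -> False (unknown library)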
def read_libraries(filename):
p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
libraries = []
for line in stdout.split(b'\n'):
tokens = line.split()
if len(tokens)>2 and tokens[1] == b'(NEEDED)':
match = re.match(b'^Shared library: \[(.*)\]$', b' '.join(tokens[2:]))
if match:
libraries.append(match.group(1))
else:
raise ValueError('Unparseable (NEEDED) specification')
return libraries
if __name__ == '__main__':
cppfilt = CPPFilt()
retval = 0
for filename in sys.argv[1:]:
# Check imported symbols
for sym,version in read_symbols(filename, True):
if version and not check_version(MAX_VERSIONS, version):
print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym).decode('utf-8'), version.decode('utf-8')))
retval = 1
# Check exported symbols
for sym,version in read_symbols(filename, False):
if sym in IGNORE_EXPORTS:
continue
print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym).decode('utf-8')))
retval = 1
# Check dependency libraries
for library_name in read_libraries(filename):
if library_name not in ALLOWED_LIBRARIES:
print('%s: NEEDED library %s is not allowed' % (filename, library_name.decode('utf-8')))
retval = 1
exit(retval)
| mit |
mathdd/numpy | numpy/core/tests/test_umath.py | 29 | 69197 | from __future__ import division, absolute_import, print_function
import sys
import platform
import warnings
from numpy.testing.utils import _gen_alignment_data
import numpy.core.umath as ncu
import numpy as np
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal, assert_raises,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
dec, assert_allclose, assert_no_warnings
)
def on_powerpc():
""" True if we are running on a Power PC platform."""
return platform.processor() == 'powerpc' or \
platform.machine().startswith('ppc')
class _FilterInvalids(object):
def setUp(self):
self.olderr = np.seterr(invalid='ignore')
def tearDown(self):
np.seterr(**self.olderr)
class TestConstants(TestCase):
def test_pi(self):
assert_allclose(ncu.pi, 3.141592653589793, 1e-15)
def test_e(self):
assert_allclose(ncu.e, 2.718281828459045, 1e-15)
def test_euler_gamma(self):
assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15)
class TestOut(TestCase):
def test_out_subok(self):
for subok in (True, False):
a = np.array(0.5)
o = np.empty(())
r = np.add(a, 2, o, subok=subok)
assert_(r is o)
r = np.add(a, 2, out=o, subok=subok)
assert_(r is o)
r = np.add(a, 2, out=(o,), subok=subok)
assert_(r is o)
d = np.array(5.7)
o1 = np.empty(())
o2 = np.empty((), dtype=np.int32)
r1, r2 = np.frexp(d, o1, None, subok=subok)
assert_(r1 is o1)
r1, r2 = np.frexp(d, None, o2, subok=subok)
assert_(r2 is o2)
r1, r2 = np.frexp(d, o1, o2, subok=subok)
assert_(r1 is o1)
assert_(r2 is o2)
r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
assert_(r1 is o1)
r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
assert_(r2 is o2)
r1, r2 = np.frexp(d, out=(o1, o2), subok=subok)
assert_(r1 is o1)
assert_(r2 is o2)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', DeprecationWarning)
r1, r2 = np.frexp(d, out=o1, subok=subok)
assert_(r1 is o1)
assert_(w[0].category is DeprecationWarning)
assert_raises(ValueError, np.add, a, 2, o, o, subok=subok)
assert_raises(ValueError, np.add, a, 2, o, out=o, subok=subok)
assert_raises(ValueError, np.add, a, 2, None, out=o, subok=subok)
assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok)
assert_raises(ValueError, np.add, a, 2, out=(), subok=subok)
assert_raises(TypeError, np.add, a, 2, [], subok=subok)
assert_raises(TypeError, np.add, a, 2, out=[], subok=subok)
assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok)
o.flags.writeable = False
assert_raises(ValueError, np.add, a, 2, o, subok=subok)
assert_raises(ValueError, np.add, a, 2, out=o, subok=subok)
assert_raises(ValueError, np.add, a, 2, out=(o,), subok=subok)
def test_out_wrap_subok(self):
class ArrayWrap(np.ndarray):
__array_priority__ = 10
def __new__(cls, arr):
return np.asarray(arr).view(cls).copy()
def __array_wrap__(self, arr, context):
return arr.view(type(self))
for subok in (True, False):
a = ArrayWrap([0.5])
r = np.add(a, 2, subok=subok)
if subok:
assert_(isinstance(r, ArrayWrap))
else:
assert_(type(r) == np.ndarray)
r = np.add(a, 2, None, subok=subok)
if subok:
assert_(isinstance(r, ArrayWrap))
else:
assert_(type(r) == np.ndarray)
r = np.add(a, 2, out=None, subok=subok)
if subok:
assert_(isinstance(r, ArrayWrap))
else:
assert_(type(r) == np.ndarray)
r = np.add(a, 2, out=(None,), subok=subok)
if subok:
assert_(isinstance(r, ArrayWrap))
else:
assert_(type(r) == np.ndarray)
d = ArrayWrap([5.7])
o1 = np.empty((1,))
o2 = np.empty((1,), dtype=np.int32)
r1, r2 = np.frexp(d, o1, subok=subok)
if subok:
assert_(isinstance(r2, ArrayWrap))
else:
assert_(type(r2) == np.ndarray)
r1, r2 = np.frexp(d, o1, None, subok=subok)
if subok:
assert_(isinstance(r2, ArrayWrap))
else:
assert_(type(r2) == np.ndarray)
r1, r2 = np.frexp(d, None, o2, subok=subok)
if subok:
assert_(isinstance(r1, ArrayWrap))
else:
assert_(type(r1) == np.ndarray)
r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
if subok:
assert_(isinstance(r2, ArrayWrap))
else:
assert_(type(r2) == np.ndarray)
r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
if subok:
assert_(isinstance(r1, ArrayWrap))
else:
assert_(type(r1) == np.ndarray)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', DeprecationWarning)
r1, r2 = np.frexp(d, out=o1, subok=subok)
if subok:
assert_(isinstance(r2, ArrayWrap))
else:
assert_(type(r2) == np.ndarray)
assert_(w[0].category is DeprecationWarning)
class TestDivision(TestCase):
def test_division_int(self):
# int division should follow Python
x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
if 5 / 10 == 0.5:
assert_equal(x / 100, [0.05, 0.1, 0.9, 1,
-0.05, -0.1, -0.9, -1, -1.2])
else:
assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])
def test_division_complex(self):
# check that implementation is correct
msg = "Complex division implementation check"
x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128)
assert_almost_equal(x**2/x, x, err_msg=msg)
# check overflow, underflow
msg = "Complex division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = x**2/x
assert_almost_equal(y/x, [1, 1], err_msg=msg)
def test_zero_division_complex(self):
with np.errstate(invalid="ignore", divide="ignore"):
x = np.array([0.0], dtype=np.complex128)
y = 1.0/x
assert_(np.isinf(y)[0])
y = complex(np.inf, np.nan)/x
assert_(np.isinf(y)[0])
y = complex(np.nan, np.inf)/x
assert_(np.isinf(y)[0])
y = complex(np.inf, np.inf)/x
assert_(np.isinf(y)[0])
y = 0.0/x
assert_(np.isnan(y)[0])
def test_floor_division_complex(self):
# check that implementation is correct
msg = "Complex floor division implementation check"
x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128)
y = np.array([0., -1., 0., 0.], dtype=np.complex128)
assert_equal(np.floor_divide(x**2, x), y, err_msg=msg)
# check overflow, underflow
msg = "Complex floor division overflow/underflow check"
x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
y = np.floor_divide(x**2, x)
assert_equal(y, [1.e+110, 0], err_msg=msg)
class TestCbrt(TestCase):
def test_cbrt_scalar(self):
assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5)
def test_cbrt(self):
x = np.array([1., 2., -3., np.inf, -np.inf])
assert_almost_equal(np.cbrt(x**3), x)
assert_(np.isnan(np.cbrt(np.nan)))
assert_equal(np.cbrt(np.inf), np.inf)
assert_equal(np.cbrt(-np.inf), -np.inf)
class TestPower(TestCase):
def test_power_float(self):
x = np.array([1., 2., 3.])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_equal(x**2, [1., 4., 9.])
y = x.copy()
y **= 2
assert_equal(y, [1., 4., 9.])
assert_almost_equal(x**(-1), [1., 0.5, 1./3])
assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)])
for out, inp, msg in _gen_alignment_data(dtype=np.float32,
type='unary',
max_size=11):
exp = [ncu.sqrt(i) for i in inp]
assert_almost_equal(inp**(0.5), exp, err_msg=msg)
np.sqrt(inp, out=out)
assert_equal(out, exp, err_msg=msg)
for out, inp, msg in _gen_alignment_data(dtype=np.float64,
type='unary',
max_size=7):
exp = [ncu.sqrt(i) for i in inp]
assert_almost_equal(inp**(0.5), exp, err_msg=msg)
np.sqrt(inp, out=out)
assert_equal(out, exp, err_msg=msg)
def test_power_complex(self):
x = np.array([1+2j, 2+3j, 3+4j])
assert_equal(x**0, [1., 1., 1.])
assert_equal(x**1, x)
assert_almost_equal(x**2, [-3+4j, -5+12j, -7+24j])
assert_almost_equal(x**3, [(1+2j)**3, (2+3j)**3, (3+4j)**3])
assert_almost_equal(x**4, [(1+2j)**4, (2+3j)**4, (3+4j)**4])
assert_almost_equal(x**(-1), [1/(1+2j), 1/(2+3j), 1/(3+4j)])
assert_almost_equal(x**(-2), [1/(1+2j)**2, 1/(2+3j)**2, 1/(3+4j)**2])
assert_almost_equal(x**(-3), [(-11+2j)/125, (-46-9j)/2197,
(-117-44j)/15625])
assert_almost_equal(x**(0.5), [ncu.sqrt(1+2j), ncu.sqrt(2+3j),
ncu.sqrt(3+4j)])
norm = 1./((x**14)[0])
assert_almost_equal(x**14 * norm,
[i * norm for i in [-76443+16124j, 23161315+58317492j,
5583548873 + 2465133864j]])
# Ticket #836
def assert_complex_equal(x, y):
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
for z in [complex(0, np.inf), complex(1, np.inf)]:
z = np.array([z], dtype=np.complex_)
with np.errstate(invalid="ignore"):
assert_complex_equal(z**1, z)
assert_complex_equal(z**2, z*z)
assert_complex_equal(z**3, z*z*z)
def test_power_zero(self):
# ticket #1271
zero = np.array([0j])
one = np.array([1+0j])
cnan = np.array([complex(np.nan, np.nan)])
# FIXME cinf not tested.
#cinf = np.array([complex(np.inf, 0)])
def assert_complex_equal(x, y):
x, y = np.asarray(x), np.asarray(y)
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
# positive powers
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, p), zero)
# zero power
assert_complex_equal(np.power(zero, 0), one)
with np.errstate(invalid="ignore"):
assert_complex_equal(np.power(zero, 0+1j), cnan)
# negative power
for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
assert_complex_equal(np.power(zero, -p), cnan)
assert_complex_equal(np.power(zero, -1+0.2j), cnan)
def test_fast_power(self):
x = np.array([1, 2, 3], np.int16)
assert_((x**2.00001).dtype is (x**2.0).dtype)
# Check that the fast path ignores 1-element not 0-d arrays
res = x ** np.array([[[2]]])
assert_equal(res.shape, (1, 1, 3))
class TestLog2(TestCase):
def test_log2_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f', 'd', 'g']:
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.log2(xf), yf)
def test_log2_ints(self):
# a good log2 implementation should provide this,
# might fail on OS with bad libm
for i in range(1, 65):
v = np.log2(2.**i)
assert_equal(v, float(i), err_msg='at exponent %d' % i)
def test_log2_special(self):
assert_equal(np.log2(1.), 0.)
assert_equal(np.log2(np.inf), np.inf)
assert_(np.isnan(np.log2(np.nan)))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.log2(-1.)))
assert_(np.isnan(np.log2(-np.inf)))
assert_equal(np.log2(0.), -np.inf)
assert_(w[0].category is RuntimeWarning)
assert_(w[1].category is RuntimeWarning)
assert_(w[2].category is RuntimeWarning)
class TestExp2(TestCase):
def test_exp2_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f', 'd', 'g']:
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)
assert_almost_equal(np.exp2(yf), xf)
class TestLogAddExp2(_FilterInvalids):
# Need test for intermediate precisions
def test_logaddexp2_values(self):
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
xf = np.log2(np.array(x, dtype=dt))
yf = np.log2(np.array(y, dtype=dt))
zf = np.log2(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec_)
def test_logaddexp2_range(self):
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp2(logxf, logyf), logzf)
def test_inf(self):
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
with np.errstate(invalid='raise'):
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp2(logxf, logyf), logzf)
def test_nan(self):
assert_(np.isnan(np.logaddexp2(np.nan, np.inf)))
assert_(np.isnan(np.logaddexp2(np.inf, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, 0)))
assert_(np.isnan(np.logaddexp2(0, np.nan)))
assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))
class TestLog(TestCase):
def test_log_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f', 'd', 'g']:
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.log(xf), yf)
class TestExp(TestCase):
def test_exp_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for dt in ['f', 'd', 'g']:
log2_ = 0.69314718055994530943
xf = np.array(x, dtype=dt)
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.exp(yf), xf)
class TestLogAddExp(_FilterInvalids):
def test_logaddexp_values(self):
x = [1, 2, 3, 4, 5]
y = [5, 4, 3, 2, 1]
z = [6, 6, 6, 6, 6]
for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]):
xf = np.log(np.array(x, dtype=dt))
yf = np.log(np.array(y, dtype=dt))
zf = np.log(np.array(z, dtype=dt))
assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_)
def test_logaddexp_range(self):
x = [1000000, -1000000, 1000200, -1000200]
y = [1000200, -1000200, 1000000, -1000000]
z = [1000200, -1000000, 1000200, -1000000]
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
def test_inf(self):
inf = np.inf
x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
z = [inf, inf, inf, -inf, inf, inf, 1, 1]
with np.errstate(invalid='raise'):
for dt in ['f', 'd', 'g']:
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
assert_equal(np.logaddexp(logxf, logyf), logzf)
def test_nan(self):
assert_(np.isnan(np.logaddexp(np.nan, np.inf)))
assert_(np.isnan(np.logaddexp(np.inf, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, 0)))
assert_(np.isnan(np.logaddexp(0, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
class TestLog1p(TestCase):
def test_log1p(self):
assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2))
assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6))
def test_special(self):
with np.errstate(invalid="ignore", divide="ignore"):
assert_equal(ncu.log1p(np.nan), np.nan)
assert_equal(ncu.log1p(np.inf), np.inf)
assert_equal(ncu.log1p(-1.), -np.inf)
assert_equal(ncu.log1p(-2.), np.nan)
assert_equal(ncu.log1p(-np.inf), np.nan)
class TestExpm1(TestCase):
def test_expm1(self):
assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1)
assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1)
def test_special(self):
assert_equal(ncu.expm1(np.inf), np.inf)
assert_equal(ncu.expm1(0.), 0.)
assert_equal(ncu.expm1(-0.), -0.)
assert_equal(ncu.expm1(np.inf), np.inf)
assert_equal(ncu.expm1(-np.inf), -1.)
class TestHypot(TestCase, object):
def test_simple(self):
assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
assert_almost_equal(ncu.hypot(0, 0), 0)
def assert_hypot_isnan(x, y):
with np.errstate(invalid='ignore'):
assert_(np.isnan(ncu.hypot(x, y)),
"hypot(%s, %s) is %s, not nan" % (x, y, ncu.hypot(x, y)))
def assert_hypot_isinf(x, y):
with np.errstate(invalid='ignore'):
assert_(np.isinf(ncu.hypot(x, y)),
"hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y)))
class TestHypotSpecialValues(TestCase):
def test_nan_outputs(self):
assert_hypot_isnan(np.nan, np.nan)
assert_hypot_isnan(np.nan, 1)
def test_nan_outputs2(self):
assert_hypot_isinf(np.nan, np.inf)
assert_hypot_isinf(np.inf, np.nan)
assert_hypot_isinf(np.inf, 0)
assert_hypot_isinf(0, np.inf)
assert_hypot_isinf(np.inf, np.inf)
assert_hypot_isinf(np.inf, 23.0)
def test_no_fpe(self):
assert_no_warnings(ncu.hypot, np.inf, 0)
def assert_arctan2_isnan(x, y):
assert_(np.isnan(ncu.arctan2(x, y)), "arctan(%s, %s) is %s, not nan" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_ispinf(x, y):
assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0), "arctan(%s, %s) is %s, not +inf" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_isninf(x, y):
assert_((np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0), "arctan(%s, %s) is %s, not -inf" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_ispzero(x, y):
assert_((ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not +0" % (x, y, ncu.arctan2(x, y)))
def assert_arctan2_isnzero(x, y):
assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y)))
class TestArctan2SpecialValues(TestCase):
def test_one_one(self):
# atan2(1, 1) returns pi/4.
assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi)
assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi)
def test_zero_nzero(self):
# atan2(+-0, -0) returns +-pi.
assert_almost_equal(ncu.arctan2(np.PZERO, np.NZERO), np.pi)
assert_almost_equal(ncu.arctan2(np.NZERO, np.NZERO), -np.pi)
def test_zero_pzero(self):
# atan2(+-0, +0) returns +-0.
assert_arctan2_ispzero(np.PZERO, np.PZERO)
assert_arctan2_isnzero(np.NZERO, np.PZERO)
def test_zero_negative(self):
# atan2(+-0, x) returns +-pi for x < 0.
assert_almost_equal(ncu.arctan2(np.PZERO, -1), np.pi)
assert_almost_equal(ncu.arctan2(np.NZERO, -1), -np.pi)
def test_zero_positive(self):
# atan2(+-0, x) returns +-0 for x > 0.
assert_arctan2_ispzero(np.PZERO, 1)
assert_arctan2_isnzero(np.NZERO, 1)
def test_positive_zero(self):
# atan2(y, +-0) returns +pi/2 for y > 0.
assert_almost_equal(ncu.arctan2(1, np.PZERO), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(1, np.NZERO), 0.5 * np.pi)
def test_negative_zero(self):
# atan2(y, +-0) returns -pi/2 for y < 0.
assert_almost_equal(ncu.arctan2(-1, np.PZERO), -0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-1, np.NZERO), -0.5 * np.pi)
def test_any_ninf(self):
# atan2(+-y, -infinity) returns +-pi for finite y > 0.
assert_almost_equal(ncu.arctan2(1, np.NINF), np.pi)
assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
def test_any_pinf(self):
# atan2(+-y, +infinity) returns +-0 for finite y > 0.
assert_arctan2_ispzero(1, np.inf)
assert_arctan2_isnzero(-1, np.inf)
def test_inf_any(self):
# atan2(+-infinity, x) returns +-pi/2 for finite x.
assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi)
def test_inf_ninf(self):
# atan2(+-infinity, -infinity) returns +-3*pi/4.
assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi)
def test_inf_pinf(self):
# atan2(+-infinity, +infinity) returns +-pi/4.
assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi)
assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi)
def test_nan_any(self):
# atan2(nan, x) returns nan for any x, including inf
assert_arctan2_isnan(np.nan, np.inf)
assert_arctan2_isnan(np.inf, np.nan)
assert_arctan2_isnan(np.nan, np.nan)
class TestLdexp(TestCase):
def _check_ldexp(self, tp):
assert_almost_equal(ncu.ldexp(np.array(2., np.float32),
np.array(3, tp)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.float64),
np.array(3, tp)), 16.)
assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble),
np.array(3, tp)), 16.)
def test_ldexp(self):
# The default Python int type should work
assert_almost_equal(ncu.ldexp(2., 3), 16.)
# The following int types should all be accepted
self._check_ldexp(np.int8)
self._check_ldexp(np.int16)
self._check_ldexp(np.int32)
self._check_ldexp('i')
self._check_ldexp('l')
def test_ldexp_overflow(self):
# silence warning emitted on overflow
with np.errstate(over="ignore"):
imax = np.iinfo(np.dtype('l')).max
imin = np.iinfo(np.dtype('l')).min
assert_equal(ncu.ldexp(2., imax), np.inf)
assert_equal(ncu.ldexp(2., imin), 0)
class TestMaximum(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.maximum.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), np.nan)
assert_equal(func(tmp2), np.nan)
def test_reduce_complex(self):
assert_equal(np.maximum.reduce([1, 2j]), 1)
assert_equal(np.maximum.reduce([1+3j, 2j]), 1+3j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([nan, nan, nan])
assert_equal(np.maximum(arg1, arg2), out)
def test_object_nans(self):
# Multiple checks to give this a chance to
# fail if cmp is used instead of rich compare.
# Failure cannot be guaranteed.
for i in range(1):
x = np.array(float('nan'), np.object)
y = 1.0
z = np.array(float('nan'), np.object)
assert_(np.maximum(x, y) == 1.0)
assert_(np.maximum(z, y) == 1.0)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([nan, nan, nan], dtype=np.complex)
assert_equal(np.maximum(arg1, arg2), out)
def test_object_array(self):
arg1 = np.arange(5, dtype=np.object)
arg2 = arg1 + 1
assert_equal(np.maximum(arg1, arg2), arg2)
class TestMinimum(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.minimum.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), np.nan)
assert_equal(func(tmp2), np.nan)
def test_reduce_complex(self):
assert_equal(np.minimum.reduce([1, 2j]), 2j)
assert_equal(np.minimum.reduce([1+3j, 2j]), 2j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([nan, nan, nan])
assert_equal(np.minimum(arg1, arg2), out)
def test_object_nans(self):
# Multiple checks to give this a chance to
# fail if cmp is used instead of rich compare.
# Failure cannot be guaranteed.
for i in range(1):
x = np.array(float('nan'), np.object)
y = 1.0
z = np.array(float('nan'), np.object)
assert_(np.minimum(x, y) == 1.0)
assert_(np.minimum(z, y) == 1.0)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([nan, nan, nan], dtype=np.complex)
assert_equal(np.minimum(arg1, arg2), out)
def test_object_array(self):
arg1 = np.arange(5, dtype=np.object)
arg2 = arg1 + 1
assert_equal(np.minimum(arg1, arg2), arg1)
class TestFmax(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.fmax.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 10)
assert_equal(func(tmp2), 10)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), 9)
assert_equal(func(tmp2), 9)
def test_reduce_complex(self):
assert_equal(np.fmax.reduce([1, 2j]), 1)
assert_equal(np.fmax.reduce([1+3j, 2j]), 1+3j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([0, 0, nan])
assert_equal(np.fmax(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([0, 0, nan], dtype=np.complex)
assert_equal(np.fmax(arg1, arg2), out)
class TestFmin(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.fmin.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), 1)
assert_equal(func(tmp2), 1)
def test_reduce_complex(self):
assert_equal(np.fmin.reduce([1, 2j]), 2j)
assert_equal(np.fmin.reduce([1+3j, 2j]), 2j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([0, 0, nan])
assert_equal(np.fmin(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = np.array([0, cnan, cnan], dtype=np.complex)
arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
out = np.array([0, 0, nan], dtype=np.complex)
assert_equal(np.fmin(arg1, arg2), out)
class TestBool(TestCase):
def test_truth_table_logical(self):
# 2, 3 and 4 serve as true values
input1 = [0, 0, 3, 2]
input2 = [0, 4, 0, 2]
typecodes = (np.typecodes['AllFloat']
+ np.typecodes['AllInteger']
+ '?') # boolean
for dtype in map(np.dtype, typecodes):
arg1 = np.asarray(input1, dtype=dtype)
arg2 = np.asarray(input2, dtype=dtype)
# OR
out = [False, True, True, True]
for func in (np.logical_or, np.maximum):
assert_equal(func(arg1, arg2).astype(bool), out)
# AND
out = [False, False, False, True]
for func in (np.logical_and, np.minimum):
assert_equal(func(arg1, arg2).astype(bool), out)
# XOR
out = [False, True, True, False]
for func in (np.logical_xor, np.not_equal):
assert_equal(func(arg1, arg2).astype(bool), out)
def test_truth_table_bitwise(self):
arg1 = [False, False, True, True]
arg2 = [False, True, False, True]
out = [False, True, True, True]
assert_equal(np.bitwise_or(arg1, arg2), out)
out = [False, False, False, True]
assert_equal(np.bitwise_and(arg1, arg2), out)
out = [False, True, True, False]
assert_equal(np.bitwise_xor(arg1, arg2), out)
class TestInt(TestCase):
def test_logical_not(self):
x = np.ones(10, dtype=np.int16)
o = np.ones(10 * 2, dtype=np.bool)
tgt = o.copy()
tgt[::2] = False
os = o[::2]
assert_array_equal(np.logical_not(x, out=os), False)
assert_array_equal(o, tgt)
class TestFloatingPoint(TestCase):
def test_floating_point(self):
assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
class TestDegrees(TestCase):
def test_degrees(self):
assert_almost_equal(ncu.degrees(np.pi), 180.0)
assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
class TestRadians(TestCase):
def test_radians(self):
assert_almost_equal(ncu.radians(180.0), np.pi)
assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
class TestSign(TestCase):
def test_sign(self):
a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
out = np.zeros(a.shape)
tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0])
with np.errstate(invalid='ignore'):
res = ncu.sign(a)
assert_equal(res, tgt)
res = ncu.sign(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
class TestMinMax(TestCase):
def test_minmax_blocked(self):
# simd tests on max/min, test all alignments, slow but important
# for 2 * vz + 2 * (vs - 1) + 1 (unrolled once)
for dt, sz in [(np.float32, 15), (np.float64, 7)]:
for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
max_size=sz):
for i in range(inp.size):
inp[:] = np.arange(inp.size, dtype=dt)
inp[i] = np.nan
emsg = lambda: '%r\n%s' % (inp, msg)
assert_(np.isnan(inp.max()), msg=emsg)
assert_(np.isnan(inp.min()), msg=emsg)
inp[i] = 1e10
assert_equal(inp.max(), 1e10, err_msg=msg)
inp[i] = -1e10
assert_equal(inp.min(), -1e10, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
# i.e. doubles are aligned to 4 bytes on i386
d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
assert_equal(d.max(), d[0])
assert_equal(d.min(), d[0])
class TestAbsoluteNegative(TestCase):
def test_abs_neg_blocked(self):
# simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1
for dt, sz in [(np.float32, 11), (np.float64, 5)]:
for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary',
max_size=sz):
tgt = [ncu.absolute(i) for i in inp]
np.absolute(inp, out=out)
assert_equal(out, tgt, err_msg=msg)
self.assertTrue((out >= 0).all())
tgt = [-1*(i) for i in inp]
np.negative(inp, out=out)
assert_equal(out, tgt, err_msg=msg)
# will throw invalid flag depending on compiler optimizations
with np.errstate(invalid='ignore'):
for v in [np.nan, -np.inf, np.inf]:
for i in range(inp.size):
d = np.arange(inp.size, dtype=dt)
inp[:] = -d
inp[i] = v
d[i] = -v if v == -np.inf else v
assert_array_equal(np.abs(inp), d, err_msg=msg)
np.abs(inp, out=out)
assert_array_equal(out, d, err_msg=msg)
assert_array_equal(-inp, -1*inp, err_msg=msg)
np.negative(inp, out=out)
assert_array_equal(out, -1*inp, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
# i.e. doubles are aligned to 4 bytes on i386
d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
assert_equal(np.abs(d), d)
assert_equal(np.negative(d), -d)
np.negative(d, out=d)
np.negative(np.ones_like(d), out=d)
np.abs(d, out=d)
np.abs(np.ones_like(d), out=d)
class TestSpecialMethods(TestCase):
def test_wrap(self):
class with_wrap(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = with_wrap()
r.arr = arr
r.context = context
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
func, args, i = x.context
self.assertTrue(func is ncu.minimum)
self.assertEqual(len(args), 2)
assert_equal(args[0], a)
assert_equal(args[1], a)
self.assertEqual(i, 0)
def test_wrap_with_iterable(self):
# test fix for bug #1026:
class with_wrap(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1).view(cls).copy()
def __array_wrap__(self, arr, context):
return arr.view(type(self))
a = with_wrap()
x = ncu.multiply(a, (1, 2, 3))
self.assertTrue(isinstance(x, with_wrap))
assert_array_equal(x, np.array((1, 2, 3)))
def test_priority_with_scalar(self):
# test fix for bug #826:
class A(np.ndarray):
__array_priority__ = 10
def __new__(cls):
return np.asarray(1.0, 'float64').view(cls).copy()
a = A()
x = np.float64(1)*a
self.assertTrue(isinstance(x, A))
assert_array_equal(x, np.array(1))
def test_old_wrap(self):
class with_wrap(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr):
r = with_wrap()
r.arr = arr
return r
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
def test_priority(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
r = type(self)()
r.arr = arr
r.context = context
return r
class B(A):
__array_priority__ = 20.
class C(A):
__array_priority__ = 40.
x = np.zeros(1)
a = A()
b = B()
c = C()
f = ncu.minimum
self.assertTrue(type(f(x, x)) is np.ndarray)
self.assertTrue(type(f(x, a)) is A)
self.assertTrue(type(f(x, b)) is B)
self.assertTrue(type(f(x, c)) is C)
self.assertTrue(type(f(a, x)) is A)
self.assertTrue(type(f(b, x)) is B)
self.assertTrue(type(f(c, x)) is C)
self.assertTrue(type(f(a, a)) is A)
self.assertTrue(type(f(a, b)) is B)
self.assertTrue(type(f(b, a)) is B)
self.assertTrue(type(f(b, b)) is B)
self.assertTrue(type(f(b, c)) is C)
self.assertTrue(type(f(c, b)) is C)
self.assertTrue(type(f(c, c)) is C)
self.assertTrue(type(ncu.exp(a)) is A)
self.assertTrue(type(ncu.exp(b)) is B)
self.assertTrue(type(ncu.exp(c)) is C)
def test_failing_wrap(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
raise RuntimeError
a = A()
self.assertRaises(RuntimeError, ncu.maximum, a, a)
def test_default_prepare(self):
class with_wrap(object):
__array_priority__ = 10
def __array__(self):
return np.zeros(1)
def __array_wrap__(self, arr, context):
return arr
a = with_wrap()
x = ncu.minimum(a, a)
assert_equal(x, np.zeros(1))
assert_equal(type(x), np.ndarray)
def test_prepare(self):
class with_prepare(np.ndarray):
__array_priority__ = 10
def __array_prepare__(self, arr, context):
# make sure we can return a new
return np.array(arr).view(type=with_prepare)
a = np.array(1).view(type=with_prepare)
x = np.add(a, a)
assert_equal(x, np.array(2))
assert_equal(type(x), with_prepare)
def test_failing_prepare(self):
class A(object):
def __array__(self):
return np.zeros(1)
def __array_prepare__(self, arr, context=None):
raise RuntimeError
a = A()
self.assertRaises(RuntimeError, ncu.maximum, a, a)
def test_array_with_context(self):
class A(object):
def __array__(self, dtype=None, context=None):
func, args, i = context
self.func = func
self.args = args
self.i = i
return np.zeros(1)
class B(object):
def __array__(self, dtype=None):
return np.zeros(1, dtype)
class C(object):
def __array__(self):
return np.zeros(1)
a = A()
ncu.maximum(np.zeros(1), a)
self.assertTrue(a.func is ncu.maximum)
assert_equal(a.args[0], 0)
self.assertTrue(a.args[1] is a)
self.assertTrue(a.i == 1)
assert_equal(ncu.maximum(a, B()), 0)
assert_equal(ncu.maximum(a, C()), 0)
def test_ufunc_override(self):
class A(object):
def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
return self, func, method, pos, inputs, kwargs
a = A()
b = np.matrix([1])
res0 = np.multiply(a, b)
res1 = np.dot(a, b)
# self
assert_equal(res0[0], a)
assert_equal(res1[0], a)
assert_equal(res0[1], np.multiply)
assert_equal(res1[1], np.dot)
assert_equal(res0[2], '__call__')
assert_equal(res1[2], '__call__')
assert_equal(res0[3], 0)
assert_equal(res1[3], 0)
assert_equal(res0[4], (a, b))
assert_equal(res1[4], (a, b))
assert_equal(res0[5], {})
assert_equal(res1[5], {})
def test_ufunc_override_mro(self):
# Some multi arg functions for testing.
def tres_mul(a, b, c):
return a * b * c
def quatro_mul(a, b, c, d):
return a * b * c * d
# Make these into ufuncs.
three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1)
four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1)
class A(object):
def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
return "A"
class ASub(A):
def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
return "ASub"
class B(object):
def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
return "B"
class C(object):
def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
return NotImplemented
class CSub(object):
def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
a_sub = ASub()
b = B()
c = C()
c_sub = CSub()
# Standard
res = np.multiply(a, a_sub)
assert_equal(res, "ASub")
res = np.multiply(a_sub, b)
assert_equal(res, "ASub")
# With 1 NotImplemented
res = np.multiply(c, a)
assert_equal(res, "A")
# Both NotImplemented.
assert_raises(TypeError, np.multiply, c, c_sub)
assert_raises(TypeError, np.multiply, c_sub, c)
assert_raises(TypeError, np.multiply, 2, c)
# Ternary testing.
assert_equal(three_mul_ufunc(a, 1, 2), "A")
assert_equal(three_mul_ufunc(1, a, 2), "A")
assert_equal(three_mul_ufunc(1, 2, a), "A")
assert_equal(three_mul_ufunc(a, a, 6), "A")
assert_equal(three_mul_ufunc(a, 2, a), "A")
assert_equal(three_mul_ufunc(a, 2, b), "A")
assert_equal(three_mul_ufunc(a, 2, a_sub), "ASub")
assert_equal(three_mul_ufunc(a, a_sub, 3), "ASub")
assert_equal(three_mul_ufunc(c, a_sub, 3), "ASub")
assert_equal(three_mul_ufunc(1, a_sub, c), "ASub")
assert_equal(three_mul_ufunc(a, b, c), "A")
assert_equal(three_mul_ufunc(a, b, c_sub), "A")
assert_equal(three_mul_ufunc(1, 2, b), "B")
assert_raises(TypeError, three_mul_ufunc, 1, 2, c)
assert_raises(TypeError, three_mul_ufunc, c_sub, 2, c)
assert_raises(TypeError, three_mul_ufunc, c_sub, 2, 3)
# Quaternary testing.
assert_equal(four_mul_ufunc(a, 1, 2, 3), "A")
assert_equal(four_mul_ufunc(1, a, 2, 3), "A")
assert_equal(four_mul_ufunc(1, 1, a, 3), "A")
assert_equal(four_mul_ufunc(1, 1, 2, a), "A")
assert_equal(four_mul_ufunc(a, b, 2, 3), "A")
assert_equal(four_mul_ufunc(1, a, 2, b), "A")
assert_equal(four_mul_ufunc(b, 1, a, 3), "B")
assert_equal(four_mul_ufunc(a_sub, 1, 2, a), "ASub")
assert_equal(four_mul_ufunc(a, 1, 2, a_sub), "ASub")
assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c)
assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c)
assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c)
def test_ufunc_override_methods(self):
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return self, ufunc, method, pos, inputs, kwargs
# __call__
a = A()
res = np.multiply.__call__(1, a, foo='bar', answer=42)
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], '__call__')
assert_equal(res[3], 1)
assert_equal(res[4], (1, a))
assert_equal(res[5], {'foo': 'bar', 'answer': 42})
# reduce, positional args
res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduce')
assert_equal(res[3], 0)
assert_equal(res[4], (a,))
assert_equal(res[5], {'dtype':'dtype0',
'out': 'out0',
'keepdims': 'keep0',
'axis': 'axis0'})
# reduce, kwargs
res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0',
keepdims='keep0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduce')
assert_equal(res[3], 0)
assert_equal(res[4], (a,))
assert_equal(res[5], {'dtype':'dtype0',
'out': 'out0',
'keepdims': 'keep0',
'axis': 'axis0'})
# accumulate, pos args
res = np.multiply.accumulate(a, 'axis0', 'dtype0', 'out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'accumulate')
assert_equal(res[3], 0)
assert_equal(res[4], (a,))
assert_equal(res[5], {'dtype':'dtype0',
'out': 'out0',
'axis': 'axis0'})
# accumulate, kwargs
res = np.multiply.accumulate(a, axis='axis0', dtype='dtype0',
out='out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'accumulate')
assert_equal(res[3], 0)
assert_equal(res[4], (a,))
assert_equal(res[5], {'dtype':'dtype0',
'out': 'out0',
'axis': 'axis0'})
# reduceat, pos args
res = np.multiply.reduceat(a, [4, 2], 'axis0', 'dtype0', 'out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduceat')
assert_equal(res[3], 0)
assert_equal(res[4], (a, [4, 2]))
assert_equal(res[5], {'dtype':'dtype0',
'out': 'out0',
'axis': 'axis0'})
# reduceat, kwargs
res = np.multiply.reduceat(a, [4, 2], axis='axis0', dtype='dtype0',
out='out0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduceat')
assert_equal(res[3], 0)
assert_equal(res[4], (a, [4, 2]))
assert_equal(res[5], {'dtype':'dtype0',
'out': 'out0',
'axis': 'axis0'})
# outer
res = np.multiply.outer(a, 42)
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'outer')
assert_equal(res[3], 0)
assert_equal(res[4], (a, 42))
assert_equal(res[5], {})
# at
res = np.multiply.at(a, [4, 2], 'b0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'at')
assert_equal(res[3], 0)
assert_equal(res[4], (a, [4, 2], 'b0'))
def test_ufunc_override_out(self):
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return kwargs
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return kwargs
a = A()
b = B()
res0 = np.multiply(a, b, 'out_arg')
res1 = np.multiply(a, b, out='out_arg')
res2 = np.multiply(2, b, 'out_arg')
res3 = np.multiply(3, b, out='out_arg')
res4 = np.multiply(a, 4, 'out_arg')
res5 = np.multiply(a, 5, out='out_arg')
assert_equal(res0['out'], 'out_arg')
assert_equal(res1['out'], 'out_arg')
assert_equal(res2['out'], 'out_arg')
assert_equal(res3['out'], 'out_arg')
assert_equal(res4['out'], 'out_arg')
assert_equal(res5['out'], 'out_arg')
# ufuncs with multiple output modf and frexp.
res6 = np.modf(a, 'out0', 'out1')
res7 = np.frexp(a, 'out0', 'out1')
assert_equal(res6['out'][0], 'out0')
assert_equal(res6['out'][1], 'out1')
assert_equal(res7['out'][0], 'out0')
assert_equal(res7['out'][1], 'out1')
def test_ufunc_override_exception(self):
class A(object):
def __numpy_ufunc__(self, *a, **kwargs):
raise ValueError("oops")
a = A()
for func in [np.divide, np.dot]:
assert_raises(ValueError, func, a, a)
class TestChoose(TestCase):
def test_mixed(self):
c = np.array([True, True])
a = np.array([True, True])
assert_equal(np.choose(c, (a, 1)), np.array([1, 1]))
def is_longdouble_finfo_bogus():
info = np.finfo(np.longcomplex)
return not np.isfinite(np.log10(info.tiny/info.eps))
class TestComplexFunctions(object):
funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh,
np.arctanh, np.sin, np.cos, np.tan, np.exp,
np.exp2, np.log, np.sqrt, np.log10, np.log2,
np.log1p]
def test_it(self):
for f in self.funcs:
if f is np.arccosh:
x = 1.5
else:
x = .5
fr = f(x)
fz = f(np.complex(x))
assert_almost_equal(fz.real, fr, err_msg='real part %s' % f)
assert_almost_equal(fz.imag, 0., err_msg='imag part %s' % f)
def test_precisions_consistent(self):
z = 1 + 1j
for f in self.funcs:
fcf = f(np.csingle(z))
fcd = f(np.cdouble(z))
fcl = f(np.clongdouble(z))
assert_almost_equal(fcf, fcd, decimal=6, err_msg='fcf-fcd %s' % f)
assert_almost_equal(fcl, fcd, decimal=15, err_msg='fcl-fcd %s' % f)
def test_branch_cuts(self):
# check branch cuts and continuity on them
yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True
yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True
yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True
yield _check_branch_cut, np.arccos, [ -2, 2], [1j, 1j], 1, -1, True
yield _check_branch_cut, np.arctan, [0-2j, 2j], [1, 1], -1, 1, True
yield _check_branch_cut, np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True
yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True
yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True
# check against bogus branch cuts: assert continuity between quadrants
yield _check_branch_cut, np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1
yield _check_branch_cut, np.arccos, [0-2j, 2j], [ 1, 1], 1, 1
yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1
yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1
yield _check_branch_cut, np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1
yield _check_branch_cut, np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1
def test_branch_cuts_complex64(self):
# check branch cuts and continuity on them
yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True, np.complex64
yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True, np.complex64
yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True, np.complex64
yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True, np.complex64
yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True, np.complex64
yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64
yield _check_branch_cut, np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64
yield _check_branch_cut, np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64
yield _check_branch_cut, np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64
yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64
yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64
# check against bogus branch cuts: assert continuity between quadrants
yield _check_branch_cut, np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64
yield _check_branch_cut, np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64
yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64
yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64
yield _check_branch_cut, np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64
yield _check_branch_cut, np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64
def test_against_cmath(self):
import cmath
points = [-1-1j, -1+1j, +1-1j, +1+1j]
name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
atol = 4*np.finfo(np.complex).eps
for func in self.funcs:
fname = func.__name__.split('.')[-1]
cname = name_map.get(fname, fname)
try:
cfunc = getattr(cmath, cname)
except AttributeError:
continue
for p in points:
a = complex(func(np.complex_(p)))
b = cfunc(p)
assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s" % (fname, p, a, b))
def check_loss_of_precision(self, dtype):
"""Check loss of precision in complex arc* functions"""
# Check against known-good functions
info = np.finfo(dtype)
real_dtype = dtype(0.).real.dtype
eps = info.eps
def check(x, rtol):
x = x.astype(real_dtype)
z = x.astype(dtype)
d = np.absolute(np.arcsinh(x)/np.arcsinh(z).real - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arcsinh'))
z = (1j*x).astype(dtype)
d = np.absolute(np.arcsinh(x)/np.arcsin(z).imag - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arcsin'))
z = x.astype(dtype)
d = np.absolute(np.arctanh(x)/np.arctanh(z).real - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arctanh'))
z = (1j*x).astype(dtype)
d = np.absolute(np.arctanh(x)/np.arctan(z).imag - 1)
assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(),
'arctan'))
# The switchover was chosen as 1e-3; hence there can be up to
# ~eps/1e-3 of relative cancellation error before it
x_series = np.logspace(-20, -3.001, 200)
x_basic = np.logspace(-2.999, 0, 10, endpoint=False)
if dtype is np.longcomplex:
# It's not guaranteed that the system-provided arc functions
# are accurate down to a few epsilons. (e.g. on Linux 64-bit)
# So, give more leeway for long complex tests here:
check(x_series, 50*eps)
else:
check(x_series, 2.1*eps)
check(x_basic, 2*eps/1e-3)
# Check a few points
z = np.array([1e-5*(1+1j)], dtype=dtype)
p = 9.999999999333333333e-6 + 1.000000000066666666e-5j
d = np.absolute(1-np.arctanh(z)/p)
assert_(np.all(d < 1e-15))
p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j
d = np.absolute(1-np.arcsinh(z)/p)
assert_(np.all(d < 1e-15))
p = 9.999999999333333333e-6j + 1.000000000066666666e-5
d = np.absolute(1-np.arctan(z)/p)
assert_(np.all(d < 1e-15))
p = 1.0000000000333333333e-5j + 9.999999999666666667e-6
d = np.absolute(1-np.arcsin(z)/p)
assert_(np.all(d < 1e-15))
# Check continuity across switchover points
def check(func, z0, d=1):
z0 = np.asarray(z0, dtype=dtype)
zp = z0 + abs(z0) * d * eps * 2
zm = z0 - abs(z0) * d * eps * 2
assert_(np.all(zp != zm), (zp, zm))
# NB: the cancellation error at the switchover is at least eps
good = (abs(func(zp) - func(zm)) < 2*eps)
assert_(np.all(good), (func, z0[~good]))
for func in (np.arcsinh, np.arcsin, np.arctanh, np.arctan):
pts = [rp+1j*ip for rp in (-1e-3, 0, 1e-3) for ip in(-1e-3, 0, 1e-3)
if rp != 0 or ip != 0]
check(func, pts, 1)
check(func, pts, 1j)
check(func, pts, 1+1j)
def test_loss_of_precision(self):
for dtype in [np.complex64, np.complex_]:
yield self.check_loss_of_precision, dtype
@dec.knownfailureif(is_longdouble_finfo_bogus(), "Bogus long double finfo")
def test_loss_of_precision_longcomplex(self):
self.check_loss_of_precision(np.longcomplex)
class TestAttributes(TestCase):
def test_attributes(self):
add = ncu.add
assert_equal(add.__name__, 'add')
assert_(add.__doc__.startswith('add(x1, x2[, out])\n\n'))
self.assertTrue(add.ntypes >= 18) # don't fail if types added
self.assertTrue('ii->i' in add.types)
assert_equal(add.nin, 2)
assert_equal(add.nout, 1)
assert_equal(add.identity, 0)
class TestSubclass(TestCase):
def test_subclass_op(self):
class simple(np.ndarray):
def __new__(subtype, shape):
self = np.ndarray.__new__(subtype, shape, dtype=object)
self.fill(0)
return self
a = simple((3, 4))
assert_equal(a+a, a)
def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
dtype=np.complex):
"""
Check for a branch cut in a function.
Assert that `x0` lies on a branch cut of function `f` and `f` is
continuous from the direction `dx`.
Parameters
----------
f : func
Function to check
x0 : array-like
Point on branch cut
dx : array-like
Direction to check continuity in
re_sign, im_sign : {1, -1}
Change of sign of the real or imaginary part expected
sig_zero_ok : bool
Whether to check if the branch cut respects signed zero (if applicable)
dtype : dtype
Dtype to check (should be complex)
"""
x0 = np.atleast_1d(x0).astype(dtype)
dx = np.atleast_1d(dx).astype(dtype)
if np.dtype(dtype).char == 'F':
scale = np.finfo(dtype).eps * 1e2
atol = np.float32(1e-2)
else:
scale = np.finfo(dtype).eps * 1e3
atol = 1e-4
y0 = f(x0)
yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx))
ym = f(x0 - dx*scale*np.absolute(x0)/np.absolute(dx))
assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp))
assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp))
assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym))
assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym))
if sig_zero_ok:
# check that signed zeros also work as a displacement
jr = (x0.real == 0) & (dx.real != 0)
ji = (x0.imag == 0) & (dx.imag != 0)
if np.any(jr):
x = x0[jr]
x.real = np.NZERO
ym = f(x)
assert_(np.all(np.absolute(y0[jr].real - ym.real*re_sign) < atol), (y0[jr], ym))
assert_(np.all(np.absolute(y0[jr].imag - ym.imag*im_sign) < atol), (y0[jr], ym))
if np.any(ji):
x = x0[ji]
x.imag = np.NZERO
ym = f(x)
assert_(np.all(np.absolute(y0[ji].real - ym.real*re_sign) < atol), (y0[ji], ym))
assert_(np.all(np.absolute(y0[ji].imag - ym.imag*im_sign) < atol), (y0[ji], ym))
def test_copysign():
assert_(np.copysign(1, -1) == -1)
with np.errstate(divide="ignore"):
assert_(1 / np.copysign(0, -1) < 0)
assert_(1 / np.copysign(0, 1) > 0)
assert_(np.signbit(np.copysign(np.nan, -1)))
assert_(not np.signbit(np.copysign(np.nan, 1)))
def _test_nextafter(t):
one = t(1)
two = t(2)
zero = t(0)
eps = np.finfo(t).eps
assert_(np.nextafter(one, two) - one == eps)
assert_(np.nextafter(one, zero) - one < 0)
assert_(np.isnan(np.nextafter(np.nan, one)))
assert_(np.isnan(np.nextafter(one, np.nan)))
assert_(np.nextafter(one, one) == one)
def test_nextafter():
return _test_nextafter(np.float64)
def test_nextafterf():
return _test_nextafter(np.float32)
@dec.knownfailureif(sys.platform == 'win32' or on_powerpc(),
"Long double support buggy on win32 and PPC, ticket 1664.")
def test_nextafterl():
return _test_nextafter(np.longdouble)
def _test_spacing(t):
one = t(1)
eps = np.finfo(t).eps
nan = t(np.nan)
inf = t(np.inf)
with np.errstate(invalid='ignore'):
assert_(np.spacing(one) == eps)
assert_(np.isnan(np.spacing(nan)))
assert_(np.isnan(np.spacing(inf)))
assert_(np.isnan(np.spacing(-inf)))
assert_(np.spacing(t(1e30)) != 0)
def test_spacing():
return _test_spacing(np.float64)
def test_spacingf():
return _test_spacing(np.float32)
@dec.knownfailureif(sys.platform == 'win32' or on_powerpc(),
"Long double support buggy on win32 and PPC, ticket 1664.")
def test_spacingl():
return _test_spacing(np.longdouble)
def test_spacing_gfortran():
# Reference from this fortran file, built with gfortran 4.3.3 on linux
# 32bits:
# PROGRAM test_spacing
# INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37)
# INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200)
#
# WRITE(*,*) spacing(0.00001_DBL)
# WRITE(*,*) spacing(1.0_DBL)
# WRITE(*,*) spacing(1000._DBL)
# WRITE(*,*) spacing(10500._DBL)
#
# WRITE(*,*) spacing(0.00001_SGL)
# WRITE(*,*) spacing(1.0_SGL)
# WRITE(*,*) spacing(1000._SGL)
# WRITE(*,*) spacing(10500._SGL)
# END PROGRAM
ref = {}
ref[np.float64] = [1.69406589450860068E-021,
2.22044604925031308E-016,
1.13686837721616030E-013,
1.81898940354585648E-012]
ref[np.float32] = [
9.09494702E-13,
1.19209290E-07,
6.10351563E-05,
9.76562500E-04]
for dt, dec_ in zip([np.float32, np.float64], (10, 20)):
x = np.array([1e-5, 1, 1000, 10500], dtype=dt)
assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec_)
def test_nextafter_vs_spacing():
# XXX: spacing does not handle long double yet
for t in [np.float32, np.float64]:
for _f in [1, 1e-5, 1000]:
f = t(_f)
f1 = t(_f + 1)
assert_(np.nextafter(f, f1) - f == np.spacing(f))
def test_pos_nan():
"""Check np.nan is a positive nan."""
assert_(np.signbit(np.nan) == 0)
def test_reduceat():
"""Test bug in reduceat when structured arrays are not copied."""
db = np.dtype([('name', 'S11'), ('time', np.int64), ('value', np.float32)])
a = np.empty([100], dtype=db)
a['name'] = 'Simple'
a['time'] = 10
a['value'] = 100
indx = [0, 7, 15, 25]
h2 = []
val1 = indx[0]
for val2 in indx[1:]:
h2.append(np.add.reduce(a['value'][val1:val2]))
val1 = val2
h2.append(np.add.reduce(a['value'][val1:]))
h2 = np.array(h2)
# test buffered -- this should work
h1 = np.add.reduceat(a['value'], indx)
assert_array_almost_equal(h1, h2)
# This is when the error occurs.
# test no buffer
np.setbufsize(32)
h1 = np.add.reduceat(a['value'], indx)
np.setbufsize(np.UFUNC_BUFSIZE_DEFAULT)
assert_array_almost_equal(h1, h2)
def test_reduceat_empty():
"""Reduceat should work with empty arrays"""
indices = np.array([], 'i4')
x = np.array([], 'f8')
result = np.add.reduceat(x, indices)
assert_equal(result.dtype, x.dtype)
assert_equal(result.shape, (0,))
# Another case with a slightly different zero-sized shape
x = np.ones((5, 2))
result = np.add.reduceat(x, [], axis=0)
assert_equal(result.dtype, x.dtype)
assert_equal(result.shape, (0, 2))
result = np.add.reduceat(x, [], axis=1)
assert_equal(result.dtype, x.dtype)
assert_equal(result.shape, (5, 0))
def test_complex_nan_comparisons():
nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)]
fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1),
complex(1, 1), complex(-1, -1), complex(0, 0)]
with np.errstate(invalid='ignore'):
for x in nans + fins:
x = np.array([x])
for y in nans + fins:
y = np.array([y])
if np.isfinite(x) and np.isfinite(y):
continue
assert_equal(x < y, False, err_msg="%r < %r" % (x, y))
assert_equal(x > y, False, err_msg="%r > %r" % (x, y))
assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y))
assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y))
assert_equal(x == y, False, err_msg="%r == %r" % (x, y))
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
denisff/python-for-android | python3-alpha/extra_modules/gdata/Crypto/Cipher/__init__.py | 271 | 1145 | """Secret-key encryption algorithms.
Secret-key encryption algorithms transform plaintext in some way that
is dependent on a key, producing ciphertext. This transformation can
easily be reversed, if (and, hopefully, only if) one knows the key.
The encryption modules here all support the interface described in PEP
272, "API for Block Encryption Algorithms".
If you don't know which algorithm to choose, use AES because it's
standard and has undergone a fair bit of examination.
Crypto.Cipher.AES Advanced Encryption Standard
Crypto.Cipher.ARC2 Alleged RC2
Crypto.Cipher.ARC4 Alleged RC4
Crypto.Cipher.Blowfish
Crypto.Cipher.CAST
Crypto.Cipher.DES The Data Encryption Standard. Very commonly used
in the past, but today its 56-bit keys are too small.
Crypto.Cipher.DES3 Triple DES.
Crypto.Cipher.IDEA
Crypto.Cipher.RC5
Crypto.Cipher.XOR The simple XOR cipher.
"""
__all__ = ['AES', 'ARC2', 'ARC4',
'Blowfish', 'CAST', 'DES', 'DES3', 'IDEA', 'RC5',
'XOR'
]
__revision__ = "$Id: __init__.py,v 1.7 2003/02/28 15:28:35 akuchling Exp $"
| apache-2.0 |
leeseuljeong/leeseulstack_neutron | neutron/tests/unit/test_l3_dvr.py | 34 | 3931 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import netaddr
from neutron.agent.l3 import link_local_allocator as lla
from neutron.tests import base
class TestLinkLocalAddrAllocator(base.BaseTestCase):
def setUp(self):
super(TestLinkLocalAddrAllocator, self).setUp()
self.subnet = netaddr.IPNetwork('169.254.31.0/24')
def test__init__(self):
a = lla.LinkLocalAllocator('/file', self.subnet.cidr)
self.assertEqual('/file', a.state_file)
self.assertEqual({}, a.allocations)
def test__init__readfile(self):
with mock.patch.object(lla.LinkLocalAllocator, '_read') as read:
read.return_value = ["da873ca2,169.254.31.28/31\n"]
a = lla.LinkLocalAllocator('/file', self.subnet.cidr)
self.assertTrue('da873ca2' in a.remembered)
self.assertEqual({}, a.allocations)
def test_allocate(self):
a = lla.LinkLocalAllocator('/file', self.subnet.cidr)
with mock.patch.object(lla.LinkLocalAllocator, '_write') as write:
subnet = a.allocate('deadbeef')
self.assertTrue('deadbeef' in a.allocations)
self.assertTrue(subnet not in a.pool)
self._check_allocations(a.allocations)
write.assert_called_once_with(['deadbeef,%s\n' % subnet.cidr])
def test_allocate_from_file(self):
with mock.patch.object(lla.LinkLocalAllocator, '_read') as read:
read.return_value = ["deadbeef,169.254.31.88/31\n"]
a = lla.LinkLocalAllocator('/file', self.subnet.cidr)
with mock.patch.object(lla.LinkLocalAllocator, '_write') as write:
subnet = a.allocate('deadbeef')
self.assertEqual(netaddr.IPNetwork('169.254.31.88/31'), subnet)
self.assertTrue(subnet not in a.pool)
self._check_allocations(a.allocations)
self.assertFalse(write.called)
def test_allocate_exhausted_pool(self):
subnet = netaddr.IPNetwork('169.254.31.0/31')
with mock.patch.object(lla.LinkLocalAllocator, '_read') as read:
read.return_value = ["deadbeef,169.254.31.0/31\n"]
a = lla.LinkLocalAllocator('/file', subnet.cidr)
with mock.patch.object(lla.LinkLocalAllocator, '_write') as write:
allocation = a.allocate('abcdef12')
self.assertEqual(subnet, allocation)
self.assertFalse('deadbeef' in a.allocations)
self.assertTrue('abcdef12' in a.allocations)
self.assertTrue(allocation not in a.pool)
self._check_allocations(a.allocations)
write.assert_called_once_with(['abcdef12,%s\n' % allocation.cidr])
self.assertRaises(RuntimeError, a.allocate, 'deadbeef')
def test_release(self):
with mock.patch.object(lla.LinkLocalAllocator, '_write') as write:
a = lla.LinkLocalAllocator('/file', self.subnet.cidr)
subnet = a.allocate('deadbeef')
write.reset_mock()
a.release('deadbeef')
self.assertTrue('deadbeef' not in a.allocations)
self.assertTrue(subnet in a.pool)
self.assertEqual({}, a.allocations)
write.assert_called_once_with([])
def _check_allocations(self, allocations):
for key, subnet in allocations.items():
self.assertTrue(subnet in self.subnet)
self.assertEqual(subnet.prefixlen, 31)
| apache-2.0 |
wilvk/ansible | lib/ansible/modules/notification/flowdock.py | 27 | 5713 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Matt Coddington <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: flowdock
version_added: "1.2"
author: "Matt Coddington (@mcodd)"
short_description: Send a message to a flowdock
description:
- Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat)
options:
token:
description:
- API token.
required: true
type:
description:
- Whether to post to 'inbox' or 'chat'
required: true
choices: [ "inbox", "chat" ]
msg:
description:
- Content of the message
required: true
tags:
description:
- tags of the message, separated by commas
required: false
external_user_name:
description:
- (chat only - required) Name of the "user" sending the message
required: false
from_address:
description:
- (inbox only - required) Email address of the message sender
required: false
source:
description:
- (inbox only - required) Human readable identifier of the application that uses the Flowdock API
required: false
subject:
description:
- (inbox only - required) Subject line of the message
required: false
from_name:
description:
- (inbox only) Name of the message sender
required: false
reply_to:
description:
- (inbox only) Email address for replies
required: false
project:
description:
- (inbox only) Human readable identifier for more detailed message categorization
required: false
link:
description:
- (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox.
required: false
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
requirements: [ ]
'''
EXAMPLES = '''
- flowdock:
type: inbox
token: AAAAAA
from_address: [email protected]
source: my cool app
msg: test from ansible
subject: test subject
- flowdock:
type: chat
token: AAAAAA
external_user_name: testuser
msg: test from ansible
tags: tag1,tag2,tag3
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import fetch_url
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True, no_log=True),
msg=dict(required=True),
type=dict(required=True, choices=["inbox", "chat"]),
external_user_name=dict(required=False),
from_address=dict(required=False),
source=dict(required=False),
subject=dict(required=False),
from_name=dict(required=False),
reply_to=dict(required=False),
project=dict(required=False),
tags=dict(required=False),
link=dict(required=False),
validate_certs=dict(default='yes', type='bool'),
),
supports_check_mode=True
)
type = module.params["type"]
token = module.params["token"]
if type == 'inbox':
url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token)
else:
url = "https://api.flowdock.com/v1/messages/chat/%s" % (token)
params = {}
# required params
params['content'] = module.params["msg"]
# required params for the 'chat' type
if module.params['external_user_name']:
if type == 'inbox':
module.fail_json(msg="external_user_name is not valid for the 'inbox' type")
else:
params['external_user_name'] = module.params["external_user_name"]
elif type == 'chat':
module.fail_json(msg="external_user_name is required for the 'chat' type")
# required params for the 'inbox' type
for item in ['from_address', 'source', 'subject']:
if module.params[item]:
if type == 'chat':
module.fail_json(msg="%s is not valid for the 'chat' type" % item)
else:
params[item] = module.params[item]
elif type == 'inbox':
module.fail_json(msg="%s is required for the 'inbox' type" % item)
# optional params
if module.params["tags"]:
params['tags'] = module.params["tags"]
# optional params for the 'inbox' type
for item in ['from_name', 'reply_to', 'project', 'link']:
if module.params[item]:
if type == 'chat':
module.fail_json(msg="%s is not valid for the 'chat' type" % item)
else:
params[item] = module.params[item]
# If we're in check mode, just exit pretending like we succeeded
if module.check_mode:
module.exit_json(changed=False)
# Send the data to Flowdock
data = urlencode(params)
response, info = fetch_url(module, url, data=data)
if info['status'] != 200:
module.fail_json(msg="unable to send msg: %s" % info['msg'])
module.exit_json(changed=True, msg=module.params["msg"])
if __name__ == '__main__':
main()
| gpl-3.0 |
Ahmad31/Web_Flask_Cassandra | flask/lib/python2.7/site-packages/jinja2/exceptions.py | 222 | 4428 | # -*- coding: utf-8 -*-
"""
jinja2.exceptions
~~~~~~~~~~~~~~~~~
Jinja exceptions.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2._compat import imap, text_type, PY2, implements_to_string
class TemplateError(Exception):
"""Baseclass for all template errors."""
if PY2:
def __init__(self, message=None):
if message is not None:
message = text_type(message).encode('utf-8')
Exception.__init__(self, message)
@property
def message(self):
if self.args:
message = self.args[0]
if message is not None:
return message.decode('utf-8', 'replace')
def __unicode__(self):
return self.message or u''
else:
def __init__(self, message=None):
Exception.__init__(self, message)
@property
def message(self):
if self.args:
message = self.args[0]
if message is not None:
return message
@implements_to_string
class TemplateNotFound(IOError, LookupError, TemplateError):
"""Raised if a template does not exist."""
# looks weird, but removes the warning descriptor that just
# bogusly warns us about message being deprecated
message = None
def __init__(self, name, message=None):
IOError.__init__(self)
if message is None:
message = name
self.message = message
self.name = name
self.templates = [name]
def __str__(self):
return self.message
class TemplatesNotFound(TemplateNotFound):
"""Like :class:`TemplateNotFound` but raised if multiple templates
are selected. This is a subclass of :class:`TemplateNotFound`
exception, so just catching the base exception will catch both.
.. versionadded:: 2.2
"""
def __init__(self, names=(), message=None):
if message is None:
message = u'none of the templates given were found: ' + \
u', '.join(imap(text_type, names))
TemplateNotFound.__init__(self, names and names[-1] or None, message)
self.templates = list(names)
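# Catching sketch for the hierarchy above: TemplatesNotFound subclasses
# TemplateNotFound, so a single except clause covers both (env is an assumed
# jinja2.Environment in the caller's scope):
#
#     try:
#         env.select_template(['a.html', 'b.html'])
#     except TemplateNotFound as exc:
#         print('missing template(s): %s' % exc.templates)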
@implements_to_string
class TemplateSyntaxError(TemplateError):
"""Raised to tell the user that there is a problem with the template."""
def __init__(self, message, lineno, name=None, filename=None):
TemplateError.__init__(self, message)
self.lineno = lineno
self.name = name
self.filename = filename
self.source = None
# this is set to True if the debug.translate_syntax_error
# function translated the syntax error into a new traceback
self.translated = False
def __str__(self):
# for translated errors we only return the message
if self.translated:
return self.message
# otherwise attach some stuff
location = 'line %d' % self.lineno
name = self.filename or self.name
if name:
location = 'File "%s", %s' % (name, location)
lines = [self.message, ' ' + location]
# if the source is set, add the line to the output
if self.source is not None:
try:
line = self.source.splitlines()[self.lineno - 1]
except IndexError:
line = None
if line:
lines.append(' ' + line.strip())
return u'\n'.join(lines)
class TemplateAssertionError(TemplateSyntaxError):
"""Like a template syntax error, but covers cases where something in the
template caused an error at compile time that wasn't necessarily caused
by a syntax error. However it's a direct subclass of
:exc:`TemplateSyntaxError` and has the same attributes.
"""
class TemplateRuntimeError(TemplateError):
"""A generic runtime error in the template engine. Under some situations
Jinja may raise this exception.
"""
class UndefinedError(TemplateRuntimeError):
"""Raised if a template tries to operate on :class:`Undefined`."""
class SecurityError(TemplateRuntimeError):
"""Raised if a template tries to do something insecure if the
sandbox is enabled.
"""
class FilterArgumentError(TemplateRuntimeError):
"""This error is raised if a filter was called with inappropriate
arguments
"""
| apache-2.0 |
rrrrrr8/vnpy | vnpy/api/fcoin/vnfcoin.py | 1 | 8705 | # encoding: UTF-8
from __future__ import print_function
import hashlib
import hmac
import json
import ssl
import traceback
import base64
from queue import Queue, Empty
from multiprocessing.dummy import Pool
from time import time
from urlparse import urlparse
from copy import copy
from urllib import urlencode
from threading import Thread
import requests
import websocket
from six.moves import input
REST_HOST = 'https://api.fcoin.com/v2'
WEBSOCKET_HOST = 'wss://api.fcoin.com/v2/ws'
########################################################################
class FcoinRestApi(object):
"""REST API"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.apiKey = ''
self.apiSecret = ''
self.active = False
self.reqid = 0
self.queue = Queue()
self.pool = None
self.sessionDict = {} # session object dictionary
#----------------------------------------------------------------------
def init(self, apiKey, apiSecret):
"""初始化"""
self.apiKey = str(apiKey)
self.apiSecret = str(apiSecret)
#----------------------------------------------------------------------
def start(self, n=10):
"""启动"""
if self.active:
return
self.active = True
self.pool = Pool(n)
self.pool.map_async(self.run, range(n))
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.active = False
if self.pool:
self.pool.close()
self.pool.join()
#----------------------------------------------------------------------
def addReq(self, method, path, callback, params=None, postdict=None):
"""添加请求"""
self.reqid += 1
req = (method, path, callback, params, postdict, self.reqid)
self.queue.put(req)
return self.reqid
#----------------------------------------------------------------------
def processReq(self, req, i):
"""处理请求"""
method, path, callback, params, postdict, reqid = req
url = REST_HOST + path
timestamp = str(int(time()) * 1000)
header = {}
header['FC-ACCESS-TIMESTAMP'] = timestamp
header['FC-ACCESS-KEY'] = self.apiKey
header['FC-ACCESS-SIGNATURE'] = self.generateSignature(method, url, timestamp, params, postdict)
try:
# use a keep-alive session; cuts request time by ~80% vs short-lived connections
session = self.sessionDict[i]
resp = session.request(method, url, headers=header, params=params, json=postdict)
#resp = requests.request(method, url, headers=header, params=params, data=postdict)
#if method != 'GET':
#print '-' * 30
#print 'method', method
#print 'url', url
#print 'header', header
#print 'params', params
#print 'postdict', postdict
code = resp.status_code
d = resp.json()
if code == 200:
callback(d, reqid)
else:
self.onError(code, d)
except Exception as e:
self.onError(type(e), e.message)
#----------------------------------------------------------------------
def run(self, i):
"""连续运行"""
self.sessionDict[i] = requests.Session()
while self.active:
try:
req = self.queue.get(timeout=1)
self.processReq(req, i)
except Empty:
pass
#----------------------------------------------------------------------
def generateSignature(self, method, path, timestamp, params=None, postdict=None):
"""生成签名"""
# 对params在HTTP报文路径中,以请求字段方式序列化
if params:
query = urlencode(sorted(params.items()))
path = path + '?' + query
if postdict:
post = urlencode(sorted(postdict.items()))
else:
post = ''
msg = method + path + timestamp + post
msg = base64.b64encode(msg)
signature = hmac.new(self.apiSecret, msg, digestmod=hashlib.sha1).digest()
signature = base64.b64encode(signature)
return signature
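# Signing sketch with hypothetical values: for
#     GET https://api.fcoin.com/v2/orders?states=submitted
# at timestamp t and an empty body, the pre-sign string is
#     'GET' + url-with-query + t + ''
# which is base64-encoded, HMAC-SHA1'd with the API secret, and
# base64-encoded again -- the exact steps implemented above.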
#----------------------------------------------------------------------
def onError(self, code, error):
"""错误回调"""
print('on error')
print(code, error)
#----------------------------------------------------------------------
def onData(self, data, reqid):
"""通用回调"""
print('on data')
print(data, reqid)
########################################################################
class FcoinWebsocketApi(object):
"""Websocket API"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.ws = None
self.thread = None
self.active = False
#----------------------------------------------------------------------
def start(self):
"""启动"""
self.ws = websocket.create_connection(WEBSOCKET_HOST,
sslopt={'cert_reqs': ssl.CERT_NONE})
self.active = True
self.thread = Thread(target=self.run)
self.thread.start()
self.onConnect()
#----------------------------------------------------------------------
def reconnect(self):
"""重连"""
self.ws = websocket.create_connection(WEBSOCKET_HOST,
sslopt={'cert_reqs': ssl.CERT_NONE})
self.onConnect()
#----------------------------------------------------------------------
def run(self):
"""运行"""
while self.active:
try:
stream = self.ws.recv()
data = json.loads(stream)
self.onData(data)
except:
msg = traceback.format_exc()
self.onError(msg)
self.reconnect()
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.active = False
if self.thread:
self.thread.join()
#----------------------------------------------------------------------
def onConnect(self):
"""连接回调"""
print('connected')
#----------------------------------------------------------------------
def onData(self, data):
"""数据回调"""
print('-' * 30)
l = data.keys()
l.sort()
for k in l:
print(k, data[k])
#----------------------------------------------------------------------
def onError(self, msg):
"""错误回调"""
print(msg)
#----------------------------------------------------------------------
def sendReq(self, req):
"""发出请求"""
self.ws.send(json.dumps(req))
if __name__ == '__main__':
from datetime import datetime
from time import sleep
API_KEY = '88893f839fbd49f4b5fcb03e7c15c015'
API_SECRET = 'ef383295cf4e4c128e6d18d7e9564b12'
# REST API test
rest = FcoinRestApi()
rest.init(API_KEY, API_SECRET)
rest.start(3)
#rest.addReq('GET', '/accounts/balance', rest.onData)
# query orders
#states = ['submitted', 'partial_filled', 'partial_canceled',
#'filled', 'canceled', 'pending_cancel']
#req = {
#'symbol': 'ethusdt',
#'start': datetime.now().strftime('%Y%m%d'),
#'states': 'submitted',
#'limit': 500
#}
#for i in range(10):
#rest.addReq('GET', '/orders', rest.onData, params=req)
#sleep(2)
req = {
'symbol': 'ethusdt',
'side': 'buy',
'type': 'limit',
'price': 300,
'amount': 0.01
}
rest.addReq('POST', '/orders', rest.onData, postdict=req)
#sleep(1)
#rest.addReq('POST', '/orders', rest.onData, params=req)
## Websocket API test
#ws = FcoinWebsocketApi()
#ws.start()
#req = {
#'cmd': 'sub',
#'args': ['depth.L20.btcusdt'],
#'id': 1
#}
#ws.sendReq(req)
input()
| mit |
gilisagreen/Project4 | lib/flask/blueprints.py | 773 | 16320 | # -*- coding: utf-8 -*-
"""
flask.blueprints
~~~~~~~~~~~~~~~~
Blueprints are the recommended way to implement larger or more
pluggable applications in Flask 0.7 and later.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from .helpers import _PackageBoundObject, _endpoint_from_view_func
class BlueprintSetupState(object):
"""Temporary holder object for registering a blueprint with the
application. An instance of this class is created by the
:meth:`~flask.Blueprint.make_setup_state` method and later passed
to all register callback functions.
"""
def __init__(self, blueprint, app, options, first_registration):
#: a reference to the current application
self.app = app
#: a reference to the blueprint that created this setup state.
self.blueprint = blueprint
#: a dictionary with all options that were passed to the
#: :meth:`~flask.Flask.register_blueprint` method.
self.options = options
#: as blueprints can be registered multiple times with the
#: application and not everything wants to be registered
#: multiple times on it, this attribute can be used to figure
#: out if the blueprint was registered in the past already.
self.first_registration = first_registration
subdomain = self.options.get('subdomain')
if subdomain is None:
subdomain = self.blueprint.subdomain
#: The subdomain that the blueprint should be active for, `None`
#: otherwise.
self.subdomain = subdomain
url_prefix = self.options.get('url_prefix')
if url_prefix is None:
url_prefix = self.blueprint.url_prefix
#: The prefix that should be used for all URLs defined on the
#: blueprint.
self.url_prefix = url_prefix
#: A dictionary with URL defaults that is added to each and every
#: URL that was defined with the blueprint.
self.url_defaults = dict(self.blueprint.url_values_defaults)
self.url_defaults.update(self.options.get('url_defaults', ()))
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""A helper method to register a rule (and optionally a view function)
to the application. The endpoint is automatically prefixed with the
blueprint's name.
"""
if self.url_prefix:
rule = self.url_prefix + rule
options.setdefault('subdomain', self.subdomain)
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
defaults = self.url_defaults
if 'defaults' in options:
defaults = dict(defaults, **options.pop('defaults'))
self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint),
view_func, defaults=defaults, **options)
class Blueprint(_PackageBoundObject):
"""Represents a blueprint. A blueprint is an object that records
functions that will be called with the
:class:`~flask.blueprint.BlueprintSetupState` later to register functions
or other things on the main application. See :ref:`blueprints` for more
information.
.. versionadded:: 0.7
"""
warn_on_modifications = False
_got_registered_once = False
def __init__(self, name, import_name, static_folder=None,
static_url_path=None, template_folder=None,
url_prefix=None, subdomain=None, url_defaults=None):
_PackageBoundObject.__init__(self, import_name, template_folder)
self.name = name
self.url_prefix = url_prefix
self.subdomain = subdomain
self.static_folder = static_folder
self.static_url_path = static_url_path
self.deferred_functions = []
self.view_functions = {}
if url_defaults is None:
url_defaults = {}
self.url_values_defaults = url_defaults
def record(self, func):
"""Registers a function that is called when the blueprint is
registered on the application. This function is called with the
state as argument as returned by the :meth:`make_setup_state`
method.
"""
if self._got_registered_once and self.warn_on_modifications:
from warnings import warn
warn(Warning('The blueprint was already registered once '
'but is getting modified now. These changes '
'will not show up.'))
self.deferred_functions.append(func)
def record_once(self, func):
"""Works like :meth:`record` but wraps the function in another
function that will ensure the function is only called once. If the
blueprint is registered a second time on the application, the
function passed is not called.
"""
def wrapper(state):
if state.first_registration:
func(state)
return self.record(update_wrapper(wrapper, func))
def make_setup_state(self, app, options, first_registration=False):
"""Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`
object that is later passed to the register callback functions.
Subclasses can override this to return a subclass of the setup state.
"""
return BlueprintSetupState(self, app, options, first_registration)
def register(self, app, options, first_registration=False):
"""Called by :meth:`Flask.register_blueprint` to register a blueprint
on the application. This can be overridden to customize the register
behavior. Keyword arguments from
:func:`~flask.Flask.register_blueprint` are directly forwarded to this
method in the `options` dictionary.
"""
self._got_registered_once = True
state = self.make_setup_state(app, options, first_registration)
if self.has_static_folder:
state.add_url_rule(self.static_url_path + '/<path:filename>',
view_func=self.send_static_file,
endpoint='static')
for deferred in self.deferred_functions:
deferred(state)
def route(self, rule, **options):
"""Like :meth:`Flask.route` but for a blueprint. The endpoint for the
:func:`url_for` function is prefixed with the name of the blueprint.
"""
def decorator(f):
endpoint = options.pop("endpoint", f.__name__)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for
the :func:`url_for` function is prefixed with the name of the blueprint.
"""
if endpoint:
            assert '.' not in endpoint, "Blueprint endpoints should not contain dots"
self.record(lambda s:
s.add_url_rule(rule, endpoint, view_func, **options))
def endpoint(self, endpoint):
"""Like :meth:`Flask.endpoint` but for a blueprint. This does not
prefix the endpoint with the blueprint name, this has to be done
explicitly by the user of this method. If the endpoint is prefixed
with a `.` it will be registered to the current blueprint, otherwise
it's an application independent endpoint.
"""
def decorator(f):
def register_endpoint(state):
state.app.view_functions[endpoint] = f
self.record_once(register_endpoint)
return f
return decorator
def app_template_filter(self, name=None):
"""Register a custom template filter, available application wide. Like
:meth:`Flask.template_filter` but for a blueprint.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_filter(f, name=name)
return f
return decorator
def add_app_template_filter(self, f, name=None):
"""Register a custom template filter, available application wide. Like
:meth:`Flask.add_template_filter` but for a blueprint. Works exactly
like the :meth:`app_template_filter` decorator.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.filters[name or f.__name__] = f
self.record_once(register_template)
def app_template_test(self, name=None):
"""Register a custom template test, available application wide. Like
:meth:`Flask.template_test` but for a blueprint.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_test(f, name=name)
return f
return decorator
def add_app_template_test(self, f, name=None):
"""Register a custom template test, available application wide. Like
:meth:`Flask.add_template_test` but for a blueprint. Works exactly
like the :meth:`app_template_test` decorator.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.tests[name or f.__name__] = f
self.record_once(register_template)
def app_template_global(self, name=None):
"""Register a custom template global, available application wide. Like
:meth:`Flask.template_global` but for a blueprint.
.. versionadded:: 0.10
:param name: the optional name of the global, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_global(f, name=name)
return f
return decorator
def add_app_template_global(self, f, name=None):
"""Register a custom template global, available application wide. Like
:meth:`Flask.add_template_global` but for a blueprint. Works exactly
like the :meth:`app_template_global` decorator.
.. versionadded:: 0.10
:param name: the optional name of the global, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.globals[name or f.__name__] = f
self.record_once(register_template)
def before_request(self, f):
"""Like :meth:`Flask.before_request` but for a blueprint. This function
is only executed before each request that is handled by a function of
that blueprint.
"""
self.record_once(lambda s: s.app.before_request_funcs
.setdefault(self.name, []).append(f))
return f
def before_app_request(self, f):
"""Like :meth:`Flask.before_request`. Such a function is executed
before each request, even if outside of a blueprint.
"""
self.record_once(lambda s: s.app.before_request_funcs
.setdefault(None, []).append(f))
return f
def before_app_first_request(self, f):
"""Like :meth:`Flask.before_first_request`. Such a function is
executed before the first request to the application.
"""
self.record_once(lambda s: s.app.before_first_request_funcs.append(f))
return f
def after_request(self, f):
"""Like :meth:`Flask.after_request` but for a blueprint. This function
is only executed after each request that is handled by a function of
that blueprint.
"""
self.record_once(lambda s: s.app.after_request_funcs
.setdefault(self.name, []).append(f))
return f
def after_app_request(self, f):
"""Like :meth:`Flask.after_request` but for a blueprint. Such a function
is executed after each request, even if outside of the blueprint.
"""
self.record_once(lambda s: s.app.after_request_funcs
.setdefault(None, []).append(f))
return f
def teardown_request(self, f):
"""Like :meth:`Flask.teardown_request` but for a blueprint. This
function is only executed when tearing down requests handled by a
function of that blueprint. Teardown request functions are executed
when the request context is popped, even when no actual request was
performed.
"""
self.record_once(lambda s: s.app.teardown_request_funcs
.setdefault(self.name, []).append(f))
return f
def teardown_app_request(self, f):
"""Like :meth:`Flask.teardown_request` but for a blueprint. Such a
function is executed when tearing down each request, even if outside of
the blueprint.
"""
self.record_once(lambda s: s.app.teardown_request_funcs
.setdefault(None, []).append(f))
return f
def context_processor(self, f):
"""Like :meth:`Flask.context_processor` but for a blueprint. This
function is only executed for requests handled by a blueprint.
"""
self.record_once(lambda s: s.app.template_context_processors
.setdefault(self.name, []).append(f))
return f
def app_context_processor(self, f):
"""Like :meth:`Flask.context_processor` but for a blueprint. Such a
function is executed each request, even if outside of the blueprint.
"""
self.record_once(lambda s: s.app.template_context_processors
.setdefault(None, []).append(f))
return f
def app_errorhandler(self, code):
"""Like :meth:`Flask.errorhandler` but for a blueprint. This
handler is used for all requests, even if outside of the blueprint.
"""
def decorator(f):
self.record_once(lambda s: s.app.errorhandler(code)(f))
return f
return decorator
def url_value_preprocessor(self, f):
"""Registers a function as URL value preprocessor for this
blueprint. It's called before the view functions are called and
can modify the url values provided.
"""
self.record_once(lambda s: s.app.url_value_preprocessors
.setdefault(self.name, []).append(f))
return f
def url_defaults(self, f):
"""Callback function for URL defaults for this blueprint. It's called
with the endpoint and values and should update the values passed
in place.
"""
self.record_once(lambda s: s.app.url_default_functions
.setdefault(self.name, []).append(f))
return f
def app_url_value_preprocessor(self, f):
"""Same as :meth:`url_value_preprocessor` but application wide.
"""
self.record_once(lambda s: s.app.url_value_preprocessors
.setdefault(None, []).append(f))
return f
def app_url_defaults(self, f):
"""Same as :meth:`url_defaults` but application wide.
"""
self.record_once(lambda s: s.app.url_default_functions
.setdefault(None, []).append(f))
return f
def errorhandler(self, code_or_exception):
"""Registers an error handler that becomes active for this blueprint
only. Please be aware that routing does not happen local to a
blueprint so an error handler for 404 usually is not handled by
a blueprint unless it is caused inside a view function. Another
special case is the 500 internal server error which is always looked
up from the application.
Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator
of the :class:`~flask.Flask` object.
"""
def decorator(f):
self.record_once(lambda s: s.app._register_error_handler(
self.name, code_or_exception, f))
return f
return decorator
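# Illustrative only (not part of the original module): a minimal sketch of how
# a blueprint built with the class above is wired into an application. The
# names `simple_page` and `show` are hypothetical.
#
#     from flask import Flask, Blueprint
#
#     simple_page = Blueprint('simple_page', __name__, url_prefix='/pages')
#
#     @simple_page.route('/<page>')
#     def show(page):
#         return 'Page: %s' % page
#
#     app = Flask(__name__)
#     app.register_blueprint(simple_page)  # replays the deferred functions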
| apache-2.0 |
Metronote/metronotesd-alpha | lib/blockchain/sochain.py | 6 | 3142 | '''
chain.so
'''
import logging
from lib import config, util
def get_host():
if config.BLOCKCHAIN_SERVICE_CONNECT:
return config.BLOCKCHAIN_SERVICE_CONNECT
else:
return 'https://chain.so'
def sochain_network():
network = config.BTC
if config.TESTNET:
network += 'TEST'
return network
def check():
pass
def getinfo():
result = util.get_url(get_host() + '/api/v2/get_info/{}'.format(sochain_network()), abort_on_error=True)
if 'status' in result and result['status'] == 'success':
return {
"info": {
"blocks": result['data']['blocks']
}
}
else:
return None
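# Shape of the chain.so payload parsed by getinfo() above (keys come from the
# code; the block count is a made-up example):
#
#     {"status": "success", "data": {"blocks": 389123, ...}}
#
# which getinfo() reduces to {"info": {"blocks": 389123}}.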
def listunspent(address):
result = util.get_url(get_host() + '/api/v2/get_tx_unspent/{}/{}'.format(sochain_network(), address), abort_on_error=True)
if 'status' in result and result['status'] == 'success':
utxo = []
for txo in result['data']['txs']:
newtxo = {
'address': address,
'txid': txo['txid'],
'vout': txo['output_no'],
'ts': txo['time'],
'scriptPubKey': txo['script_hex'],
'amount': float(txo['value']),
'confirmations': txo['confirmations'],
'confirmationsFromCache': False
}
utxo.append(newtxo)
return utxo
else:
return None
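# Each dict produced by listunspent() above has this shape (keys from the
# code, values illustrative):
#
#     {'address': '1Abc...', 'txid': 'deadbeef...', 'vout': 0,
#      'ts': 1400000000, 'scriptPubKey': '76a9...', 'amount': 0.5,
#      'confirmations': 6, 'confirmationsFromCache': False}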
def getaddressinfo(address):
infos = util.get_url(get_host() + '/api/v2/address/{}/{}'.format(sochain_network(), address), abort_on_error=True)
if 'status' in infos and infos['status'] == 'success':
transactions = []
for tx in infos['data']['txs']:
transactions.append(tx['txid'])
return {
'addrStr': address,
'balance': float(infos['data']['balance']),
'balanceSat': float(infos['data']['balance']) * config.UNIT,
'totalReceived': float(infos['data']['received_value']),
'totalReceivedSat': float(infos['data']['received_value']) * config.UNIT,
'unconfirmedBalance': 0,
'unconfirmedBalanceSat': 0,
'unconfirmedTxApperances': 0,
'txApperances': infos['data']['total_txs'],
'transactions': transactions
}
return None
def gettransaction(tx_hash):
    tx = util.get_url(get_host() + '/api/v2/get_tx/{}/{}'.format(sochain_network(), tx_hash), abort_on_error=True)
if 'status' in tx and tx['status'] == 'success':
valueOut = 0
for vout in tx['data']['tx']['vout']:
valueOut += float(vout['value'])
return {
'txid': tx_hash,
'version': tx['data']['tx']['version'],
'locktime': tx['data']['tx']['locktime'],
'blockhash': tx['data']['tx']['blockhash'],
'confirmations': tx['data']['tx']['confirmations'],
'time': tx['data']['tx']['time'],
'blocktime': tx['data']['tx']['blocktime'],
'valueOut': valueOut,
'vin': tx['data']['tx']['vin'],
'vout': tx['data']['tx']['vout']
}
return None | mit |
knossos-project/PythonQt | examples/NicePyConsole/pygments/lexers/inferno.py | 52 | 3110 | # -*- coding: utf-8 -*-
"""
pygments.lexers.inferno
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Inferno os and all the related stuff.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
Name, String, Number
__all__ = ['LimboLexer']
class LimboLexer(RegexLexer):
"""
Lexer for `Limbo programming language <http://www.vitanuova.com/inferno/limbo.html>`_
TODO:
- maybe implement better var declaration highlighting
- some simple syntax error highlighting
.. versionadded:: 2.0
"""
name = 'Limbo'
aliases = ['limbo']
filenames = ['*.b']
mimetypes = ['text/limbo']
tokens = {
'whitespace': [
(r'^(\s*)([a-zA-Z_]\w*:(\s*)\n)',
bygroups(Text, Name.Label)),
(r'\n', Text),
(r'\s+', Text),
(r'#(\n|(.|\n)*?[^\\]\n)', Comment.Single),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\', String), # stray backslash
],
'statements': [
(r'"', String, 'string'),
(r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])', Number.Float),
(r'16r[0-9a-fA-F]+', Number.Hex),
(r'8r[0-7]+', Number.Oct),
(r'((([1-3]\d)|([2-9]))r)?(\d+)', Number.Integer),
(r'[()\[\],.]', Punctuation),
(r'[~!%^&*+=|?:<>/-]|(->)|(<-)|(=>)|(::)', Operator),
            (r'(alt|break|case|continue|cyclic|do|else|exit|'
             r'for|hd|if|implement|import|include|len|load|or|'
             r'pick|return|spawn|tagof|tl|to|while)\b', Keyword),
(r'(byte|int|big|real|string|array|chan|list|adt'
r'|fn|ref|of|module|self|type)\b', Keyword.Type),
(r'(con|iota|nil)\b', Keyword.Constant),
            (r'[a-zA-Z_]\w*', Name),
],
'statement' : [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'root': [
include('whitespace'),
('', Text, 'statement'),
],
}
def analyse_text(text):
# Any limbo module implements something
if re.search(r'^implement \w+;', text, re.MULTILINE):
return 0.7
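# A minimal usage sketch (illustrative; the Limbo snippet is hypothetical):
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#
#     code = 'implement Hello;\ninclude "sys.m";\n'
#     print(highlight(code, LimboLexer(), TerminalFormatter()))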
# TODO:
# - Make lexers for:
# - asm sources
# - man pages
# - mkfiles
# - module definitions
# - namespace definitions
# - shell scripts
# - maybe keyfiles and fonts
# they all seem to be quite similar to their equivalents
# from unix world, so there should not be a lot of problems
| lgpl-2.1 |
tzewangdorje/SIPserv | Twisted-13.1.0/twisted/internet/test/reactormixins.py | 7 | 12552 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Utilities for unit testing reactor implementations.
The main feature of this module is L{ReactorBuilder}, a base class for use when
writing interface/blackbox tests for reactor implementations. Test case classes
for reactor features should subclass L{ReactorBuilder} instead of
L{SynchronousTestCase}. All of the features of L{SynchronousTestCase} will be
available. Additionally, the tests will automatically be applied to all
available reactor implementations.
"""
from __future__ import division, absolute_import
__metaclass__ = type
__all__ = ['TestTimeoutError', 'ReactorBuilder', 'needsRunningReactor']
import os, signal, time
from twisted.python.compat import _PY3
from twisted.trial.unittest import SynchronousTestCase, SkipTest
from twisted.trial.util import DEFAULT_TIMEOUT_DURATION, acquireAttribute
from twisted.python.runtime import platform
from twisted.python._reflectpy3 import namedAny
from twisted.python.deprecate import _fullyQualifiedName as fullyQualifiedName
from twisted.python import log
from twisted.python.failure import Failure
# Access private APIs.
if platform.isWindows():
process = None
elif _PY3:
# Enable this on Python 3 when twisted.internet.process is ported.
# See #5968.
process = None
else:
from twisted.internet import process
class TestTimeoutError(Exception):
"""
The reactor was still running after the timeout period elapsed in
L{ReactorBuilder.runReactor}.
"""
def needsRunningReactor(reactor, thunk):
"""
Various functions within these tests need an already-running reactor at
some point. They need to stop the reactor when the test has completed, and
that means calling reactor.stop(). However, reactor.stop() raises an
exception if the reactor isn't already running, so if the L{Deferred} that
a particular API under test returns fires synchronously (as especially an
endpoint's C{connect()} method may do, if the connect is to a local
interface address) then the test won't be able to stop the reactor being
tested and finish. So this calls C{thunk} only once C{reactor} is running.
(This is just an alias for
L{twisted.internet.interfaces.IReactorCore.callWhenRunning} on the given
reactor parameter, in order to centrally reference the above paragraph and
repeating it everywhere as a comment.)
@param reactor: the L{twisted.internet.interfaces.IReactorCore} under test
@param thunk: a 0-argument callable, which eventually finishes the test in
question, probably in a L{Deferred} callback.
"""
reactor.callWhenRunning(thunk)
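# Illustrative use (the endpoint and factory names are hypothetical): when a
# connect Deferred may fire synchronously, defer reactor.stop() until the
# reactor is actually running.
#
#     d = endpoint.connect(factory)
#     d.addCallback(lambda proto: needsRunningReactor(reactor, reactor.stop))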
class ReactorBuilder:
"""
L{SynchronousTestCase} mixin which provides a reactor-creation API. This
mixin defines C{setUp} and C{tearDown}, so mix it in before
L{SynchronousTestCase} or call its methods from the overridden ones in the
subclass.
@cvar skippedReactors: A dict mapping FQPN strings of reactors for
which the tests defined by this class will be skipped to strings
giving the skip message.
@cvar requiredInterfaces: A C{list} of interfaces which the reactor must
provide or these tests will be skipped. The default, C{None}, means
that no interfaces are required.
@ivar reactorFactory: A no-argument callable which returns the reactor to
use for testing.
@ivar originalHandler: The SIGCHLD handler which was installed when setUp
ran and which will be re-installed when tearDown runs.
@ivar _reactors: A list of FQPN strings giving the reactors for which
L{SynchronousTestCase}s will be created.
"""
_reactors = [
# Select works everywhere
"twisted.internet.selectreactor.SelectReactor",
]
if platform.isWindows():
# PortableGtkReactor is only really interesting on Windows,
# but not really Windows specific; if you want you can
# temporarily move this up to the all-platforms list to test
# it on other platforms. It's not there in general because
# it's not _really_ worth it to support on other platforms,
# since no one really wants to use it on other platforms.
_reactors.extend([
"twisted.internet.gtk2reactor.PortableGtkReactor",
"twisted.internet.gireactor.PortableGIReactor",
"twisted.internet.gtk3reactor.PortableGtk3Reactor",
"twisted.internet.win32eventreactor.Win32Reactor",
"twisted.internet.iocpreactor.reactor.IOCPReactor"])
else:
_reactors.extend([
"twisted.internet.glib2reactor.Glib2Reactor",
"twisted.internet.gtk2reactor.Gtk2Reactor",
"twisted.internet.gireactor.GIReactor",
"twisted.internet.gtk3reactor.Gtk3Reactor"])
if platform.isMacOSX():
_reactors.append("twisted.internet.cfreactor.CFReactor")
else:
_reactors.extend([
"twisted.internet.pollreactor.PollReactor",
"twisted.internet.epollreactor.EPollReactor"])
if not platform.isLinux():
# Presumably Linux is not going to start supporting kqueue, so
# skip even trying this configuration.
_reactors.extend([
# Support KQueue on non-OS-X POSIX platforms for now.
"twisted.internet.kqreactor.KQueueReactor",
])
reactorFactory = None
originalHandler = None
requiredInterfaces = None
skippedReactors = {}
def setUp(self):
"""
Clear the SIGCHLD handler, if there is one, to ensure an environment
like the one which exists prior to a call to L{reactor.run}.
"""
if not platform.isWindows():
self.originalHandler = signal.signal(signal.SIGCHLD, signal.SIG_DFL)
def tearDown(self):
"""
Restore the original SIGCHLD handler and reap processes as long as
there seem to be any remaining.
"""
if self.originalHandler is not None:
signal.signal(signal.SIGCHLD, self.originalHandler)
if process is not None:
begin = time.time()
while process.reapProcessHandlers:
log.msg(
"ReactorBuilder.tearDown reaping some processes %r" % (
process.reapProcessHandlers,))
process.reapAllProcesses()
# The process should exit on its own. However, if it
# doesn't, we're stuck in this loop forever. To avoid
# hanging the test suite, eventually give the process some
# help exiting and move on.
time.sleep(0.001)
if time.time() - begin > 60:
for pid in process.reapProcessHandlers:
os.kill(pid, signal.SIGKILL)
raise Exception(
"Timeout waiting for child processes to exit: %r" % (
process.reapProcessHandlers,))
def unbuildReactor(self, reactor):
"""
Clean up any resources which may have been allocated for the given
reactor by its creation or by a test which used it.
"""
# Chris says:
#
# XXX These explicit calls to clean up the waker (and any other
# internal readers) should become obsolete when bug #3063 is
# fixed. -radix, 2008-02-29. Fortunately it should probably cause an
# error when bug #3063 is fixed, so it should be removed in the same
# branch that fixes it.
#
# -exarkun
reactor._uninstallHandler()
if getattr(reactor, '_internalReaders', None) is not None:
for reader in reactor._internalReaders:
reactor.removeReader(reader)
reader.connectionLost(None)
reactor._internalReaders.clear()
# Here's an extra thing unrelated to wakers but necessary for
# cleaning up after the reactors we make. -exarkun
reactor.disconnectAll()
# It would also be bad if any timed calls left over were allowed to
# run.
calls = reactor.getDelayedCalls()
for c in calls:
c.cancel()
def buildReactor(self):
"""
Create and return a reactor using C{self.reactorFactory}.
"""
try:
from twisted.internet.cfreactor import CFReactor
from twisted.internet import reactor as globalReactor
except ImportError:
pass
else:
if (isinstance(globalReactor, CFReactor)
and self.reactorFactory is CFReactor):
raise SkipTest(
"CFReactor uses APIs which manipulate global state, "
"so it's not safe to run its own reactor-builder tests "
"under itself")
try:
reactor = self.reactorFactory()
except:
# Unfortunately, not all errors which result in a reactor
# being unusable are detectable without actually
# instantiating the reactor. So we catch some more here
# and skip the test if necessary. We also log it to aid
# with debugging, but flush the logged error so the test
# doesn't fail.
log.err(None, "Failed to install reactor")
self.flushLoggedErrors()
raise SkipTest(Failure().getErrorMessage())
else:
if self.requiredInterfaces is not None:
missing = [
required for required in self.requiredInterfaces
if not required.providedBy(reactor)]
if missing:
self.unbuildReactor(reactor)
raise SkipTest("%s does not provide %s" % (
fullyQualifiedName(reactor.__class__),
",".join([fullyQualifiedName(x) for x in missing])))
self.addCleanup(self.unbuildReactor, reactor)
return reactor
def getTimeout(self):
"""
Determine how long to run the test before considering it failed.
@return: A C{int} or C{float} giving a number of seconds.
"""
return acquireAttribute(self._parents, 'timeout', DEFAULT_TIMEOUT_DURATION)
def runReactor(self, reactor, timeout=None):
"""
Run the reactor for at most the given amount of time.
@param reactor: The reactor to run.
@type timeout: C{int} or C{float}
@param timeout: The maximum amount of time, specified in seconds, to
allow the reactor to run. If the reactor is still running after
this much time has elapsed, it will be stopped and an exception
raised. If C{None}, the default test method timeout imposed by
Trial will be used. This depends on the L{IReactorTime}
implementation of C{reactor} for correct operation.
@raise TestTimeoutError: If the reactor is still running after
C{timeout} seconds.
"""
if timeout is None:
timeout = self.getTimeout()
timedOut = []
def stop():
timedOut.append(None)
reactor.stop()
reactor.callLater(timeout, stop)
reactor.run()
if timedOut:
raise TestTimeoutError(
"reactor still running after %s seconds" % (timeout,))
def makeTestCaseClasses(cls):
"""
Create a L{SynchronousTestCase} subclass which mixes in C{cls} for each
known reactor and return a dict mapping their names to them.
"""
classes = {}
for reactor in cls._reactors:
shortReactorName = reactor.split(".")[-1]
name = (cls.__name__ + "." + shortReactorName).replace(".", "_")
class testcase(cls, SynchronousTestCase):
__module__ = cls.__module__
if reactor in cls.skippedReactors:
skip = cls.skippedReactors[reactor]
try:
reactorFactory = namedAny(reactor)
except:
skip = Failure().getErrorMessage()
testcase.__name__ = name
classes[testcase.__name__] = testcase
return classes
makeTestCaseClasses = classmethod(makeTestCaseClasses)
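# Typical use at module scope in a test file (a hedged sketch; MyReactorTests
# stands for any ReactorBuilder subclass): expose one generated TestCase per
# known reactor so trial collects them all.
#
#     globals().update(MyReactorTests.makeTestCaseClasses())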
| gpl-3.0 |
mbareta/edx-platform-ft | common/djangoapps/student/tests/test_user_profile_properties.py | 11 | 3560 | """Unit tests for custom UserProfile properties."""
import datetime
import ddt
from django.test import TestCase
from student.models import UserProfile
from student.tests.factories import UserFactory
from django.core.cache import cache
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
@ddt.ddt
class UserProfilePropertiesTest(CacheIsolationTestCase):
"""Unit tests for age, gender_display, and level_of_education_display properties ."""
password = "test"
ENABLED_CACHES = ['default']
def setUp(self):
super(UserProfilePropertiesTest, self).setUp()
self.user = UserFactory.create(password=self.password)
self.profile = self.user.profile
def _set_year_of_birth(self, year_of_birth):
"""
Helper method that sets a birth year for the specified user.
"""
self.profile.year_of_birth = year_of_birth
self.profile.save()
def _set_level_of_education(self, level_of_education):
"""
Helper method that sets a level of education for the specified user.
"""
self.profile.level_of_education = level_of_education
self.profile.save()
def _set_gender(self, gender):
"""
Helper method that sets a gender for the specified user.
"""
self.profile.gender = gender
self.profile.save()
@ddt.data(0, 1, 13, 20, 100)
def test_age(self, years_ago):
"""Verify the age calculated correctly."""
current_year = datetime.datetime.now().year
self._set_year_of_birth(current_year - years_ago)
        # In the year that you turn a certain age, you will also have been a
        # year younger earlier in that same year. We calculate age based on
        # the youngest you could be that year (e.g. someone born in 1990 is
        # treated as 24, not 25, throughout 2015).
age = years_ago - 1
self.assertEqual(self.profile.age, age)
def test_age_no_birth_year(self):
"""Verify nothing is returned."""
self.assertIsNone(self.profile.age)
@ddt.data(*UserProfile.LEVEL_OF_EDUCATION_CHOICES)
@ddt.unpack
def test_display_level_of_education(self, level_enum, display_level):
"""Verify the level of education is displayed correctly."""
self._set_level_of_education(level_enum)
self.assertEqual(self.profile.level_of_education_display, display_level)
def test_display_level_of_education_none_set(self):
"""Verify nothing is returned."""
self.assertIsNone(self.profile.level_of_education_display)
@ddt.data(*UserProfile.GENDER_CHOICES)
@ddt.unpack
def test_display_gender(self, gender_enum, display_gender):
"""Verify the gender displayed correctly."""
self._set_gender(gender_enum)
self.assertEqual(self.profile.gender_display, display_gender)
def test_display_gender_none_set(self):
"""Verify nothing is returned."""
self._set_gender(None)
self.assertIsNone(self.profile.gender_display)
def test_invalidate_cache_user_profile_country_updated(self):
country = 'us'
self.profile.country = country
self.profile.save()
cache_key = UserProfile.country_cache_key_name(self.user.id)
self.assertIsNone(cache.get(cache_key))
cache.set(cache_key, self.profile.country)
self.assertEqual(cache.get(cache_key), country)
country = 'bd'
self.profile.country = country
self.profile.save()
self.assertNotEqual(cache.get(cache_key), country)
self.assertIsNone(cache.get(cache_key))
| agpl-3.0 |
kkintaro/termite-data-server | web2py/gluon/contrib/memdb.py | 9 | 28472 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of web2py Web Framework (Copyrighted, 2007-2009).
Developed by Massimo Di Pierro <[email protected]> and
Robin B <[email protected]>.
License: GPL v2
"""
__all__ = ['MEMDB', 'Field']
import re
import sys
import os
import types
import datetime
import thread
import cStringIO
import csv
import copy
import gluon.validators as validators
from gluon.utils import web2py_uuid
from gluon.storage import Storage
from gluon import SQLTABLE
import random
SQL_DIALECTS = {'memcache': {
'boolean': bool,
'string': unicode,
'text': unicode,
'password': unicode,
'blob': unicode,
'upload': unicode,
'integer': long,
'double': float,
'date': datetime.date,
'time': datetime.time,
'datetime': datetime.datetime,
'id': int,
'reference': int,
'lower': None,
'upper': None,
'is null': 'IS NULL',
'is not null': 'IS NOT NULL',
'extract': None,
'left join': None,
}}
def cleanup(text):
if re.compile('[^0-9a-zA-Z_]').findall(text):
raise SyntaxError('Can\'t cleanup \'%s\': only [0-9a-zA-Z_] allowed in table and field names' % text)
return text
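# For example (follows directly from the regex above): cleanup('person_2')
# returns 'person_2', while cleanup('person-2') raises SyntaxError because
# '-' is outside [0-9a-zA-Z_].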
def assert_filter_fields(*fields):
for field in fields:
if isinstance(field, (Field, Expression)) and field.type\
in ['text', 'blob']:
raise SyntaxError('AppEngine does not index by: %s'
% field.type)
def dateobj_to_datetime(object):
# convert dates,times to datetimes for AppEngine
if isinstance(object, datetime.date):
object = datetime.datetime(object.year, object.month,
object.day)
if isinstance(object, datetime.time):
object = datetime.datetime(
1970,
1,
1,
object.hour,
object.minute,
object.second,
object.microsecond,
)
return object
def sqlhtml_validators(field_type, length):
v = {
'boolean': [],
'string': validators.IS_LENGTH(length),
'text': [],
'password': validators.IS_LENGTH(length),
'blob': [],
'upload': [],
'double': validators.IS_FLOAT_IN_RANGE(-1e100, 1e100),
'integer': validators.IS_INT_IN_RANGE(-1e100, 1e100),
'date': validators.IS_DATE(),
'time': validators.IS_TIME(),
'datetime': validators.IS_DATETIME(),
'reference': validators.IS_INT_IN_RANGE(0, 1e100),
}
try:
return v[field_type[:9]]
except KeyError:
return []
class DALStorage(dict):
"""
a dictionary that let you do d['a'] as well as d.a
"""
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
if key in self:
raise SyntaxError(
'Object \'%s\'exists and cannot be redefined' % key)
self[key] = value
def __repr__(self):
return '<DALStorage ' + dict.__repr__(self) + '>'
class SQLCallableList(list):
def __call__(self):
return copy.copy(self)
class MEMDB(DALStorage):
"""
an instance of this class represents a database connection
Example::
db=MEMDB(Client())
db.define_table('tablename',Field('fieldname1'),
Field('fieldname2'))
"""
def __init__(self, client):
self._dbname = 'memdb'
self['_lastsql'] = ''
self.tables = SQLCallableList()
self._translator = SQL_DIALECTS['memcache']
self.client = client
def define_table(
self,
tablename,
*fields,
**args
):
tablename = cleanup(tablename)
if tablename in dir(self) or tablename[0] == '_':
raise SyntaxError('invalid table name: %s' % tablename)
if not tablename in self.tables:
self.tables.append(tablename)
else:
raise SyntaxError('table already defined: %s' % tablename)
t = self[tablename] = Table(self, tablename, *fields)
t._create()
return t
def __call__(self, where=''):
return Set(self, where)
class SQLALL(object):
def __init__(self, table):
self.table = table
class Table(DALStorage):
"""
an instance of this class represents a database table
Example::
db=MEMDB(Client())
db.define_table('users',Field('name'))
db.users.insert(name='me')
"""
def __init__(
self,
db,
tablename,
*fields
):
self._db = db
self._tablename = tablename
self.fields = SQLCallableList()
self._referenced_by = []
fields = list(fields)
fields.insert(0, Field('id', 'id'))
for field in fields:
self.fields.append(field.name)
self[field.name] = field
field._tablename = self._tablename
field._table = self
field._db = self._db
self.ALL = SQLALL(self)
def _create(self):
fields = []
myfields = {}
for k in self.fields:
field = self[k]
attr = {}
if not field.type[:9] in ['id', 'reference']:
if field.notnull:
attr = dict(required=True)
if field.type[:2] == 'id':
continue
if field.type[:9] == 'reference':
referenced = field.type[10:].strip()
if not referenced:
raise SyntaxError('Table %s: reference \'%s\' to nothing!' % (
self._tablename, k))
if not referenced in self._db:
raise SyntaxError(
'Table: table %s does not exist' % referenced)
referee = self._db[referenced]
ftype = \
self._db._translator[field.type[:9]](
self._db[referenced]._tableobj)
if self._tablename in referee.fields: # ## THIS IS OK
raise SyntaxError('Field: table \'%s\' has same name as a field '
'in referenced table \'%s\'' % (
self._tablename, referenced))
self._db[referenced]._referenced_by.append((self._tablename,
field.name))
elif not field.type in self._db._translator\
or not self._db._translator[field.type]:
raise SyntaxError('Field: unknown field type %s' % field.type)
self._tableobj = self._db.client
return None
def create(self):
        # nothing to do, here for backward compatibility
pass
def drop(self):
        # memcache has no schema to drop; just delete all records
self._db(self.id > 0).delete()
def insert(self, **fields):
# Checks 3 times that the id is new. 3 times is enough!
for i in range(3):
id = self._create_id()
if self.get(id) is None and self.update(id, **fields):
return long(id)
else:
raise RuntimeError("Too many ID conflicts")
def get(self, id):
val = self._tableobj.get(self._id_to_key(id))
if val:
return Storage(val)
else:
return None
def update(self, id, **fields):
        for field in self.fields:
            if field not in fields and self[field].default is not None:
fields[field] = self[field].default
if field in fields:
fields[field] = obj_represent(fields[field],
self[field].type, self._db)
return self._tableobj.set(self._id_to_key(id), fields)
def delete(self, id):
return self._tableobj.delete(self._id_to_key(id))
def _id_to_key(self, id):
return '__memdb__/t/%s/k/%s' % (self._tablename, str(id))
def _create_id(self):
return long(web2py_uuid().replace('-',''),16)
def __str__(self):
return self._tablename
def __call__(self, id, **kwargs):
        record = self.get(id)
        if record is None:
            return None
        if kwargs and any(record[key] != kwargs[key] for key in kwargs):
            return None
        return record
class Expression(object):
def __init__(
self,
name,
type='string',
db=None,
):
(self.name, self.type, self._db) = (name, type, db)
def __str__(self):
return self.name
def __or__(self, other): # for use in sortby
assert_filter_fields(self, other)
return Expression(self.name + '|' + other.name, None, None)
def __invert__(self):
assert_filter_fields(self)
return Expression('-' + self.name, self.type, None)
# for use in Query
def __eq__(self, value):
return Query(self, '=', value)
def __ne__(self, value):
return Query(self, '!=', value)
def __lt__(self, value):
return Query(self, '<', value)
def __le__(self, value):
return Query(self, '<=', value)
def __gt__(self, value):
return Query(self, '>', value)
def __ge__(self, value):
return Query(self, '>=', value)
# def like(self,value): return Query(self,' LIKE ',value)
# def belongs(self,value): return Query(self,' IN ',value)
# for use in both Query and sortby
def __add__(self, other):
return Expression('%s+%s' % (self, other), 'float', None)
def __sub__(self, other):
return Expression('%s-%s' % (self, other), 'float', None)
def __mul__(self, other):
return Expression('%s*%s' % (self, other), 'float', None)
def __div__(self, other):
return Expression('%s/%s' % (self, other), 'float', None)
class Field(Expression):
"""
an instance of this class represents a database field
example::
a = Field(name, 'string', length=32, required=False,
default=None, requires=IS_NOT_EMPTY(), notnull=False,
unique=False, uploadfield=True)
    to be used as argument of MEMDB.define_table
allowed field types:
string, boolean, integer, double, text, blob,
date, time, datetime, upload, password
    strings must have a length (512 by default).
fields should have a default or they will be required in SQLFORMs
    the requires argument is used to validate the field input in SQLFORMs
"""
def __init__(
self,
fieldname,
type='string',
length=None,
default=None,
required=False,
requires=sqlhtml_validators,
ondelete='CASCADE',
notnull=False,
unique=False,
uploadfield=True,
):
self.name = cleanup(fieldname)
if fieldname in dir(Table) or fieldname[0] == '_':
raise SyntaxError('Field: invalid field name: %s' % fieldname)
if isinstance(type, Table):
type = 'reference ' + type._tablename
if not length:
length = 512
self.type = type # 'string', 'integer'
self.length = length # the length of the string
self.default = default # default value for field
self.required = required # is this field required
self.ondelete = ondelete.upper() # this is for reference fields only
self.notnull = notnull
self.unique = unique
self.uploadfield = uploadfield
if requires == sqlhtml_validators:
requires = sqlhtml_validators(type, length)
elif requires is None:
requires = []
self.requires = requires # list of validators
def formatter(self, value):
if value is None or not self.requires:
return value
if not isinstance(self.requires, (list, tuple)):
requires = [self.requires]
else:
requires = copy.copy(self.requires)
requires.reverse()
for item in requires:
if hasattr(item, 'formatter'):
value = item.formatter(value)
return value
def __str__(self):
return '%s.%s' % (self._tablename, self.name)
MEMDB.Field = Field # ## required by gluon/globals.py session.connect
def obj_represent(object, fieldtype, db):
if object is not None:
if fieldtype == 'date' and not isinstance(object,
datetime.date):
(y, m, d) = [int(x) for x in str(object).strip().split('-')]
object = datetime.date(y, m, d)
elif fieldtype == 'time' and not isinstance(object, datetime.time):
time_items = [int(x) for x in str(object).strip().split(':')[:3]]
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
object = datetime.time(h, mi, s)
elif fieldtype == 'datetime' and not isinstance(object,
datetime.datetime):
(y, m, d) = [int(x) for x in
str(object)[:10].strip().split('-')]
time_items = [int(x) for x in
str(object)[11:].strip().split(':')[:3]]
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
object = datetime.datetime(
y,
m,
d,
h,
mi,
s,
)
elif fieldtype == 'integer' and not isinstance(object, long):
object = long(object)
return object
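# Illustrative coercions performed by obj_represent() above:
#   obj_represent('2001-01-01', 'date', db)  -> datetime.date(2001, 1, 1)
#   obj_represent('12:30:15', 'time', db)    -> datetime.time(12, 30, 15)
#   obj_represent('5', 'integer', db)        -> 5L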
class QueryException:
def __init__(self, **a):
self.__dict__ = a
class Query(object):
"""
A query object necessary to define a set.
    It can be stored or can be passed to MEMDB.__call__() to obtain a Set
Example:
query=db.users.name=='Max'
set=db(query)
records=set.select()
"""
def __init__(
self,
left,
op=None,
right=None,
):
if isinstance(right, (Field, Expression)):
raise SyntaxError(
'Query: right side of filter must be a value or entity')
if isinstance(left, Field) and left.name == 'id':
if op == '=':
self.get_one = QueryException(
tablename=left._tablename, id=long(right or 0))
return
else:
raise SyntaxError('only equality by id is supported')
raise SyntaxError('not supported')
def __str__(self):
return str(self.left)
class Set(object):
"""
    A Set represents a set of records in the database,
    the records are identified by the where=Query(...) object.
    normally the Set is generated by MEMDB.__call__(Query(...))
given a set, for example
set=db(db.users.name=='Max')
you can:
set.update(db.users.name='Massimo')
set.delete() # all elements in the set
set.select(orderby=db.users.id,groupby=db.users.name,limitby=(0,10))
and take subsets:
subset=set(db.users.id<5)
"""
def __init__(self, db, where=None):
self._db = db
self._tables = []
self.filters = []
if hasattr(where, 'get_all'):
self.where = where
self._tables.insert(0, where.get_all)
elif hasattr(where, 'get_one') and isinstance(where.get_one,
QueryException):
self.where = where.get_one
else:
# find out which tables are involved
if isinstance(where, Query):
self.filters = where.left
self.where = where
self._tables = [field._tablename for (field, op, val) in
self.filters]
def __call__(self, where):
if isinstance(self.where, QueryException) or isinstance(where,
QueryException):
raise SyntaxError('neither self.where nor where can be a QueryException instance')
if self.where:
return Set(self._db, self.where & where)
else:
return Set(self._db, where)
def _get_table_or_raise(self):
tablenames = list(set(self._tables)) # unique
if len(tablenames) < 1:
raise SyntaxError('Set: no tables selected')
if len(tablenames) > 1:
raise SyntaxError('Set: no join in appengine')
return self._db[tablenames[0]]._tableobj
def _getitem_exception(self):
(tablename, id) = (self.where.tablename, self.where.id)
fields = self._db[tablename].fields
self.colnames = ['%s.%s' % (tablename, t) for t in fields]
item = self._db[tablename].get(id)
return (item, fields, tablename, id)
def _select_except(self):
(item, fields, tablename, id) = self._getitem_exception()
if not item:
return []
new_item = []
for t in fields:
if t == 'id':
new_item.append(long(id))
else:
new_item.append(getattr(item, t))
r = [new_item]
return Rows(self._db, r, *self.colnames)
def select(self, *fields, **attributes):
"""
Always returns a Rows object, even if it may be empty
"""
if isinstance(self.where, QueryException):
return self._select_except()
else:
raise SyntaxError('select arguments not supported')
def count(self):
return len(self.select())
def delete(self):
if isinstance(self.where, QueryException):
(item, fields, tablename, id) = self._getitem_exception()
if not item:
return
self._db[tablename].delete(id)
else:
raise Exception('deletion not implemented')
def update(self, **update_fields):
if isinstance(self.where, QueryException):
(item, fields, tablename, id) = self._getitem_exception()
if not item:
return
for (key, value) in update_fields.items():
setattr(item, key, value)
self._db[tablename].update(id, **item)
else:
raise Exception('update not implemented')
def update_record(
t,
s,
id,
a,
):
item = s.get(id)
for (key, value) in a.items():
t[key] = value
setattr(item, key, value)
s.update(id, **item)
class Rows(object):
"""
A wrapper for the return value of a select. It basically represents a table.
It has an iterator and each row is represented as a dictionary.
"""
# ## this class still needs some work to care for ID/OID
def __init__(
self,
db,
response,
*colnames
):
self._db = db
self.colnames = colnames
self.response = response
def __len__(self):
return len(self.response)
def __getitem__(self, i):
if i >= len(self.response) or i < 0:
raise SyntaxError('Rows: no such row: %i' % i)
if len(self.response[0]) != len(self.colnames):
raise SyntaxError('Rows: internal error')
row = DALStorage()
for j in xrange(len(self.colnames)):
value = self.response[i][j]
if isinstance(value, unicode):
value = value.encode('utf-8')
packed = self.colnames[j].split('.')
try:
(tablename, fieldname) = packed
except:
if not '_extra' in row:
row['_extra'] = DALStorage()
row['_extra'][self.colnames[j]] = value
continue
table = self._db[tablename]
field = table[fieldname]
if not tablename in row:
row[tablename] = DALStorage()
if field.type[:9] == 'reference':
referee = field.type[10:].strip()
rid = value
row[tablename][fieldname] = rid
elif field.type == 'boolean' and value is not None:
# row[tablename][fieldname]=Set(self._db[referee].id==rid)
if value == True or value == 'T':
row[tablename][fieldname] = True
else:
row[tablename][fieldname] = False
elif field.type == 'date' and value is not None\
and not isinstance(value, datetime.date):
(y, m, d) = [int(x) for x in
str(value).strip().split('-')]
row[tablename][fieldname] = datetime.date(y, m, d)
elif field.type == 'time' and value is not None\
and not isinstance(value, datetime.time):
time_items = [int(x) for x in
str(value).strip().split(':')[:3]]
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
row[tablename][fieldname] = datetime.time(h, mi, s)
elif field.type == 'datetime' and value is not None\
and not isinstance(value, datetime.datetime):
(y, m, d) = [int(x) for x in
str(value)[:10].strip().split('-')]
time_items = [int(x) for x in
str(value)[11:].strip().split(':')[:3]]
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
row[tablename][fieldname] = datetime.datetime(
y,
m,
d,
h,
mi,
s,
)
else:
row[tablename][fieldname] = value
if fieldname == 'id':
id = row[tablename].id
row[tablename].update_record = lambda t = row[tablename], \
s = self._db[tablename], id = id, **a: update_record(t,
s, id, a)
for (referee_table, referee_name) in \
table._referenced_by:
s = self._db[referee_table][referee_name]
row[tablename][referee_table] = Set(self._db, s
== id)
if len(row.keys()) == 1:
return row[row.keys()[0]]
return row
def __iter__(self):
"""
iterator over records
"""
for i in xrange(len(self)):
yield self[i]
def __str__(self):
"""
serializes the table into a csv file
"""
s = cStringIO.StringIO()
writer = csv.writer(s)
writer.writerow(self.colnames)
c = len(self.colnames)
for i in xrange(len(self)):
row = [self.response[i][j] for j in xrange(c)]
for k in xrange(c):
if isinstance(row[k], unicode):
row[k] = row[k].encode('utf-8')
writer.writerow(row)
return s.getvalue()
def xml(self):
"""
serializes the table using SQLTABLE (if present)
"""
return SQLTABLE(self).xml()
def test_all():
"""
How to run from web2py dir:
export PYTHONPATH=.:YOUR_PLATFORMS_APPENGINE_PATH
python gluon/contrib/memdb.py
Setup the UTC timezone and database stubs
>>> import os
>>> os.environ['TZ'] = 'UTC'
>>> import time
>>> if hasattr(time, 'tzset'):
... time.tzset()
>>>
>>> from google.appengine.api import apiproxy_stub_map
>>> from google.appengine.api.memcache import memcache_stub
>>> apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
>>> apiproxy_stub_map.apiproxy.RegisterStub('memcache', memcache_stub.MemcacheServiceStub())
Create a table with all possible field types
>>> from google.appengine.api.memcache import Client
>>> db=MEMDB(Client())
>>> tmp=db.define_table('users', Field('stringf','string',length=32,required=True), Field('booleanf','boolean',default=False), Field('passwordf','password',notnull=True), Field('blobf','blob'), Field('uploadf','upload'), Field('integerf','integer',unique=True), Field('doublef','double',unique=True,notnull=True), Field('datef','date',default=datetime.date.today()), Field('timef','time'), Field('datetimef','datetime'), migrate='test_user.table')
Insert a field
>>> user_id = db.users.insert(stringf='a',booleanf=True,passwordf='p',blobf='0A', uploadf=None, integerf=5,doublef=3.14, datef=datetime.date(2001,1,1), timef=datetime.time(12,30,15), datetimef=datetime.datetime(2002,2,2,12,30,15))
>>> user_id != None
True
Select all
# >>> all = db().select(db.users.ALL)
Drop the table
# >>> db.users.drop()
Select many entities
>>> tmp = db.define_table(\"posts\", Field('body','text'), Field('total','integer'), Field('created_at','datetime'))
>>> many = 20 #2010 # more than 1000 single fetch limit (it can be slow)
>>> few = 5
>>> most = many - few
>>> 0 < few < most < many
True
>>> for i in range(many):
... f=db.posts.insert(body='', total=i,created_at=datetime.datetime(2008, 7, 6, 14, 15, 42, i))
>>>
# test timezones
>>> class TZOffset(datetime.tzinfo):
... def __init__(self,offset=0):
... self.offset = offset
... def utcoffset(self, dt): return datetime.timedelta(hours=self.offset)
... def dst(self, dt): return datetime.timedelta(0)
... def tzname(self, dt): return 'UTC' + str(self.offset)
...
>>> SERVER_OFFSET = -8
>>>
>>> stamp = datetime.datetime(2008, 7, 6, 14, 15, 42, 828201)
>>> post_id = db.posts.insert(created_at=stamp,body='body1')
>>> naive_stamp = db(db.posts.id==post_id).select()[0].created_at
>>> utc_stamp=naive_stamp.replace(tzinfo=TZOffset())
>>> server_stamp = utc_stamp.astimezone(TZOffset(SERVER_OFFSET))
>>> stamp == naive_stamp
True
>>> utc_stamp == server_stamp
True
>>> rows = db(db.posts.id==post_id).select()
>>> len(rows) == 1
True
>>> rows[0].body == 'body1'
True
>>> db(db.posts.id==post_id).delete()
>>> rows = db(db.posts.id==post_id).select()
>>> len(rows) == 0
True
>>> id = db.posts.insert(total='0') # coerce str to integer
>>> rows = db(db.posts.id==id).select()
>>> len(rows) == 1
True
>>> rows[0].total == 0
True
Examples of insert, select, update, delete
>>> tmp=db.define_table('person', Field('name'), Field('birth','date'), migrate='test_person.table')
>>> marco_id=db.person.insert(name=\"Marco\",birth='2005-06-22')
>>> person_id=db.person.insert(name=\"Massimo\",birth='1971-12-21')
>>> me=db(db.person.id==person_id).select()[0] # test select
>>> me.name
'Massimo'
>>> db(db.person.id==person_id).update(name='massimo') # test update
>>> me = db(db.person.id==person_id).select()[0]
>>> me.name
'massimo'
>>> str(me.birth)
'1971-12-21'
# resave date to ensure it comes back the same
>>> me=db(db.person.id==person_id).update(birth=me.birth) # test update
>>> me = db(db.person.id==person_id).select()[0]
>>> me.birth
datetime.date(1971, 12, 21)
>>> db(db.person.id==marco_id).delete() # test delete
>>> len(db(db.person.id==marco_id).select())
0
Update a single record
>>> me.update_record(name=\"Max\")
>>> me.name
'Max'
>>> me = db(db.person.id == person_id).select()[0]
>>> me.name
'Max'
"""
SQLField = Field
SQLTable = Table
SQLXorable = Expression
SQLQuery = Query
SQLSet = Set
SQLRows = Rows
SQLStorage = DALStorage
if __name__ == '__main__':
import doctest
doctest.testmod()
| bsd-3-clause |
cfg2015/EPT-2015-2 | addons/sale_journal/sale_journal.py | 276 | 4290 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class sale_journal_invoice_type(osv.osv):
_name = 'sale_journal.invoice.type'
_description = 'Invoice Types'
_columns = {
'name': fields.char('Invoice Type', required=True),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the invoice type without removing it."),
'note': fields.text('Note'),
'invoicing_method': fields.selection([('simple', 'Non grouped'), ('grouped', 'Grouped')], 'Invoicing method', required=True),
}
_defaults = {
'active': True,
'invoicing_method': 'simple'
}
#==============================================
# sale journal inherit
#==============================================
class res_partner(osv.osv):
_inherit = 'res.partner'
_columns = {
'property_invoice_type': fields.property(
type = 'many2one',
relation = 'sale_journal.invoice.type',
string = "Invoicing Type",
group_name = "Accounting Properties",
help = "This invoicing type will be used, by default, to invoice the current partner."),
}
def _commercial_fields(self, cr, uid, context=None):
return super(res_partner, self)._commercial_fields(cr, uid, context=context) + ['property_invoice_type']
class picking(osv.osv):
_inherit = "stock.picking"
_columns = {
'invoice_type_id': fields.many2one('sale_journal.invoice.type', 'Invoice Type', readonly=True)
}
class stock_move(osv.osv):
_inherit = "stock.move"
def action_confirm(self, cr, uid, ids, context=None):
"""
Pass the invoice type to the picking from the sales order
        (This should also work for phantom BoMs, where the original move is
        deleted on explosion, similar to carrier_id on delivery.)
"""
procs_to_check = []
for move in self.browse(cr, uid, ids, context=context):
if move.procurement_id and move.procurement_id.sale_line_id and move.procurement_id.sale_line_id.order_id.invoice_type_id:
procs_to_check += [move.procurement_id]
res = super(stock_move, self).action_confirm(cr, uid, ids, context=context)
pick_obj = self.pool.get("stock.picking")
for proc in procs_to_check:
pickings = list(set([x.picking_id.id for x in proc.move_ids if x.picking_id and not x.picking_id.invoice_type_id]))
if pickings:
pick_obj.write(cr, uid, pickings, {'invoice_type_id': proc.sale_line_id.order_id.invoice_type_id.id}, context=context)
return res
class sale(osv.osv):
_inherit = "sale.order"
_columns = {
'invoice_type_id': fields.many2one('sale_journal.invoice.type', 'Invoice Type', help="Generate invoice based on the selected option.")
}
def onchange_partner_id(self, cr, uid, ids, part, context=None):
result = super(sale, self).onchange_partner_id(cr, uid, ids, part, context=context)
if part:
itype = self.pool.get('res.partner').browse(cr, uid, part, context=context).property_invoice_type
if itype:
result['value']['invoice_type_id'] = itype.id
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
patpatpatpatpat/digestus | digestus/users/migrations/0002_auto_20160411_0706.py | 1 | 1042 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-11 07:06
from __future__ import unicode_literals
import django.contrib.auth.models
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterModelManagers(
name='user',
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username'),
),
]
| bsd-3-clause |
kkdd/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_str.py | 51 | 15114 |
import struct
import sys
from test import test_support, string_tests
class StrTest(
string_tests.CommonTest,
string_tests.MixinStrUnicodeUserStringTest,
string_tests.MixinStrUserStringTest,
string_tests.MixinStrUnicodeTest,
):
type2test = str
# We don't need to propagate to str
def fixtype(self, obj):
return obj
def test_basic_creation(self):
self.assertEqual(str(''), '')
self.assertEqual(str(0), '0')
self.assertEqual(str(0L), '0')
self.assertEqual(str(()), '()')
self.assertEqual(str([]), '[]')
self.assertEqual(str({}), '{}')
a = []
a.append(a)
self.assertEqual(str(a), '[[...]]')
a = {}
a[0] = a
self.assertEqual(str(a), '{0: {...}}')
def test_formatting(self):
string_tests.MixinStrUnicodeUserStringTest.test_formatting(self)
self.assertRaises(OverflowError, '%c'.__mod__, 0x1234)
def test_conversion(self):
# Make sure __str__() behaves properly
class Foo0:
def __unicode__(self):
return u"foo"
class Foo1:
def __str__(self):
return "foo"
class Foo2(object):
def __str__(self):
return "foo"
class Foo3(object):
def __str__(self):
return u"foo"
class Foo4(unicode):
def __str__(self):
return u"foo"
class Foo5(str):
def __str__(self):
return u"foo"
class Foo6(str):
def __str__(self):
return "foos"
def __unicode__(self):
return u"foou"
class Foo7(unicode):
def __str__(self):
return "foos"
def __unicode__(self):
return u"foou"
class Foo8(str):
def __new__(cls, content=""):
return str.__new__(cls, 2*content)
def __str__(self):
return self
class Foo9(str):
def __str__(self):
return "string"
def __unicode__(self):
return "not unicode"
self.assert_(str(Foo0()).startswith("<")) # this is different from __unicode__
self.assertEqual(str(Foo1()), "foo")
self.assertEqual(str(Foo2()), "foo")
self.assertEqual(str(Foo3()), "foo")
self.assertEqual(str(Foo4("bar")), "foo")
self.assertEqual(str(Foo5("bar")), "foo")
self.assertEqual(str(Foo6("bar")), "foos")
self.assertEqual(str(Foo7("bar")), "foos")
self.assertEqual(str(Foo8("foo")), "foofoo")
self.assertEqual(str(Foo9("foo")), "string")
self.assertEqual(unicode(Foo9("foo")), u"not unicode")
def test_expandtabs_overflows_gracefully(self):
# This test only affects 32-bit platforms because expandtabs can only take
# an int as the max value, not a 64-bit C long. If expandtabs is changed
# to take a 64-bit long, this test should apply to all platforms.
if sys.maxint > (1 << 32) or struct.calcsize('P') != 4:
return
self.assertRaises(OverflowError, 't\tt\t'.expandtabs, sys.maxint)
def test__format__(self):
def test(value, format, expected):
# test both with and without the trailing 's'
self.assertEqual(value.__format__(format), expected)
self.assertEqual(value.__format__(format + 's'), expected)
test('', '', '')
test('abc', '', 'abc')
test('abc', '.3', 'abc')
test('ab', '.3', 'ab')
test('abcdef', '.3', 'abc')
test('abcdef', '.0', '')
test('abc', '3.3', 'abc')
test('abc', '2.3', 'abc')
test('abc', '2.2', 'ab')
test('abc', '3.2', 'ab ')
test('result', 'x<0', 'result')
test('result', 'x<5', 'result')
test('result', 'x<6', 'result')
test('result', 'x<7', 'resultx')
test('result', 'x<8', 'resultxx')
test('result', ' <7', 'result ')
test('result', '<7', 'result ')
test('result', '>7', ' result')
test('result', '>8', ' result')
test('result', '^8', ' result ')
test('result', '^9', ' result ')
test('result', '^10', ' result ')
test('a', '10000', 'a' + ' ' * 9999)
test('', '10000', ' ' * 10000)
test('', '10000000', ' ' * 10000000)
def test_format(self):
self.assertEqual(''.format(), '')
self.assertEqual('a'.format(), 'a')
self.assertEqual('ab'.format(), 'ab')
self.assertEqual('a{{'.format(), 'a{')
self.assertEqual('a}}'.format(), 'a}')
self.assertEqual('{{b'.format(), '{b')
self.assertEqual('}}b'.format(), '}b')
self.assertEqual('a{{b'.format(), 'a{b')
# examples from the PEP:
import datetime
self.assertEqual("My name is {0}".format('Fred'), "My name is Fred")
self.assertEqual("My name is {0[name]}".format(dict(name='Fred')),
"My name is Fred")
self.assertEqual("My name is {0} :-{{}}".format('Fred'),
"My name is Fred :-{}")
d = datetime.date(2007, 8, 18)
self.assertEqual("The year is {0.year}".format(d),
"The year is 2007")
# classes we'll use for testing
class C:
def __init__(self, x=100):
self._x = x
def __format__(self, spec):
return spec
class D:
def __init__(self, x):
self.x = x
def __format__(self, spec):
return str(self.x)
# class with __str__, but no __format__
class E:
def __init__(self, x):
self.x = x
def __str__(self):
return 'E(' + self.x + ')'
# class with __repr__, but no __format__ or __str__
class F:
def __init__(self, x):
self.x = x
def __repr__(self):
return 'F(' + self.x + ')'
# class with __format__ that forwards to string, for some format_spec's
class G:
def __init__(self, x):
self.x = x
def __str__(self):
return "string is " + self.x
def __format__(self, format_spec):
if format_spec == 'd':
return 'G(' + self.x + ')'
return object.__format__(self, format_spec)
# class that returns a bad type from __format__
class H:
def __format__(self, format_spec):
return 1.0
class I(datetime.date):
def __format__(self, format_spec):
return self.strftime(format_spec)
class J(int):
def __format__(self, format_spec):
return int.__format__(self * 2, format_spec)
self.assertEqual(''.format(), '')
self.assertEqual('abc'.format(), 'abc')
self.assertEqual('{0}'.format('abc'), 'abc')
self.assertEqual('{0:}'.format('abc'), 'abc')
self.assertEqual('X{0}'.format('abc'), 'Xabc')
self.assertEqual('{0}X'.format('abc'), 'abcX')
self.assertEqual('X{0}Y'.format('abc'), 'XabcY')
self.assertEqual('{1}'.format(1, 'abc'), 'abc')
self.assertEqual('X{1}'.format(1, 'abc'), 'Xabc')
self.assertEqual('{1}X'.format(1, 'abc'), 'abcX')
self.assertEqual('X{1}Y'.format(1, 'abc'), 'XabcY')
self.assertEqual('{0}'.format(-15), '-15')
self.assertEqual('{0}{1}'.format(-15, 'abc'), '-15abc')
self.assertEqual('{0}X{1}'.format(-15, 'abc'), '-15Xabc')
self.assertEqual('{{'.format(), '{')
self.assertEqual('}}'.format(), '}')
self.assertEqual('{{}}'.format(), '{}')
self.assertEqual('{{x}}'.format(), '{x}')
self.assertEqual('{{{0}}}'.format(123), '{123}')
self.assertEqual('{{{{0}}}}'.format(), '{{0}}')
self.assertEqual('}}{{'.format(), '}{')
self.assertEqual('}}x{{'.format(), '}x{')
# weird field names
self.assertEqual("{0[foo-bar]}".format({'foo-bar':'baz'}), 'baz')
self.assertEqual("{0[foo bar]}".format({'foo bar':'baz'}), 'baz')
self.assertEqual("{0[ ]}".format({' ':3}), '3')
self.assertEqual('{foo._x}'.format(foo=C(20)), '20')
self.assertEqual('{1}{0}'.format(D(10), D(20)), '2010')
self.assertEqual('{0._x.x}'.format(C(D('abc'))), 'abc')
self.assertEqual('{0[0]}'.format(['abc', 'def']), 'abc')
self.assertEqual('{0[1]}'.format(['abc', 'def']), 'def')
self.assertEqual('{0[1][0]}'.format(['abc', ['def']]), 'def')
self.assertEqual('{0[1][0].x}'.format(['abc', [D('def')]]), 'def')
# strings
self.assertEqual('{0:.3s}'.format('abc'), 'abc')
self.assertEqual('{0:.3s}'.format('ab'), 'ab')
self.assertEqual('{0:.3s}'.format('abcdef'), 'abc')
self.assertEqual('{0:.0s}'.format('abcdef'), '')
self.assertEqual('{0:3.3s}'.format('abc'), 'abc')
self.assertEqual('{0:2.3s}'.format('abc'), 'abc')
self.assertEqual('{0:2.2s}'.format('abc'), 'ab')
self.assertEqual('{0:3.2s}'.format('abc'), 'ab ')
self.assertEqual('{0:x<0s}'.format('result'), 'result')
self.assertEqual('{0:x<5s}'.format('result'), 'result')
self.assertEqual('{0:x<6s}'.format('result'), 'result')
self.assertEqual('{0:x<7s}'.format('result'), 'resultx')
self.assertEqual('{0:x<8s}'.format('result'), 'resultxx')
self.assertEqual('{0: <7s}'.format('result'), 'result ')
self.assertEqual('{0:<7s}'.format('result'), 'result ')
self.assertEqual('{0:>7s}'.format('result'), ' result')
self.assertEqual('{0:>8s}'.format('result'), ' result')
self.assertEqual('{0:^8s}'.format('result'), ' result ')
self.assertEqual('{0:^9s}'.format('result'), ' result ')
self.assertEqual('{0:^10s}'.format('result'), ' result ')
self.assertEqual('{0:10000}'.format('a'), 'a' + ' ' * 9999)
self.assertEqual('{0:10000}'.format(''), ' ' * 10000)
self.assertEqual('{0:10000000}'.format(''), ' ' * 10000000)
# format specifiers for user defined type
self.assertEqual('{0:abc}'.format(C()), 'abc')
# !r and !s coersions
self.assertEqual('{0!s}'.format('Hello'), 'Hello')
self.assertEqual('{0!s:}'.format('Hello'), 'Hello')
self.assertEqual('{0!s:15}'.format('Hello'), 'Hello ')
self.assertEqual('{0!s:15s}'.format('Hello'), 'Hello ')
self.assertEqual('{0!r}'.format('Hello'), "'Hello'")
self.assertEqual('{0!r:}'.format('Hello'), "'Hello'")
self.assertEqual('{0!r}'.format(F('Hello')), 'F(Hello)')
# test fallback to object.__format__
self.assertEqual('{0}'.format({}), '{}')
self.assertEqual('{0}'.format([]), '[]')
self.assertEqual('{0}'.format([1]), '[1]')
self.assertEqual('{0}'.format(E('data')), 'E(data)')
self.assertEqual('{0:^10}'.format(E('data')), ' E(data) ')
self.assertEqual('{0:^10s}'.format(E('data')), ' E(data) ')
self.assertEqual('{0:d}'.format(G('data')), 'G(data)')
self.assertEqual('{0:>15s}'.format(G('data')), ' string is data')
self.assertEqual('{0!s}'.format(G('data')), 'string is data')
self.assertEqual("{0:date: %Y-%m-%d}".format(I(year=2007,
month=8,
day=27)),
"date: 2007-08-27")
# test deriving from a builtin type and overriding __format__
self.assertEqual("{0}".format(J(10)), "20")
# string format specifiers
self.assertEqual('{0:}'.format('a'), 'a')
# computed format specifiers
self.assertEqual("{0:.{1}}".format('hello world', 5), 'hello')
self.assertEqual("{0:.{1}s}".format('hello world', 5), 'hello')
self.assertEqual("{0:.{precision}s}".format('hello world', precision=5), 'hello')
self.assertEqual("{0:{width}.{precision}s}".format('hello world', width=10, precision=5), 'hello ')
self.assertEqual("{0:{width}.{precision}s}".format('hello world', width='10', precision='5'), 'hello ')
# test various errors
self.assertRaises(ValueError, '{'.format)
self.assertRaises(ValueError, '}'.format)
self.assertRaises(ValueError, 'a{'.format)
self.assertRaises(ValueError, 'a}'.format)
self.assertRaises(ValueError, '{a'.format)
self.assertRaises(ValueError, '}a'.format)
self.assertRaises(IndexError, '{0}'.format)
self.assertRaises(IndexError, '{1}'.format, 'abc')
self.assertRaises(KeyError, '{x}'.format)
self.assertRaises(ValueError, "}{".format)
self.assertRaises(ValueError, "{".format)
self.assertRaises(ValueError, "}".format)
self.assertRaises(ValueError, "abc{0:{}".format)
self.assertRaises(ValueError, "{0".format)
self.assertRaises(IndexError, "{0.}".format)
self.assertRaises(ValueError, "{0.}".format, 0)
self.assertRaises(IndexError, "{0[}".format)
self.assertRaises(ValueError, "{0[}".format, [])
self.assertRaises(KeyError, "{0]}".format)
self.assertRaises(ValueError, "{0.[]}".format, 0)
self.assertRaises(ValueError, "{0..foo}".format, 0)
self.assertRaises(ValueError, "{0[0}".format, 0)
self.assertRaises(ValueError, "{0[0:foo}".format, 0)
self.assertRaises(KeyError, "{c]}".format)
self.assertRaises(ValueError, "{{ {{{0}}".format, 0)
self.assertRaises(ValueError, "{0}}".format, 0)
self.assertRaises(KeyError, "{foo}".format, bar=3)
self.assertRaises(ValueError, "{0!x}".format, 3)
self.assertRaises(ValueError, "{0!}".format, 0)
self.assertRaises(ValueError, "{0!rs}".format, 0)
self.assertRaises(ValueError, "{!}".format)
self.assertRaises(ValueError, "{:}".format)
self.assertRaises(ValueError, "{:s}".format)
self.assertRaises(ValueError, "{}".format)
# can't have a replacement on the field name portion
self.assertRaises(TypeError, '{0[{1}]}'.format, 'abcdefg', 4)
# exceed maximum recursion depth
self.assertRaises(ValueError, "{0:{1:{2}}}".format, 'abc', 's', '')
self.assertRaises(ValueError, "{0:{1:{2:{3:{4:{5:{6}}}}}}}".format,
0, 1, 2, 3, 4, 5, 6, 7)
# string format spec errors
self.assertRaises(ValueError, "{0:-s}".format, '')
self.assertRaises(ValueError, format, "", "-")
self.assertRaises(ValueError, "{0:=s}".format, '')
def test_buffer_is_readonly(self):
self.assertRaises(TypeError, sys.stdin.readinto, b"")
def test_main():
test_support.run_unittest(StrTest)
if __name__ == "__main__":
test_main()
| apache-2.0 |
cchamanEE/pydare | test/dlyaptest.py | 2 | 1617 | from pydare.dlyap import dlyap_iterative, dlyap_schur, dlyap_slycot
import numpy
import unittest
class DlyapTestCase(unittest.TestCase):
def setUp(self):
self.a = numpy.matrix([[0.5,1.0],[-1.0,-1.0]])
self.q = numpy.matrix([[2.0,0.0],[0.0,0.5]])
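        # The solution asserted below, X = [[4.75, -2.625], [-2.625, 4.1875]],
        # satisfies the discrete-time Lyapunov equation A*X*A^T - X + Q = 0
        # (sign convention inferred from the asserted values).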
def testIterative(self):
x = dlyap_iterative(self.a,self.q)
self.assertAlmostEqual(4.75,x[0,0],4)
self.assertAlmostEqual(4.1875,x[1,1],4)
for i in range(0,2):
for j in range(0,2):
if i != j:
self.assertAlmostEqual(-2.625,x[i,j],4)
def testDirect(self):
x = dlyap_schur(self.a,self.q)
self.assertAlmostEqual(4.75,x[0,0],4)
self.assertAlmostEqual(4.1875,x[1,1],4)
for i in range(0,2):
for j in range(0,2):
if i != j:
self.assertAlmostEqual(-2.625,x[i,j],4)
def testSLICOT(self):
x = dlyap_slycot(self.a,self.q)
self.assertAlmostEqual(4.75,x[0,0],4)
self.assertAlmostEqual(4.1875,x[1,1],4)
for i in range(0,2):
for j in range(0,2):
if i != j:
self.assertAlmostEqual(-2.625,x[i,j],4)
def suite():
suite = unittest.TestSuite()
suite.addTest(DlyapTestCase('testIterative'))
suite.addTest(DlyapTestCase('testDirect'))
suite.addTest(DlyapTestCase('testSLICOT'))
return suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite()) | gpl-3.0 |
mortada/numpy | tools/swig/test/testFlat.py | 108 | 6906 | #! /usr/bin/env python
from __future__ import division, absolute_import, print_function
# System imports
from distutils.util import get_platform
import os
import sys
import unittest
import struct
# Import NumPy
import numpy as np
major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
if major == 0: BadListError = TypeError
else: BadListError = ValueError
import Flat
######################################################################
class FlatTestCase(unittest.TestCase):
def __init__(self, methodName="runTest"):
unittest.TestCase.__init__(self, methodName)
self.typeStr = "double"
self.typeCode = "d"
# Test the (type* INPLACE_ARRAY_FLAT, int DIM_FLAT) typemap
def testProcess1D(self):
"Test Process function 1D array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
process = Flat.__dict__[self.typeStr + "Process"]
pack_output = ''
for i in range(10):
pack_output += struct.pack(self.typeCode,i)
x = np.frombuffer(pack_output, dtype=self.typeCode)
y = x.copy()
process(y)
self.assertEquals(np.all((x+1)==y),True)
def testProcess3D(self):
"Test Process function 3D array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
process = Flat.__dict__[self.typeStr + "Process"]
pack_output = ''
for i in range(24):
pack_output += struct.pack(self.typeCode,i)
x = np.frombuffer(pack_output, dtype=self.typeCode)
x.shape = (2,3,4)
y = x.copy()
process(y)
self.assertEquals(np.all((x+1)==y),True)
def testProcess3DTranspose(self):
"Test Process function 3D array, FORTRAN order"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
process = Flat.__dict__[self.typeStr + "Process"]
pack_output = ''
for i in range(24):
pack_output += struct.pack(self.typeCode,i)
x = np.frombuffer(pack_output, dtype=self.typeCode)
x.shape = (2,3,4)
y = x.copy()
process(y.T)
self.assertEquals(np.all((x.T+1)==y.T),True)
def testProcessNoncontiguous(self):
"Test Process function with non-contiguous array, which should raise an error"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
process = Flat.__dict__[self.typeStr + "Process"]
pack_output = ''
for i in range(24):
pack_output += struct.pack(self.typeCode,i)
x = np.frombuffer(pack_output, dtype=self.typeCode)
x.shape = (2,3,4)
self.assertRaises(TypeError, process, x[:,:,0])
######################################################################
class scharTestCase(FlatTestCase):
def __init__(self, methodName="runTest"):
FlatTestCase.__init__(self, methodName)
self.typeStr = "schar"
self.typeCode = "b"
######################################################################
class ucharTestCase(FlatTestCase):
def __init__(self, methodName="runTest"):
FlatTestCase.__init__(self, methodName)
self.typeStr = "uchar"
self.typeCode = "B"
######################################################################
class shortTestCase(FlatTestCase):
def __init__(self, methodName="runTest"):
FlatTestCase.__init__(self, methodName)
self.typeStr = "short"
self.typeCode = "h"
######################################################################
class ushortTestCase(FlatTestCase):
def __init__(self, methodName="runTest"):
FlatTestCase.__init__(self, methodName)
self.typeStr = "ushort"
self.typeCode = "H"
######################################################################
class intTestCase(FlatTestCase):
def __init__(self, methodName="runTest"):
FlatTestCase.__init__(self, methodName)
self.typeStr = "int"
self.typeCode = "i"
######################################################################
class uintTestCase(FlatTestCase):
def __init__(self, methodName="runTest"):
FlatTestCase.__init__(self, methodName)
self.typeStr = "uint"
self.typeCode = "I"
######################################################################
class longTestCase(FlatTestCase):
def __init__(self, methodName="runTest"):
FlatTestCase.__init__(self, methodName)
self.typeStr = "long"
self.typeCode = "l"
######################################################################
class ulongTestCase(FlatTestCase):
def __init__(self, methodName="runTest"):
FlatTestCase.__init__(self, methodName)
self.typeStr = "ulong"
self.typeCode = "L"
######################################################################
class longLongTestCase(FlatTestCase):
def __init__(self, methodName="runTest"):
FlatTestCase.__init__(self, methodName)
self.typeStr = "longLong"
self.typeCode = "q"
######################################################################
class ulongLongTestCase(FlatTestCase):
def __init__(self, methodName="runTest"):
FlatTestCase.__init__(self, methodName)
self.typeStr = "ulongLong"
self.typeCode = "Q"
######################################################################
class floatTestCase(FlatTestCase):
def __init__(self, methodName="runTest"):
FlatTestCase.__init__(self, methodName)
self.typeStr = "float"
self.typeCode = "f"
######################################################################
class doubleTestCase(FlatTestCase):
def __init__(self, methodName="runTest"):
FlatTestCase.__init__(self, methodName)
self.typeStr = "double"
self.typeCode = "d"
######################################################################
if __name__ == "__main__":
# Build the test suite
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite( scharTestCase))
suite.addTest(unittest.makeSuite( ucharTestCase))
suite.addTest(unittest.makeSuite( shortTestCase))
suite.addTest(unittest.makeSuite( ushortTestCase))
suite.addTest(unittest.makeSuite( intTestCase))
suite.addTest(unittest.makeSuite( uintTestCase))
suite.addTest(unittest.makeSuite( longTestCase))
suite.addTest(unittest.makeSuite( ulongTestCase))
suite.addTest(unittest.makeSuite( longLongTestCase))
suite.addTest(unittest.makeSuite(ulongLongTestCase))
suite.addTest(unittest.makeSuite( floatTestCase))
suite.addTest(unittest.makeSuite( doubleTestCase))
# Execute the test suite
print("Testing 1D Functions of Module Flat")
print("NumPy version", np.__version__)
print()
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(bool(result.errors + result.failures))
| bsd-3-clause |
plaes/flask-sendmail | docs/_themes/flask_theme_support.py | 2228 | 4875 | # flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class FlaskyStyle(Style):
background_color = "#f8f8f8"
default_style = ""
styles = {
# No corresponding class for the following:
#Text: "", # class: ''
Whitespace: "underline #f8f8f8", # class: 'w'
Error: "#a40000 border:#ef2929", # class: 'err'
Other: "#000000", # class 'x'
Comment: "italic #8f5902", # class: 'c'
Comment.Preproc: "noitalic", # class: 'cp'
Keyword: "bold #004461", # class: 'k'
Keyword.Constant: "bold #004461", # class: 'kc'
Keyword.Declaration: "bold #004461", # class: 'kd'
Keyword.Namespace: "bold #004461", # class: 'kn'
Keyword.Pseudo: "bold #004461", # class: 'kp'
Keyword.Reserved: "bold #004461", # class: 'kr'
Keyword.Type: "bold #004461", # class: 'kt'
Operator: "#582800", # class: 'o'
Operator.Word: "bold #004461", # class: 'ow' - like keywords
Punctuation: "bold #000000", # class: 'p'
# because special names such as Name.Class, Name.Function, etc.
# are not recognized as such later in the parsing, we choose them
# to look the same as ordinary variables.
Name: "#000000", # class: 'n'
Name.Attribute: "#c4a000", # class: 'na' - to be revised
Name.Builtin: "#004461", # class: 'nb'
Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
Name.Class: "#000000", # class: 'nc' - to be revised
Name.Constant: "#000000", # class: 'no' - to be revised
Name.Decorator: "#888", # class: 'nd' - to be revised
Name.Entity: "#ce5c00", # class: 'ni'
Name.Exception: "bold #cc0000", # class: 'ne'
Name.Function: "#000000", # class: 'nf'
Name.Property: "#000000", # class: 'py'
Name.Label: "#f57900", # class: 'nl'
Name.Namespace: "#000000", # class: 'nn' - to be revised
Name.Other: "#000000", # class: 'nx'
Name.Tag: "bold #004461", # class: 'nt' - like a keyword
Name.Variable: "#000000", # class: 'nv' - to be revised
Name.Variable.Class: "#000000", # class: 'vc' - to be revised
Name.Variable.Global: "#000000", # class: 'vg' - to be revised
Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
Number: "#990000", # class: 'm'
Literal: "#000000", # class: 'l'
Literal.Date: "#000000", # class: 'ld'
String: "#4e9a06", # class: 's'
String.Backtick: "#4e9a06", # class: 'sb'
String.Char: "#4e9a06", # class: 'sc'
String.Doc: "italic #8f5902", # class: 'sd' - like a comment
String.Double: "#4e9a06", # class: 's2'
String.Escape: "#4e9a06", # class: 'se'
String.Heredoc: "#4e9a06", # class: 'sh'
String.Interpol: "#4e9a06", # class: 'si'
String.Other: "#4e9a06", # class: 'sx'
String.Regex: "#4e9a06", # class: 'sr'
String.Single: "#4e9a06", # class: 's1'
String.Symbol: "#4e9a06", # class: 'ss'
Generic: "#000000", # class: 'g'
Generic.Deleted: "#a40000", # class: 'gd'
Generic.Emph: "italic #000000", # class: 'ge'
Generic.Error: "#ef2929", # class: 'gr'
Generic.Heading: "bold #000080", # class: 'gh'
Generic.Inserted: "#00A000", # class: 'gi'
Generic.Output: "#888", # class: 'go'
Generic.Prompt: "#745334", # class: 'gp'
Generic.Strong: "bold #000000", # class: 'gs'
Generic.Subheading: "bold #800080", # class: 'gu'
Generic.Traceback: "bold #a40000", # class: 'gt'
}
| bsd-3-clause |
jhamman/xarray | xarray/core/dask_array_ops.py | 1 | 3415 | import numpy as np
from . import dtypes, nputils
def dask_rolling_wrapper(moving_func, a, window, min_count=None, axis=-1):
"""Wrapper to apply bottleneck moving window funcs on dask arrays
"""
import dask.array as da
dtype, fill_value = dtypes.maybe_promote(a.dtype)
a = a.astype(dtype)
# inputs for overlap
if axis < 0:
axis = a.ndim + axis
depth = {d: 0 for d in range(a.ndim)}
depth[axis] = (window + 1) // 2
boundary = {d: fill_value for d in range(a.ndim)}
# Create overlap array.
ag = da.overlap.overlap(a, depth=depth, boundary=boundary)
# apply rolling func
out = ag.map_blocks(
moving_func, window, min_count=min_count, axis=axis, dtype=a.dtype
)
# trim array
result = da.overlap.trim_internal(out, depth)
return result
def rolling_window(a, axis, window, center, fill_value):
"""Dask's equivalence to np.utils.rolling_window
"""
import dask.array as da
orig_shape = a.shape
if axis < 0:
axis = a.ndim + axis
depth = {d: 0 for d in range(a.ndim)}
depth[axis] = int(window / 2)
    # For an even-sized window, we need to crop the first point of each block.
offset = 1 if window % 2 == 0 else 0
if depth[axis] > min(a.chunks[axis]):
raise ValueError(
"For window size %d, every chunk should be larger than %d, "
"but the smallest chunk size is %d. Rechunk your array\n"
"with a larger chunk size or a chunk size that\n"
"more evenly divides the shape of your array."
% (window, depth[axis], min(a.chunks[axis]))
)
# Although da.overlap pads values to boundaries of the array,
# the size of the generated array is smaller than what we want
# if center == False.
if center:
start = int(window / 2) # 10 -> 5, 9 -> 4
end = window - 1 - start
else:
start, end = window - 1, 0
pad_size = max(start, end) + offset - depth[axis]
drop_size = 0
# pad_size becomes more than 0 when the overlapped array is smaller than
# needed. In this case, we need to enlarge the original array by padding
# before overlapping.
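    # Worked example using the formulas above: window=4 and center=False give
    # depth=2, start=3, end=0, offset=1, so pad_size = max(3, 0) + 1 - 2 = 2
    # and two columns of fill_value are prepended along the rolling axis.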
if pad_size > 0:
if pad_size < depth[axis]:
# overlapping requires each chunk larger than depth. If pad_size is
# smaller than the depth, we enlarge this and truncate it later.
drop_size = depth[axis] - pad_size
pad_size = depth[axis]
shape = list(a.shape)
shape[axis] = pad_size
chunks = list(a.chunks)
chunks[axis] = (pad_size,)
fill_array = da.full(shape, fill_value, dtype=a.dtype, chunks=chunks)
a = da.concatenate([fill_array, a], axis=axis)
boundary = {d: fill_value for d in range(a.ndim)}
# create overlap arrays
ag = da.overlap.overlap(a, depth=depth, boundary=boundary)
# apply rolling func
def func(x, window, axis=-1):
x = np.asarray(x)
rolling = nputils._rolling_window(x, window, axis)
return rolling[(slice(None),) * axis + (slice(offset, None),)]
chunks = list(a.chunks)
chunks.append(window)
out = ag.map_blocks(
func, dtype=a.dtype, new_axis=a.ndim, chunks=chunks, window=window, axis=axis
)
# crop boundary.
index = (slice(None),) * axis + (slice(drop_size, drop_size + orig_shape[axis]),)
return out[index]
| apache-2.0 |
jds2001/ocp-checkbox | plainbox/plainbox/impl/checkbox.py | 1 | 11485 | # This file is part of Checkbox.
#
# Copyright 2012, 2013 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <[email protected]>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
:mod:`plainbox.impl.checkbox` -- CheckBox integration
=====================================================
.. warning::
THIS MODULE DOES NOT HAVE STABLE PUBLIC API
"""
import collections
import io
import logging
import os
from plainbox.impl import get_plainbox_dir
from plainbox.impl.applogic import RegExpJobQualifier, CompositeQualifier
from plainbox.impl.job import JobDefinition
from plainbox.impl.rfc822 import load_rfc822_records
logger = logging.getLogger("plainbox.checkbox")
# NOTE: using CompositeQualifier seems strange but it's a tested proven
# component so all we have to ensure is that we read the whitelist files
# correctly.
class WhiteList(CompositeQualifier):
"""
A qualifier that understands checkbox whitelist files.
    A whitelist file is a plain-text, line-oriented file. Each line holds a
    regular expression pattern that is matched against the name of a job.
    The file can contain simple shell-style comments that begin with the pound
    or hash sign (#). Those are ignored. A comment may cover either the end of
    a line or the whole line.
For historical reasons each pattern has an implicit '^' and '$' prepended
and appended (respectively) to the actual pattern specified in the file.
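    For illustration, a hypothetical whitelist file could contain::

        # networking tests
        networking/detect
        networking/ping_.*

    which would yield the patterns '^networking/detect$' and
    '^networking/ping_.*$'.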
"""
def __init__(self, pattern_list, name=None):
"""
Initialize a whitelist object with the specified list of patterns.
The patterns must be already mangled with '^' and '$'.
"""
inclusive = [RegExpJobQualifier(pattern) for pattern in pattern_list]
exclusive = ()
super(WhiteList, self).__init__(inclusive, exclusive)
self._name = name
@property
def name(self):
"""
name of this WhiteList (might be None)
"""
return self._name
@classmethod
def from_file(cls, pathname):
"""
Load and initialize the WhiteList object from the specified file.
:param pathname: file to load
:returns: a fresh WhiteList object
"""
pattern_list = cls._load_patterns(pathname)
name = os.path.splitext(os.path.basename(pathname))[0]
return cls(pattern_list, name=name)
@classmethod
    def _load_patterns(cls, pathname):
"""
Load whitelist patterns from the specified file
"""
pattern_list = []
# Load the file
with open(pathname, "rt", encoding="UTF-8") as stream:
for line in stream:
# Strip shell-style comments if there are any
try:
index = line.index("#")
except ValueError:
pass
else:
line = line[:index]
# Strip whitespace
line = line.strip()
# Skip empty lines (especially after stripping comments)
if line == "":
continue
# Surround the pattern with ^ and $
# so that it wont just match a part of the job name.
regexp_pattern = r"^{pattern}$".format(pattern=line)
# Accumulate patterns into the list
pattern_list.append(regexp_pattern)
return pattern_list
class CheckBoxNotFound(LookupError):
"""
Exception used to report that CheckBox cannot be located
"""
def __repr__(self):
return "CheckBoxNotFound()"
def __str__(self):
return "CheckBox cannot be found"
def _get_checkbox_dir():
"""
Return the root directory of the checkbox source checkout
Historically plainbox used a git submodule with checkbox tree (converted to
git). This ended with the merge of plainbox into the checkbox tree.
Now it's the other way around and the checkbox tree can be located two
directories "up" from the plainbox module, in a checkbox-old directory.
"""
return os.path.normpath(
os.path.join(
get_plainbox_dir(), "..", "..", "checkbox-old"))
class CheckBox:
"""
Helper class for interacting with CheckBox
PlainBox relies on CheckBox for actual jobs, scripts and library features
required by the scripts. This class allows one to interact with CheckBox
without having to bother with knowing how to set up the environment.
This class also abstracts away the differences between dealing with
CheckBox that is installed from system packages and CheckBox that is
available from a checkout directory.
"""
# Helper for locating certain directories
CheckBoxDirs = collections.namedtuple(
"CheckBoxDirs", "SHARE_DIR SCRIPTS_DIR JOBS_DIR DATA_DIR")
# Temporary helper to compute "src" value below
source_dir = _get_checkbox_dir()
_DIRECTORY_MAP = collections.OrderedDict((
# Layout for source checkout
("src", CheckBoxDirs(
source_dir,
os.path.join(source_dir, "scripts"),
os.path.join(source_dir, "jobs"),
os.path.join(source_dir, "data"))),
# Layout for installed version
("deb", CheckBoxDirs(
"/usr/share/checkbox/",
"/usr/share/checkbox/scripts",
"/usr/share/checkbox/jobs",
"/usr/share/checkbox/data"))))
# Remove temporary helper that was needed above
del source_dir
def __init__(self, mode=None):
"""
Initialize checkbox integration.
:param mode:
If specified it determines which checkbox installation to use.
            None (default) enables auto-detection. Applicable values are
            ``src`` and ``deb``. The first selects checkbox as present in the
            source checkout; the second selects a checkbox package installed
            from the Ubuntu repository.
:raises CheckBoxNotFound:
if checkbox cannot be located anywhere
:raises ValueError:
if ``mode`` is not supported
"""
# Auto-detect if not explicitly configured
if mode is None:
for possible_mode, dirs in self._DIRECTORY_MAP.items():
if all(os.path.exists(dirname) for dirname in dirs):
logger.info("Using checkbox in mode %s", possible_mode)
mode = possible_mode
break
else:
raise CheckBoxNotFound()
# Ensure mode is known
if mode not in self._DIRECTORY_MAP:
raise ValueError("Unsupported mode")
else:
self._mode = mode
self._dirs = self._DIRECTORY_MAP[mode]
@property
def CHECKBOX_SHARE(self):
"""
Return the required value of CHECKBOX_SHARE environment variable.
.. note::
This variable is only required by one script.
It would be nice to remove this later on.
"""
return self._dirs.SHARE_DIR
@property
def extra_PYTHONPATH(self):
"""
Return additional entry for PYTHONPATH, if needed.
This entry is required for CheckBox scripts to import the correct
CheckBox python libraries.
.. note::
The result may be None
"""
# NOTE: When CheckBox is installed then all the scripts should not use
# 'env' to locate the python interpreter (otherwise they might use
# virtualenv which is not desirable for Debian packages). When we're
# using CheckBox from source then the source directory (which contains
# the 'checkbox' package) should be added to PYTHONPATH for all the
# imports to work.
if self._mode == "src":
return _get_checkbox_dir()
else:
return None
@property
def extra_PATH(self):
"""
Return additional entry for PATH
This entry is required to lookup CheckBox scripts.
"""
# NOTE: This is always the script directory. The actual logic for
# locating it is implemented in the property accessors.
return self.scripts_dir
@property
def jobs_dir(self):
"""
Return an absolute path of the jobs directory
"""
return self._dirs.JOBS_DIR
@property
def whitelists_dir(self):
"""
Return an absolute path of the whitelist directory
"""
return os.path.join(self._dirs.DATA_DIR, "whitelists")
@property
def scripts_dir(self):
"""
Return an absolute path of the scripts directory
.. note::
The scripts may not work without setting PYTHONPATH and
CHECKBOX_SHARE.
"""
return self._dirs.SCRIPTS_DIR
def get_builtin_whitelists(self):
logger.debug("Loading built-in whitelists...")
whitelist_list = []
for name in os.listdir(self.whitelists_dir):
if name.endswith(".whitelist"):
whitelist_list.append(
WhiteList.from_file(os.path.join(
self.whitelists_dir, name)))
return whitelist_list
def get_builtin_jobs(self):
logger.debug("Loading built-in jobs...")
job_list = []
for name in os.listdir(self.jobs_dir):
if name.endswith(".txt") or name.endswith(".txt.in"):
job_list.extend(
self.load_jobs(
os.path.join(self.jobs_dir, name)))
return job_list
def load_jobs(self, somewhere):
"""
Load job definitions from somewhere
"""
if isinstance(somewhere, str):
# Load data from a file with the given name
filename = somewhere
with open(filename, 'rt', encoding='UTF-8') as stream:
return self.load_jobs(stream)
if isinstance(somewhere, io.TextIOWrapper):
stream = somewhere
logger.debug("Loading jobs definitions from %r...", stream.name)
record_list = load_rfc822_records(stream)
job_list = []
for record in record_list:
job = JobDefinition.from_rfc822_record(record)
job._checkbox = self
logger.debug("Loaded %r", job)
job_list.append(job)
return job_list
else:
raise TypeError(
"Unsupported type of 'somewhere': {!r}".format(
type(somewhere)))
@property
def name(self):
"""
name of this provider (always checkbox)
"""
return "checkbox"
| gpl-3.0 |
slarosa/QGIS | python/plugins/sextante/algs/SaveSelectedFeatures.py | 3 | 4504 | # -*- coding: utf-8 -*-
"""
***************************************************************************
SaveSelectedFeatures.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from sextante.core.GeoAlgorithm import GeoAlgorithm
from sextante.outputs.OutputVector import OutputVector
from sextante.parameters.ParameterVector import ParameterVector
from qgis.core import *
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from sextante.core.QGisLayers import QGisLayers
class SaveSelectedFeatures(GeoAlgorithm):
'''This is an example algorithm that takes a vector layer and creates
    a new one with just those features of the input layer that are
selected.
It is meant to be used as an example of how to create your own SEXTANTE
algorithms and explain methods and variables used to do it.
An algorithm like this will be available in all SEXTANTE elements, and
there is not need for additional work.
All SEXTANTE algorithms should extend the GeoAlgorithm class'''
#constants used to refer to parameters and outputs.
#They will be used when calling the algorithm from another algorithm,
#or when calling SEXTANTE from the QGIS console.
OUTPUT_LAYER = "OUTPUT_LAYER"
INPUT_LAYER = "INPUT_LAYER"
def defineCharacteristics(self):
'''Here we define the inputs and output of the algorithm, along
with some other properties'''
#the name that the user will see in the toolbox
self.name = "Save selected features"
#the branch of the toolbox under which the algorithm will appear
self.group = "Vector general tools"
#we add the input vector layer. It can have any kind of geometry
#It is a mandatory (not optional) one, hence the False argument
self.addParameter(ParameterVector(self.INPUT_LAYER, "Input layer", ParameterVector.VECTOR_TYPE_ANY, False))
# we add a vector layer as output
self.addOutput(OutputVector(self.OUTPUT_LAYER, "Output layer with selected features"))
def processAlgorithm(self, progress):
'''Here is where the processing itself takes place'''
#the first thing to do is retrieve the values of the parameters
#entered by the user
inputFilename = self.getParameterValue(self.INPUT_LAYER)
output = self.getOutputFromName(self.OUTPUT_LAYER)
        #input layer values are always a string with the layer's location.
        #That string can be converted into a QGIS object (a QgsVectorLayer in this case)
        #using the QGisLayers.getObjectFromUri() method
vectorLayer = QGisLayers.getObjectFromUri(inputFilename)
#And now we can process
#First we create the output layer.
#To do so, we call the getVectorWriter method in the Output object.
        #That will give us a SextanteVectorWriter, which we can later use to add features.
provider = vectorLayer.dataProvider()
writer = output.getVectorWriter( provider.fields(), provider.geometryType(), vectorLayer.crs() )
#Now we take the selected features and add them to the output layer
features = QGisLayers.features(vectorLayer)
total = len(features)
i = 0
for feat in features:
writer.addFeature(feat)
progress.setPercentage(100 * i / float(total))
i += 1
del writer
#There is nothing more to do here. We do not have to open the layer that we have created.
#SEXTANTE will take care of that, or will handle it if this algorithm is executed within
#a complex model
| gpl-2.0 |
prds21/barrial-movie | barrial-movie/channels/quierodibujosanimados.py | 9 | 5799 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Channel for quierodibujosanimados
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
DEBUG = config.get_setting("debug")
__category__ = "A"
__type__ = "generic"
__title__ = "Quiero dibujos animados"
__channel__ = "quierodibujosanimados"
__language__ = "ES"
__creationdate__ = "20121112"
def isGeneric():
return True
def mainlist(item):
logger.info("pelisalacarta.channels.quierodibujosanimados mainlist")
#itemlist.append( Item(channel=__channel__ , action="novedades" , title="Novedades" , url="http://www.quierodibujosanimados.com/"))
return series( Item(channel=__channel__ , action="series" , title="Series" , url="http://www.quierodibujosanimados.com/"))
def series(item):
logger.info("pelisalacarta.channels.quierodibujosanimados series")
itemlist = []
data = scrapertools.cache_page(item.url)
data = scrapertools.get_match(data,'<ul class="categorias">(.*?)</ul')
#<a href="http://www.quierodibujosanimados.com/cat/popeye-el-marino/38" title="Popeye el marino">Popeye el marino</a>
patron = '<a href="([^"]+)"[^>]+>([^<]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
title = scrapedtitle.strip()
url = urlparse.urljoin(item.url,scrapedurl)
thumbnail = ""
plot = ""
if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
itemlist.append( Item(channel=__channel__, action="episodios" , title=title , url=url, thumbnail=thumbnail, plot=plot, fanart="http://pelisalacarta.mimediacenter.info/fanart/quierodibujosanimados.jpg"))
next_page_url = scrapertools.find_single_match(data,'</span[^<]+<a href="([^"]+)">')
if next_page_url!="":
itemlist.append( Item(channel=__channel__, action="episodios", title=">> Página siguiente" , url=urlparse.urljoin(item.url,next_page_url) , folder=True, fanart="http://pelisalacarta.mimediacenter.info/fanart/quierodibujosanimados.jpg") )
return itemlist
def episodios(item):
logger.info("pelisalacarta.channels.quierodibujosanimados episodios")
'''
<li>
<div class="info">
<h2><a href="http://www.quierodibujosanimados.com/Caillou-raton-de-biblioteca/954" title="Caillou ratón de biblioteca">Caillou ratón de biblioteca</a></h2>
<p>Caillou volvía con su hermanita Rosi y su mamá de la biblioteca y traían un montón de libros que Caillou quería leer, especialmente uno de piratas. Capítulo titulado "Caillou ratón de biblioteca".</p>
<div class="pie">
<div class="categoria">
<span>Categoría:</span>
<a href="http://www.quierodibujosanimados.com/cat/caillou/14" title="Caillou" class="categoria">Caillou</a>
</div>
<div class="puntuacion">
<div class="rating_16 punt_0" data-noticia="954">
<span>0.5</span>
<span>1</span>
<span>1.5</span>
<span>2</span>
<span>2.5</span>
<span>3</span>
<span>3.5</span>
<span>4</span>
<span>4.5</span>
<span>5</span>
</div>
</div>
</div>
<span class="pico"></span>
</div>
<div class="dibujo">
<a href="http://www.quierodibujosanimados.com/Caillou-raton-de-biblioteca/954" title="Caillou ratón de biblioteca" class="thumb">
<img src="http://www.quierodibujosanimados.com/i/thm-Caillou-raton-de-biblioteca.jpg" alt="Caillou ratón de biblioteca" width="137" height="174" />
</a>
<h4><a href="http://www.quierodibujosanimados.com/Caillou-raton-de-biblioteca/954" title="Caillou ratón de biblioteca">Caillou ratón de biblioteca</a></h4>
</div>
</li>
'''
    # Download the page
data = scrapertools.cache_page(item.url)
patron = '<div class="dibujo"[^<]+'
patron += '<a href="([^"]+)" title="([^"]+)"[^<]+'
patron += '<img src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
itemlist = []
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
title = scrapedtitle.strip()
url = urlparse.urljoin(item.url,scrapedurl)
thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
plot = ""
if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
itemlist.append( Item(channel=__channel__, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, fanart="http://pelisalacarta.mimediacenter.info/fanart/quierodibujosanimados.jpg"))
next_page_url = scrapertools.find_single_match(data,'</span[^<]+<a href="([^"]+)">')
if next_page_url!="":
itemlist.append( Item(channel=__channel__, action="episodios", title=">> Página siguiente" , url=urlparse.urljoin(item.url,next_page_url) , folder=True, fanart="http://pelisalacarta.mimediacenter.info/fanart/quierodibujosanimados.jpg") )
return itemlist
# Verificación automática de canales: Esta función debe devolver "True" si todo está ok en el canal.
def test():
bien = True
from servers import servertools
# mainlist
serie_itemlist = mainlist(Item())
    # Check that every option returns something (except the search)
for serie_item in serie_itemlist:
episodio_itemlist = episodios(serie_item)
for episodio_item in episodio_itemlist:
mirrors = servertools.find_video_items(item=episodio_item)
if len(mirrors)>0:
return True
return False | gpl-3.0 |
username115/FRCScouting | gen_scripts/SQLITEContractGen.py | 1 | 8061 | #! /usr/bin/python
_description = '''
This script takes in a SQL file with INSERTS and CREATES and transforms
it into a SQLite contract in Java. Meant to be used with a phpmyadmin
exported sql file. Defaults assume the FRC 836 file structure.
'''
_defaultRun = '''
python SQLITEContractGen.py
--packagename=org.frc836.database
--classname=FRCScoutingContract
--infile=FRC_Scouting_Server/scouting.sql
--outfile=src/org/frc836/database/FRCScoutingContract.java
'''
__author__ = "Jonny"
__version__ = "2.0"
__copyright__ = ""
import SQLHelper
import autogeninfo
import os
import re
import argparse
# note to self. Look into the 'textwrap' class for functionality
class SqlToJava():
re_GetSqlVar = re.compile(r"[`](?P<var>\w+)[`]")
re_CreateStatement = re.compile(r'''
\s* CREATE \s+ TABLE \s+ IF \s+ NOT \s+ EXISTS \s+ # grabs the create statement
[`] (?P<tablename>\w+) [`] # matches the table name
(?P<body>[^;]+) # matches the body
[;]
''',re.VERBOSE)
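    # For illustration, re_CreateStatement matches hypothetical input such as:
    #   CREATE TABLE IF NOT EXISTS `scout_pit_data` (
    #     `id` int(11) unsigned NOT NULL AUTO_INCREMENT,
    #     PRIMARY KEY (`id`)
    #   );
    # capturing tablename='scout_pit_data' and everything up to the
    # terminating semicolon as body.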
re_InsertStatement = re.compile(r'''
\s* INSERT \s+ INTO \s+ # finds the insert statements
[`] (?P<tablename>\w+) [`] # matches the tablename
\s+ [(] \s*
(?P<colnames>[^)]+)
[)] \s* VALUES [^(]*
(?P<body>[^;]+) [;]
''',re.VERBOSE)
re_GetColumn = re.compile(r'''
(^|\n) \s+
[`] (?P<name>\w+) [`] # grabs the column name
\s+ (?P<type>\S+) \s+ # grabs the type
''',re.VERBOSE)
re_GetRow = re.compile(r'''
[(]
(?P<row>.+)
[)]
#[(] (?P<row>[^)]+) [)] # matches everything in parens
''',re.VERBOSE)
def __init__(self, packageName=None, className="DefaultJavaClassName",
baseClass=None, baseClassHeader=None):
self.tables = list()
self.packageName = packageName
self.className = className
self.baseClass = baseClass
self.baseClassHeader = baseClassHeader
def findTableName(self, tableName):
for i in range(0, len(self.tables)):
if tableName == self.tables[i].name:
return i
return None
def addTable(self, table):
self.tables.append(table)
def createStr_Header(self):
_myscriptname = os.path.basename(__file__)
ret = "/*\n"
ret += autogeninfo._autogenScriptInfo_Str(__version__, _myscriptname) +"\n"
ret += "*/\n\n"
ret += "package "+ self.packageName +";\n"
ret += "\n"
if self.baseClassHeader:
ret += "import "+ self.baseClassHeader +";\n"
ret += "\n"
ret += "public final class "+ self.className +" {\n"
ret += "\tpublic "+ self.className +"() {}"
return ret
def createStr_Footer(self):
ret = "}"
return ret
def createStr_Classes(self):
s = ""
for table in self.tables:
s += table.createStr_Class(self.baseClass) +"\n\n"
return s[0:-2]
def createStr_DropStr(self):
s = "public static final String[] SQL_DELETE_ENTRIES = {\n"
for table in self.tables:
tmp = "\""+ table.createStr_DropStr() +"\""
s += SQLHelper.indent(tmp) +",\n"
return s[0:-2] +"\n};"
def createStr_CreateStr(self):
s = "public static final String[] SQL_CREATE_ENTRIES = {\n"
for table in self.tables:
s += SQLHelper.indent( SQLHelper.toJavaString(table.createStr_CreateStr()))
s += ",\n\n"
tmp = table.createStr_InsertStr()
if tmp:
s += SQLHelper.indent( SQLHelper.toJavaString(tmp))
s += ",\n\n"
return s[0:-3] +"\n};"
def createStr_JavaSqLite(self):
s = ""
s += self.createStr_Header() +"\n"
s += "\n"
s += SQLHelper.indent(self.createStr_Classes()) +"\n"
s += "\n"
s += SQLHelper.indent(self.createStr_CreateStr()) +"\n"
s += "\n"
s += SQLHelper.indent(self.createStr_DropStr()) +"\n"
s += "\n"
s += self.createStr_Footer()
return s
def _parseStatement_Create(self, statement):
match = self.re_CreateStatement.search(statement)
if match:
table = SQLHelper.SqlTable( match.group('tablename') )
for ln in match.group('body').split(','):
match = self.re_GetColumn.search(ln)
if match:
name = match.group('name')
type = match.group('type')
if re.search("unsigned",ln): unsigned = True
else: unsigned = False
if re.search("NOT NULL",ln): nullVal = False
else: nullVal = False
if re.search("AUTO_INCREMENT",ln): autoInc = True
else: autoInc = False
match = re.search("DEFAULT\s+(?P<val>\S+)",ln)
if match: default = match.group('val')
else: default=None
table.addColumn( SQLHelper.SqlColumn(columnName=name, columnType=type,
isPrimary=False, defaultVal=default,
nullValid=nullVal, autoIncrement=autoInc,
isUnsigned=unsigned) )
if re.search("PRIMARY\s+KEY",ln):
primaryKey = re.search("PRIMARY\s+KEY\s+[(][`](?P<key>\w+)[`][)]",ln).group('key')
for column in table.columns:
if column.name == primaryKey:
column.primary = True
self.addTable(table)
def _parseStatement_Insert(self, statement):
match = self.re_InsertStatement.search(statement)
if match:
tableName = match.group('tablename')
colNames = match.group('colnames')
body = match.group('body')
i_table = self.findTableName(tableName)
mapping = self.tables[i_table].getColumnMapping_csv(colNames)
for row in self.re_GetRow.findall( body ):
self.tables[i_table].addRow(row)
def readFile(self, filename, verbose=False):
f = open(filename,'r')
if verbose: print("Reading from \'"+ str(f.name) +"\' in mode \'"+ str(f.mode) +"\'")
for ln in f.read().split(';'):
ln += ';'
if self.re_CreateStatement.search(ln):
self._parseStatement_Create(ln)
elif self.re_InsertStatement.search(ln):
self._parseStatement_Insert(ln)
f.close()
def writeJavaSqLiteFile(self, filename, verbose=False):
directory = os.path.dirname(filename)
if not os.path.exists(directory):
if verbose: print("Creating output directory: " + directory)
os.makedirs(directory)
f = open(filename,'w')
if verbose: print("Writing to \'"+ str(f.name) +"\' in mode \'"+ str(f.mode) +"\'")
f.write( self.createStr_JavaSqLite() )
f.close()
def printCreates(self):
for table in self.tables:
print( table.createStr_CreateStr() +"\n")
def printInserts(self):
for table in self.tables:
print( table.createStr_InsertStr() +"\n")
#===============================================================================
# init_args()
# Sets up the command line parsing logic. Any changes to cmd line input should
# take place here.
# ------------------------------------------
# return
# args : the list of parsed arguments
#===============================================================================
def init_args():
parser = argparse.ArgumentParser(description=_description)
parser.add_argument('-i','--infile',dest='infilename',required=False,
help='The .sql file that you want to parse from')
parser.add_argument('-o','--outfile',dest='outfilename',required=False,
help='The Java file you want to write out to')
parser.add_argument('--classname','-cn',required=False,
help='The name of the Java class')
parser.add_argument('--packagename','-pn',required=False,
help='The database package to use')
parser.add_argument('--baseclass','-bc',required=False,
help='The class that all of the generated classes will implement')
parser.add_argument('--baseclassHeader','-bch',required=False,
help='The file that needs to be imported to use the baseclass')
parser.set_defaults( infilename='FRC_Scouting_Server/scouting.sql',
outfilename='src/org/frc836/database/FRCScoutingContract.java',
packagename='org.frc836.database',
classname='FRCScoutingContract',
baseclass='BaseColumns',
baseclassHeader='android.provider.BaseColumns'
)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = init_args()
SqlCreator = SqlToJava(packageName = args.packagename,
className = args.classname,
baseClass = args.baseclass,
baseClassHeader = args.baseclassHeader
)
SqlCreator.readFile(args.infilename, verbose=True)
SqlCreator.writeJavaSqLiteFile(args.outfilename, verbose=True)
| apache-2.0 |
grlee77/scipy | scipy/spatial/tests/test_slerp.py | 11 | 15434 | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import assert_allclose
import pytest
from scipy.spatial import geometric_slerp
def _generate_spherical_points(ndim=3, n_pts=2):
# generate uniform points on sphere
# see: https://stackoverflow.com/a/23785326
# tentatively extended to arbitrary dims
# for 0-sphere it will always produce antipodes
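    # (normalizing i.i.d. Gaussian samples is rotation-invariant, which is why
    # the resulting directions are uniformly distributed on the unit sphere)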
np.random.seed(123)
points = np.random.normal(size=(n_pts, ndim))
points /= np.linalg.norm(points, axis=1)[:, np.newaxis]
return points[0], points[1]
class TestGeometricSlerp:
# Test various properties of the geometric slerp code
@pytest.mark.parametrize("n_dims", [2, 3, 5, 7, 9])
@pytest.mark.parametrize("n_pts", [0, 3, 17])
def test_shape_property(self, n_dims, n_pts):
# geometric_slerp output shape should match
# input dimensionality & requested number
# of interpolation points
start, end = _generate_spherical_points(n_dims, 2)
actual = geometric_slerp(start=start,
end=end,
t=np.linspace(0, 1, n_pts))
assert actual.shape == (n_pts, n_dims)
@pytest.mark.parametrize("n_dims", [2, 3, 5, 7, 9])
@pytest.mark.parametrize("n_pts", [3, 17])
def test_include_ends(self, n_dims, n_pts):
# geometric_slerp should return a data structure
# that includes the start and end coordinates
# when t includes 0 and 1 ends
# this is convenient for plotting surfaces represented
# by interpolations for example
# the generator doesn't work so well for the unit
# sphere (it always produces antipodes), so use
# custom values there
start, end = _generate_spherical_points(n_dims, 2)
actual = geometric_slerp(start=start,
end=end,
t=np.linspace(0, 1, n_pts))
assert_allclose(actual[0], start)
assert_allclose(actual[-1], end)
@pytest.mark.parametrize("start, end", [
# both arrays are not flat
(np.zeros((1, 3)), np.ones((1, 3))),
# only start array is not flat
(np.zeros((1, 3)), np.ones(3)),
# only end array is not flat
(np.zeros(1), np.ones((3, 1))),
])
def test_input_shape_flat(self, start, end):
# geometric_slerp should handle input arrays that are
# not flat appropriately
with pytest.raises(ValueError, match='one-dimensional'):
geometric_slerp(start=start,
end=end,
t=np.linspace(0, 1, 10))
@pytest.mark.parametrize("start, end", [
# 7-D and 3-D ends
(np.zeros(7), np.ones(3)),
# 2-D and 1-D ends
(np.zeros(2), np.ones(1)),
# empty, "3D" will also get caught this way
(np.array([]), np.ones(3)),
])
def test_input_dim_mismatch(self, start, end):
# geometric_slerp must appropriately handle cases where
# an interpolation is attempted across two different
# dimensionalities
with pytest.raises(ValueError, match='dimensions'):
geometric_slerp(start=start,
end=end,
t=np.linspace(0, 1, 10))
@pytest.mark.parametrize("start, end", [
# both empty
(np.array([]), np.array([])),
])
def test_input_at_least1d(self, start, end):
# empty inputs to geometric_slerp must
# be handled appropriately when not detected
# by mismatch
with pytest.raises(ValueError, match='at least two-dim'):
geometric_slerp(start=start,
end=end,
t=np.linspace(0, 1, 10))
@pytest.mark.parametrize("start, end, expected", [
# North and South Poles are definitely antipodes
# but should be handled gracefully now
(np.array([0, 0, 1.0]), np.array([0, 0, -1.0]), "warning"),
# this case will issue a warning & be handled
# gracefully as well;
# North Pole was rotated very slightly
# using r = R.from_euler('x', 0.035, degrees=True)
# to achieve Euclidean distance offset from diameter by
# 9.328908379124812e-08, within the default tol
(np.array([0.00000000e+00,
-6.10865200e-04,
9.99999813e-01]), np.array([0, 0, -1.0]), "warning"),
# this case should succeed without warning because a
# sufficiently large
# rotation was applied to North Pole point to shift it
# to a Euclidean distance of 2.3036691931821451e-07
# from South Pole, which is larger than tol
(np.array([0.00000000e+00,
-9.59930941e-04,
9.99999539e-01]), np.array([0, 0, -1.0]), "success"),
])
def test_handle_antipodes(self, start, end, expected):
# antipodal points must be handled appropriately;
# there are an infinite number of possible geodesic
# interpolations between them in higher dims
if expected == "warning":
with pytest.warns(UserWarning, match='antipodes'):
res = geometric_slerp(start=start,
end=end,
t=np.linspace(0, 1, 10))
else:
res = geometric_slerp(start=start,
end=end,
t=np.linspace(0, 1, 10))
# antipodes or near-antipodes should still produce
# slerp paths on the surface of the sphere (but they
# may be ambiguous):
assert_allclose(np.linalg.norm(res, axis=1), 1.0)
@pytest.mark.parametrize("start, end, expected", [
# 2-D with n_pts=4 (two new interpolation points)
# this is an actual circle
(np.array([1, 0]),
np.array([0, 1]),
np.array([[1, 0],
[np.sqrt(3) / 2, 0.5], # 30 deg on unit circle
[0.5, np.sqrt(3) / 2], # 60 deg on unit circle
[0, 1]])),
# likewise for 3-D (add z = 0 plane)
# this is an ordinary sphere
(np.array([1, 0, 0]),
np.array([0, 1, 0]),
np.array([[1, 0, 0],
[np.sqrt(3) / 2, 0.5, 0],
[0.5, np.sqrt(3) / 2, 0],
[0, 1, 0]])),
# for 5-D, pad more columns with constants
# zeros are easiest--non-zero values on unit
# circle are more difficult to reason about
# at higher dims
(np.array([1, 0, 0, 0, 0]),
np.array([0, 1, 0, 0, 0]),
np.array([[1, 0, 0, 0, 0],
[np.sqrt(3) / 2, 0.5, 0, 0, 0],
[0.5, np.sqrt(3) / 2, 0, 0, 0],
[0, 1, 0, 0, 0]])),
])
def test_straightforward_examples(self, start, end, expected):
# some straightforward interpolation tests, sufficiently
# simple to use the unit circle to deduce expected values;
# for larger dimensions, pad with constants so that the
# data is N-D but simpler to reason about
actual = geometric_slerp(start=start,
end=end,
t=np.linspace(0, 1, 4))
assert_allclose(actual, expected, atol=1e-16)
@pytest.mark.parametrize("t", [
# both interval ends clearly violate limits
np.linspace(-20, 20, 300),
# only one interval end violating limit slightly
np.linspace(-0.0001, 0.0001, 17),
])
def test_t_values_limits(self, t):
# geometric_slerp() should appropriately handle
# interpolation parameters < 0 and > 1
with pytest.raises(ValueError, match='interpolation parameter'):
_ = geometric_slerp(start=np.array([1, 0]),
end=np.array([0, 1]),
t=t)
@pytest.mark.parametrize("start, end", [
(np.array([1]),
np.array([0])),
(np.array([0]),
np.array([1])),
(np.array([-17.7]),
np.array([165.9])),
])
def test_0_sphere_handling(self, start, end):
# it does not make sense to interpolate the set of
# two points that is the 0-sphere
with pytest.raises(ValueError, match='at least two-dim'):
_ = geometric_slerp(start=start,
end=end,
t=np.linspace(0, 1, 4))
@pytest.mark.parametrize("tol", [
# an integer currently raises
5,
# string raises
"7",
# list and arrays also raise
[5, 6, 7], np.array(9.0),
])
def test_tol_type(self, tol):
# geometric_slerp() should raise if tol is not
# a suitable float type
with pytest.raises(ValueError, match='must be a float'):
_ = geometric_slerp(start=np.array([1, 0]),
end=np.array([0, 1]),
t=np.linspace(0, 1, 5),
tol=tol)
@pytest.mark.parametrize("tol", [
-5e-6,
-7e-10,
])
def test_tol_sign(self, tol):
# geometric_slerp() currently handles negative
# tol values, as long as they are floats
_ = geometric_slerp(start=np.array([1, 0]),
end=np.array([0, 1]),
t=np.linspace(0, 1, 5),
tol=tol)
@pytest.mark.parametrize("start, end", [
# 1-sphere (circle) with one point at origin
# and the other on the circle
(np.array([1, 0]), np.array([0, 0])),
# 2-sphere (normal sphere) with both points
# just slightly off sphere by the same amount
# in different directions
(np.array([1 + 1e-6, 0, 0]),
np.array([0, 1 - 1e-6, 0])),
# same thing in 4-D
(np.array([1 + 1e-6, 0, 0, 0]),
np.array([0, 1 - 1e-6, 0, 0])),
])
def test_unit_sphere_enforcement(self, start, end):
# geometric_slerp() should raise on input that clearly
# cannot be on an n-sphere of radius 1
with pytest.raises(ValueError, match='unit n-sphere'):
geometric_slerp(start=start,
end=end,
t=np.linspace(0, 1, 5))
@pytest.mark.parametrize("start, end", [
# 1-sphere 45 degree case
(np.array([1, 0]),
np.array([np.sqrt(2) / 2.,
np.sqrt(2) / 2.])),
# 2-sphere 135 degree case
(np.array([1, 0]),
np.array([-np.sqrt(2) / 2.,
np.sqrt(2) / 2.])),
])
@pytest.mark.parametrize("t_func", [
np.linspace, np.logspace])
def test_order_handling(self, start, end, t_func):
# geometric_slerp() should handle scenarios with
# ascending and descending t value arrays gracefully;
# results should simply be reversed
# for scrambled / unsorted parameters, the same values
# should be returned, just in scrambled order
num_t_vals = 20
np.random.seed(789)
forward_t_vals = t_func(0, 10, num_t_vals)
# normalize to max of 1
forward_t_vals /= forward_t_vals.max()
reverse_t_vals = np.flipud(forward_t_vals)
shuffled_indices = np.arange(num_t_vals)
np.random.shuffle(shuffled_indices)
scramble_t_vals = forward_t_vals.copy()[shuffled_indices]
forward_results = geometric_slerp(start=start,
end=end,
t=forward_t_vals)
reverse_results = geometric_slerp(start=start,
end=end,
t=reverse_t_vals)
scrambled_results = geometric_slerp(start=start,
end=end,
t=scramble_t_vals)
# check fidelity to input order
assert_allclose(forward_results, np.flipud(reverse_results))
assert_allclose(forward_results[shuffled_indices],
scrambled_results)
@pytest.mark.parametrize("t", [
# string:
"15, 5, 7",
# complex numbers currently produce a warning
# but not sure we need to worry about it too much:
# [3 + 1j, 5 + 2j],
])
def test_t_values_conversion(self, t):
with pytest.raises(ValueError):
_ = geometric_slerp(start=np.array([1]),
end=np.array([0]),
t=t)
def test_accept_arraylike(self):
# array-like support requested by reviewer
# in gh-10380
actual = geometric_slerp([1, 0], [0, 1], [0, 1/3, 0.5, 2/3, 1])
# expected values are based on visual inspection
# of the unit circle for the progressions along
# the circumference provided in t
expected = np.array([[1, 0],
[np.sqrt(3) / 2, 0.5],
[np.sqrt(2) / 2,
np.sqrt(2) / 2],
[0.5, np.sqrt(3) / 2],
[0, 1]], dtype=np.float64)
# Tyler's original Cython implementation of geometric_slerp
# can pass at atol=0 here, but on balance we will accept
# 1e-16 for an implementation that avoids Cython and
# makes up accuracy ground elsewhere
assert_allclose(actual, expected, atol=1e-16)
def test_scalar_t(self):
# when t is a scalar, return value is a single
# interpolated point of the appropriate dimensionality
# requested by reviewer in gh-10380
actual = geometric_slerp([1, 0], [0, 1], 0.5)
expected = np.array([np.sqrt(2) / 2,
np.sqrt(2) / 2], dtype=np.float64)
assert actual.shape == (2,)
assert_allclose(actual, expected)
@pytest.mark.parametrize('start', [
np.array([1, 0, 0]),
np.array([0, 1]),
])
def test_degenerate_input(self, start):
# handle start == end with repeated value
# like np.linspace
expected = [start] * 5
actual = geometric_slerp(start=start,
end=start,
t=np.linspace(0, 1, 5))
assert_allclose(actual, expected)
@pytest.mark.parametrize('k', np.logspace(-10, -1, 10))
def test_numerical_stability_pi(self, k):
# geometric_slerp should have excellent numerical
# stability for angles approaching pi between
# the start and end points
angle = np.pi - k
ts = np.linspace(0, 1, 100)
P = np.array([1, 0, 0, 0])
Q = np.array([np.cos(angle), np.sin(angle), 0, 0])
# the test should only be enforced for cases where
# geometric_slerp determines that the input is actually
# on the unit sphere
with np.testing.suppress_warnings() as sup:
sup.filter(UserWarning)
result = geometric_slerp(P, Q, ts, 1e-18)
norms = np.linalg.norm(result, axis=1)
error = np.max(np.abs(norms - 1))
assert error < 4e-15
| bsd-3-clause |
mariopro/youtube-dl | youtube_dl/extractor/npo.py | 18 | 17167 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
fix_xml_ampersands,
parse_duration,
qualities,
strip_jsonp,
unified_strdate,
)
class NPOBaseIE(InfoExtractor):
def _get_token(self, video_id):
token_page = self._download_webpage(
'http://ida.omroep.nl/npoplayer/i.js',
video_id, note='Downloading token')
token = self._search_regex(
r'npoplayer\.token = "(.+?)"', token_page, 'token')
# Decryption algorithm extracted from http://npoplayer.omroep.nl/csjs/npoplayer-min.js
token_l = list(token)
first = second = None
for i in range(5, len(token_l) - 4):
if token_l[i].isdigit():
if first is None:
first = i
elif second is None:
second = i
if first is None or second is None:
first = 12
second = 13
token_l[first], token_l[second] = token_l[second], token_l[first]
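        # Illustration with a hypothetical token (not a real service token):
        # for "abcde1f2ghij", the scan over range(5, 8) finds digits at
        # indices 5 and 7, so the swap above yields "abcde2f1ghij".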
return ''.join(token_l)
class NPOIE(NPOBaseIE):
IE_NAME = 'npo'
IE_DESC = 'npo.nl and ntr.nl'
_VALID_URL = r'''(?x)
(?:
npo:|
https?://
(?:www\.)?
(?:
npo\.nl/(?!live|radio)(?:[^/]+/){2}|
ntr\.nl/(?:[^/]+/){2,}|
omroepwnl\.nl/video/fragment/[^/]+__
)
)
(?P<id>[^/?#]+)
'''
_TESTS = [
{
'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719',
'md5': '4b3f9c429157ec4775f2c9cb7b911016',
'info_dict': {
'id': 'VPWON_1220719',
'ext': 'm4v',
'title': 'Nieuwsuur',
'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.',
'upload_date': '20140622',
},
},
{
'url': 'http://www.npo.nl/de-mega-mike-mega-thomas-show/27-02-2009/VARA_101191800',
'md5': 'da50a5787dbfc1603c4ad80f31c5120b',
'info_dict': {
'id': 'VARA_101191800',
'ext': 'm4v',
'title': 'De Mega Mike & Mega Thomas show: The best of.',
'description': 'md5:3b74c97fc9d6901d5a665aac0e5400f4',
'upload_date': '20090227',
'duration': 2400,
},
},
{
'url': 'http://www.npo.nl/tegenlicht/25-02-2013/VPWON_1169289',
'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
'info_dict': {
'id': 'VPWON_1169289',
'ext': 'm4v',
'title': 'Tegenlicht: De toekomst komt uit Afrika',
'description': 'md5:52cf4eefbc96fffcbdc06d024147abea',
'upload_date': '20130225',
'duration': 3000,
},
},
{
'url': 'http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706',
'info_dict': {
'id': 'WO_VPRO_043706',
'ext': 'wmv',
'title': 'De nieuwe mens - Deel 1',
'description': 'md5:518ae51ba1293ffb80d8d8ce90b74e4b',
'duration': 4680,
},
'params': {
# mplayer mms download
'skip_download': True,
}
},
# non asf in streams
{
'url': 'http://www.npo.nl/hoe-gaat-europa-verder-na-parijs/10-01-2015/WO_NOS_762771',
'md5': 'b3da13de374cbe2d5332a7e910bef97f',
'info_dict': {
'id': 'WO_NOS_762771',
'ext': 'mp4',
'title': 'Hoe gaat Europa verder na Parijs?',
},
},
{
'url': 'http://www.ntr.nl/Aap-Poot-Pies/27/detail/Aap-poot-pies/VPWON_1233944#content',
'md5': '01c6a2841675995da1f0cf776f03a9c3',
'info_dict': {
'id': 'VPWON_1233944',
'ext': 'm4v',
'title': 'Aap, poot, pies',
'description': 'md5:c9c8005d1869ae65b858e82c01a91fde',
'upload_date': '20150508',
'duration': 599,
},
},
{
'url': 'http://www.omroepwnl.nl/video/fragment/vandaag-de-dag-verkiezingen__POMS_WNL_853698',
'md5': 'd30cd8417b8b9bca1fdff27428860d08',
'info_dict': {
'id': 'POW_00996502',
'ext': 'm4v',
'title': '''"Dit is wel een 'landslide'..."''',
'description': 'md5:f8d66d537dfb641380226e31ca57b8e8',
'upload_date': '20150508',
'duration': 462,
},
}
]
def _real_extract(self, url):
video_id = self._match_id(url)
return self._get_info(video_id)
def _get_info(self, video_id):
metadata = self._download_json(
'http://e.omroep.nl/metadata/%s' % video_id,
video_id,
# We have to remove the javascript callback
transform_source=strip_jsonp,
)
# For some videos actual video id (prid) is different (e.g. for
# http://www.omroepwnl.nl/video/fragment/vandaag-de-dag-verkiezingen__POMS_WNL_853698
# video id is POMS_WNL_853698 but prid is POW_00996502)
video_id = metadata.get('prid') or video_id
# titel is too generic in some cases so utilize aflevering_titel as well
# when available (e.g. http://tegenlicht.vpro.nl/afleveringen/2014-2015/access-to-africa.html)
title = metadata['titel']
sub_title = metadata.get('aflevering_titel')
if sub_title and sub_title != title:
title += ': %s' % sub_title
token = self._get_token(video_id)
formats = []
pubopties = metadata.get('pubopties')
if pubopties:
quality = qualities(['adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std'])
for format_id in pubopties:
format_info = self._download_json(
'http://ida.omroep.nl/odi/?prid=%s&puboptions=%s&adaptive=yes&token=%s'
% (video_id, format_id, token),
video_id, 'Downloading %s JSON' % format_id)
if format_info.get('error_code', 0) or format_info.get('errorcode', 0):
continue
streams = format_info.get('streams')
if streams:
video_info = self._download_json(
streams[0] + '&type=json',
video_id, 'Downloading %s stream JSON' % format_id)
else:
video_info = format_info
video_url = video_info.get('url')
if not video_url:
continue
if format_id == 'adaptive':
formats.extend(self._extract_m3u8_formats(video_url, video_id))
else:
formats.append({
'url': video_url,
'format_id': format_id,
'quality': quality(format_id),
})
streams = metadata.get('streams')
if streams:
for i, stream in enumerate(streams):
stream_url = stream.get('url')
if not stream_url:
continue
if '.asf' not in stream_url:
formats.append({
'url': stream_url,
'quality': stream.get('kwaliteit'),
})
continue
asx = self._download_xml(
stream_url, video_id,
'Downloading stream %d ASX playlist' % i,
transform_source=fix_xml_ampersands)
ref = asx.find('./ENTRY/Ref')
if ref is None:
continue
video_url = ref.get('href')
if not video_url:
continue
formats.append({
'url': video_url,
'ext': stream.get('formaat', 'asf'),
'quality': stream.get('kwaliteit'),
})
self._sort_formats(formats)
subtitles = {}
if metadata.get('tt888') == 'ja':
subtitles['nl'] = [{
'ext': 'vtt',
'url': 'http://e.omroep.nl/tt888/%s' % video_id,
}]
return {
'id': video_id,
'title': title,
'description': metadata.get('info'),
'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
'upload_date': unified_strdate(metadata.get('gidsdatum')),
'duration': parse_duration(metadata.get('tijdsduur')),
'formats': formats,
'subtitles': subtitles,
}
class NPOLiveIE(NPOBaseIE):
IE_NAME = 'npo.nl:live'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/live/(?P<id>.+)'
_TEST = {
'url': 'http://www.npo.nl/live/npo-1',
'info_dict': {
'id': 'LI_NEDERLAND1_136692',
'display_id': 'npo-1',
'ext': 'mp4',
'title': 're:^Nederland 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'Livestream',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
live_id = self._search_regex(
r'data-prid="([^"]+)"', webpage, 'live id')
metadata = self._download_json(
'http://e.omroep.nl/metadata/%s' % live_id,
display_id, transform_source=strip_jsonp)
token = self._get_token(display_id)
formats = []
streams = metadata.get('streams')
if streams:
for stream in streams:
stream_type = stream.get('type').lower()
# smooth streaming is not supported
if stream_type in ['ss', 'ms']:
continue
stream_info = self._download_json(
'http://ida.omroep.nl/aapi/?stream=%s&token=%s&type=jsonp'
% (stream.get('url'), token),
display_id, 'Downloading %s JSON' % stream_type)
if stream_info.get('error_code', 0) or stream_info.get('errorcode', 0):
continue
stream_url = self._download_json(
stream_info['stream'], display_id,
'Downloading %s URL' % stream_type,
'Unable to download %s URL' % stream_type,
transform_source=strip_jsonp, fatal=False)
if not stream_url:
continue
if stream_type == 'hds':
f4m_formats = self._extract_f4m_formats(stream_url, display_id)
# f4m downloader downloads only piece of live stream
for f4m_format in f4m_formats:
f4m_format['preference'] = -1
formats.extend(f4m_formats)
elif stream_type == 'hls':
formats.extend(self._extract_m3u8_formats(stream_url, display_id, 'mp4'))
else:
formats.append({
'url': stream_url,
'preference': -10,
})
self._sort_formats(formats)
return {
'id': live_id,
'display_id': display_id,
'title': self._live_title(metadata['titel']),
'description': metadata['info'],
'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
'formats': formats,
'is_live': True,
}
class NPORadioIE(InfoExtractor):
IE_NAME = 'npo.nl:radio'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/(?P<id>[^/]+)/?$'
_TEST = {
'url': 'http://www.npo.nl/radio/radio-1',
'info_dict': {
'id': 'radio-1',
'ext': 'mp3',
'title': 're:^NPO Radio 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
@staticmethod
def _html_get_attribute_regex(attribute):
return r'{0}\s*=\s*\'([^\']+)\''.format(attribute)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
self._html_get_attribute_regex('data-channel'), webpage, 'title')
stream = self._parse_json(
self._html_search_regex(self._html_get_attribute_regex('data-streams'), webpage, 'data-streams'),
video_id)
codec = stream.get('codec')
return {
'id': video_id,
'url': stream['url'],
'title': self._live_title(title),
'acodec': codec,
'ext': codec,
'is_live': True,
}
class NPORadioFragmentIE(InfoExtractor):
IE_NAME = 'npo.nl:radio:fragment'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/[^/]+/fragment/(?P<id>\d+)'
_TEST = {
'url': 'http://www.npo.nl/radio/radio-5/fragment/174356',
'md5': 'dd8cc470dad764d0fdc70a9a1e2d18c2',
'info_dict': {
'id': '174356',
'ext': 'mp3',
'title': 'Jubileumconcert Willeke Alberti',
},
}
def _real_extract(self, url):
audio_id = self._match_id(url)
webpage = self._download_webpage(url, audio_id)
title = self._html_search_regex(
r'href="/radio/[^/]+/fragment/%s" title="([^"]+)"' % audio_id,
webpage, 'title')
audio_url = self._search_regex(
r"data-streams='([^']+)'", webpage, 'audio url')
return {
'id': audio_id,
'url': audio_url,
'title': title,
}
class VPROIE(NPOIE):
_VALID_URL = r'https?://(?:www\.)?(?:tegenlicht\.)?vpro\.nl/(?:[^/]+/){2,}(?P<id>[^/]+)\.html'
_TESTS = [
{
'url': 'http://tegenlicht.vpro.nl/afleveringen/2012-2013/de-toekomst-komt-uit-afrika.html',
'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
'info_dict': {
'id': 'VPWON_1169289',
'ext': 'm4v',
'title': 'De toekomst komt uit Afrika',
'description': 'md5:52cf4eefbc96fffcbdc06d024147abea',
'upload_date': '20130225',
},
},
{
'url': 'http://www.vpro.nl/programmas/2doc/2015/sergio-herman.html',
'info_dict': {
'id': 'sergio-herman',
'title': 'Sergio Herman: Fucking perfect',
},
'playlist_count': 2,
},
{
# playlist with youtube embed
'url': 'http://www.vpro.nl/programmas/2doc/2015/education-education.html',
'info_dict': {
'id': 'education-education',
'title': '2Doc',
},
'playlist_count': 2,
}
]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
entries = [
self.url_result('npo:%s' % video_id if not video_id.startswith('http') else video_id)
for video_id in re.findall(r'data-media-id="([^"]+)"', webpage)
]
playlist_title = self._search_regex(
r'<title>\s*([^>]+?)\s*-\s*Teledoc\s*-\s*VPRO\s*</title>',
webpage, 'playlist title', default=None) or self._og_search_title(webpage)
return self.playlist_result(entries, playlist_id, playlist_title)
class WNLIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?omroepwnl\.nl/video/detail/(?P<id>[^/]+)__\d+'
_TEST = {
'url': 'http://www.omroepwnl.nl/video/detail/vandaag-de-dag-6-mei__060515',
'info_dict': {
'id': 'vandaag-de-dag-6-mei',
'title': 'Vandaag de Dag 6 mei',
},
'playlist_count': 4,
}
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
entries = [
self.url_result('npo:%s' % video_id, 'NPO')
for video_id, part in re.findall(
r'<a[^>]+href="([^"]+)"[^>]+class="js-mid"[^>]*>(Deel \d+)', webpage)
]
playlist_title = self._html_search_regex(
r'(?s)<h1[^>]+class="subject"[^>]*>(.+?)</h1>',
webpage, 'playlist title')
return self.playlist_result(entries, playlist_id, playlist_title)
| unlicense |
w1ll1am23/home-assistant | tests/components/sql/test_sensor.py | 3 | 2727 | """The test for the sql sensor platform."""
import pytest
import voluptuous as vol
from homeassistant.components.sql.sensor import validate_sql_select
from homeassistant.const import STATE_UNKNOWN
from homeassistant.setup import async_setup_component
async def test_query(hass):
"""Test the SQL sensor."""
config = {
"sensor": {
"platform": "sql",
"db_url": "sqlite://",
"queries": [
{
"name": "count_tables",
"query": "SELECT 5 as value",
"column": "value",
}
],
}
}
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
state = hass.states.get("sensor.count_tables")
assert state.state == "5"
assert state.attributes["value"] == 5
async def test_invalid_query(hass):
"""Test the SQL sensor for invalid queries."""
with pytest.raises(vol.Invalid):
validate_sql_select("DROP TABLE *")
config = {
"sensor": {
"platform": "sql",
"db_url": "sqlite://",
"queries": [
{
"name": "count_tables",
"query": "SELECT * value FROM sqlite_master;",
"column": "value",
}
],
}
}
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
state = hass.states.get("sensor.count_tables")
assert state.state == STATE_UNKNOWN
@pytest.mark.parametrize(
"url,expected_patterns,not_expected_patterns",
[
(
"sqlite://homeassistant:[email protected]",
["sqlite://****:****@homeassistant.local"],
["sqlite://homeassistant:[email protected]"],
),
(
"sqlite://homeassistant.local",
["sqlite://homeassistant.local"],
[],
),
],
)
async def test_invalid_url(hass, caplog, url, expected_patterns, not_expected_patterns):
"""Test credentials in url is not logged."""
config = {
"sensor": {
"platform": "sql",
"db_url": url,
"queries": [
{
"name": "count_tables",
"query": "SELECT 5 as value",
"column": "value",
}
],
}
}
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
for pattern in not_expected_patterns:
assert pattern not in caplog.text
for pattern in expected_patterns:
assert pattern in caplog.text
| apache-2.0 |
cloakedcode/CouchPotatoServer | libs/suds/bindings/document.py | 204 | 5792 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Provides classes for the (WS) SOAP I{document/literal}.
"""
from logging import getLogger
from suds import *
from suds.bindings.binding import Binding
from suds.sax.element import Element
log = getLogger(__name__)
class Document(Binding):
"""
The document/literal style. Literal is the only (@use) supported
since document/encoded is pretty much dead.
Although the soap specification supports multiple documents within the soap
<body/>, it is very uncommon. As such, suds presents an I{RPC} view of
service methods defined with a single document parameter. This is done so
that the user can pass individual parameters instead of one, single document.
To support the complete specification, service methods defined with multiple documents
(multiple message parts), must present a I{document} view for that method.
"""
def bodycontent(self, method, args, kwargs):
#
# The I{wrapped} vs I{bare} style is detected in 2 ways.
# If there is 2+ parts in the message then it is I{bare}.
# If there is only (1) part and that part resolves to a builtin then
# it is I{bare}. Otherwise, it is I{wrapped}.
#
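        # Illustration (hypothetical WSDL): a message with two parts
        # (arg0: xs:string, arg1: xs:int) is treated as I{bare}, while a
        # single part resolving to a complex <addPerson/> element is
        # I{wrapped}.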
if not len(method.soap.input.body.parts):
return ()
wrapped = method.soap.input.body.wrapped
if wrapped:
pts = self.bodypart_types(method)
root = self.document(pts[0])
else:
root = []
n = 0
for pd in self.param_defs(method):
if n < len(args):
value = args[n]
else:
value = kwargs.get(pd[0])
n += 1
p = self.mkparam(method, pd, value)
if p is None:
continue
if not wrapped:
ns = pd[1].namespace('ns0')
p.setPrefix(ns[0], ns[1])
root.append(p)
return root
def replycontent(self, method, body):
wrapped = method.soap.output.body.wrapped
if wrapped:
return body[0].children
else:
return body.children
def document(self, wrapper):
"""
Get the document root. For I{document/literal}, this is the
        name of the wrapper element qualified by the schema tns.
@param wrapper: The method name.
@type wrapper: L{xsd.sxbase.SchemaObject}
@return: A root element.
@rtype: L{Element}
"""
tag = wrapper[1].name
ns = wrapper[1].namespace('ns0')
d = Element(tag, ns=ns)
return d
def mkparam(self, method, pdef, object):
#
# Expand list parameters into individual parameters
# each with the type information. This is because in document
# arrays are simply unbounded elements.
#
if isinstance(object, (list, tuple)):
tags = []
for item in object:
tags.append(self.mkparam(method, pdef, item))
return tags
else:
return Binding.mkparam(self, method, pdef, object)
def param_defs(self, method):
#
# Get parameter definitions for document literal.
# The I{wrapped} vs I{bare} style is detected in 2 ways.
# If there is 2+ parts in the message then it is I{bare}.
# If there is only (1) part and that part resolves to a builtin then
# it is I{bare}. Otherwise, it is I{wrapped}.
#
pts = self.bodypart_types(method)
wrapped = method.soap.input.body.wrapped
if not wrapped:
return pts
result = []
# wrapped
for p in pts:
resolved = p[1].resolve()
for child, ancestry in resolved:
if child.isattr():
continue
if self.bychoice(ancestry):
log.debug(
'%s\ncontained by <choice/>, excluded as param for %s()',
child,
method.name)
continue
result.append((child.name, child))
return result
def returned_types(self, method):
result = []
wrapped = method.soap.output.body.wrapped
rts = self.bodypart_types(method, input=False)
if wrapped:
for pt in rts:
resolved = pt.resolve(nobuiltin=True)
for child, ancestry in resolved:
result.append(child)
break
else:
result += rts
return result
def bychoice(self, ancestry):
"""
The ancestry contains a <choice/>
@param ancestry: A list of ancestors.
@type ancestry: list
@return: True if contains <choice/>
@rtype: boolean
"""
for x in ancestry:
if x.choice():
return True
return False | gpl-3.0 |
williamfeng323/py-web | flask/lib/python3.6/site-packages/sqlalchemy/dialects/mssql/pymssql.py | 32 | 3143 | # mssql/pymssql.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+pymssql
:name: pymssql
:dbapi: pymssql
:connectstring: mssql+pymssql://<username>:<password>@<freetds_name>/?\
charset=utf8
:url: http://pymssql.org/
pymssql is a Python module that provides a Python DBAPI interface around
`FreeTDS <http://www.freetds.org/>`_. Compatible builds are available for
Linux, MacOSX and Windows platforms.
"""
from .base import MSDialect
from ... import types as sqltypes, util, processors
import re
class _MSNumeric_pymssql(sqltypes.Numeric):
def result_processor(self, dialect, type_):
if not self.asdecimal:
return processors.to_float
else:
return sqltypes.Numeric.result_processor(self, dialect, type_)
class MSDialect_pymssql(MSDialect):
supports_sane_rowcount = False
driver = 'pymssql'
colspecs = util.update_copy(
MSDialect.colspecs,
{
sqltypes.Numeric: _MSNumeric_pymssql,
sqltypes.Float: sqltypes.Float,
}
)
@classmethod
def dbapi(cls):
module = __import__('pymssql')
        # pymssql < 2.1.1 doesn't have a Binary method; fall back to str
client_ver = tuple(int(x) for x in module.__version__.split("."))
if client_ver < (2, 1, 1):
# TODO: monkeypatching here is less than ideal
module.Binary = lambda x: x if hasattr(x, 'decode') else str(x)
if client_ver < (1, ):
util.warn("The pymssql dialect expects at least "
"the 1.0 series of the pymssql DBAPI.")
return module
def __init__(self, **params):
super(MSDialect_pymssql, self).__init__(**params)
self.use_scope_identity = True
def _get_server_version_info(self, connection):
vers = connection.scalar("select @@version")
m = re.match(
r"Microsoft .*? - (\d+).(\d+).(\d+).(\d+)", vers)
if m:
return tuple(int(x) for x in m.group(1, 2, 3, 4))
else:
return None
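    # Example (hypothetical banner): "Microsoft SQL Server 2012 - 11.0.2100.60"
    # would be parsed by the regex above into (11, 0, 2100, 60).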
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
opts.update(url.query)
port = opts.pop('port', None)
if port and 'host' in opts:
opts['host'] = "%s:%s" % (opts['host'], port)
return [[], opts]
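    # Example (hypothetical URL): "mssql+pymssql://user:pw@host:1433/db"
    # yields opts with host="host:1433", since pymssql expects the port
    # appended to the hostname rather than passed separately.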
def is_disconnect(self, e, connection, cursor):
for msg in (
"Adaptive Server connection timed out",
"Net-Lib error during Connection reset by peer",
"message 20003", # connection timeout
"Error 10054",
"Not connected to any MS SQL server",
"Connection is closed",
"message 20006", # Write to the server failed
"message 20017", # Unexpected EOF from the server
):
if msg in str(e):
return True
else:
return False
dialect = MSDialect_pymssql
| mit |
jkonecny12/anaconda | tests/nosetests/pyanaconda_tests/core/signal_test.py | 5 | 5383 | #
# Martin Kolman <[email protected]>
#
# Copyright 2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
# Test the Python-based signal and slot implementation.
#
import unittest
from pyanaconda.core.signal import Signal
class FooClass(object):
def __init__(self):
self._var = None
@property
def var(self):
return self._var
def set_var(self, value):
self._var = value
class SignalTestCase(unittest.TestCase):
def setUp(self):
self.var = None
def method_test(self):
"""Test if a method can be correctly connected to a signal."""
signal = Signal()
foo = FooClass()
self.assertIsNone(foo.var)
# connect the signal
signal.connect(foo.set_var)
# trigger the signal
signal.emit("bar")
# check if the callback triggered correctly
self.assertEqual(foo.var, "bar")
# try to trigger the signal again
signal.emit("baz")
self.assertEqual(foo.var, "baz")
# now try to disconnect the signal
signal.disconnect(foo.set_var)
# check that calling the signal again
# no longer triggers the callback
signal.emit("anaconda")
self.assertEqual(foo.var, "baz")
def function_test(self):
"""Test if a local function can be correctly connected to a signal."""
# create a local function
def set_var(value):
self.var = value
signal = Signal()
self.assertIsNone(self.var)
# connect the signal
signal.connect(set_var)
# trigger the signal
signal.emit("bar")
# check if the callback triggered correctly
self.assertEqual(self.var, "bar")
# try to trigger the signal again
signal.emit("baz")
self.assertEqual(self.var, "baz")
# now try to disconnect the signal
signal.disconnect(set_var)
# check that calling the signal again
# no longer triggers the callback
signal.emit("anaconda")
self.assertEqual(self.var, "baz")
def lambda_test(self):
"""Test if a lambda can be correctly connected to a signal."""
foo = FooClass()
signal = Signal()
self.assertIsNone(foo.var)
# connect the signal
# pylint: disable=unnecessary-lambda
lambda_instance = lambda x: foo.set_var(x)
signal.connect(lambda_instance)
# trigger the signal
signal.emit("bar")
# check if the callback triggered correctly
self.assertEqual(foo.var, "bar")
# try to trigger the signal again
signal.emit("baz")
self.assertEqual(foo.var, "baz")
# now try to disconnect the signal
signal.disconnect(lambda_instance)
# check that calling the signal again
# no longer triggers the callback
signal.emit("anaconda")
self.assertEqual(foo.var, "baz")
def clear_test(self):
"""Test if the clear() method correctly clears any connected callbacks."""
def set_var(value):
self.var = value
signal = Signal()
foo = FooClass()
lambda_foo = FooClass()
self.assertIsNone(foo.var)
self.assertIsNone(lambda_foo.var)
self.assertIsNone(self.var)
# connect the callbacks
signal.connect(set_var)
signal.connect(foo.set_var)
# pylint: disable=unnecessary-lambda
signal.connect(lambda x: lambda_foo.set_var(x))
# trigger the signal
signal.emit("bar")
# check that the callbacks were triggered
self.assertEqual(self.var, "bar")
self.assertEqual(foo.var, "bar")
self.assertEqual(lambda_foo.var, "bar")
# clear the callbacks
signal.clear()
# trigger the signal again
signal.emit("anaconda")
# check that the callbacks were not triggered
self.assertEqual(self.var, "bar")
self.assertEqual(foo.var, "bar")
self.assertEqual(lambda_foo.var, "bar")
def signal_chain_test(self):
"""Check if signals can be chained together."""
foo = FooClass()
self.assertIsNone(foo.var)
signal1 = Signal()
signal1.connect(foo.set_var)
signal2 = Signal()
signal2.connect(signal1.emit)
signal3 = Signal()
signal3.connect(signal2.emit)
# trigger the chain
signal3.emit("bar")
# check if the initial callback was triggered
self.assertEqual(foo.var, "bar")
| gpl-2.0 |
PXke/invenio | invenio/ext/logging/wrappers.py | 1 | 19138 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Error handling library """
from __future__ import print_function
__revision__ = "$Id$"
import traceback
import os
import sys
import time
import datetime
import re
import inspect
from flask import current_app
from six import iteritems, StringIO
from invenio.base.globals import cfg
from .models import HstEXCEPTION
## Regular expression to match possible password-related variables that
## should be hidden in frame analysis.
RE_PWD = re.compile(r"pwd|pass|p_pw", re.I)
def get_pretty_wide_client_info(req):
"""Return in a pretty way all the avilable information about the current
user/client"""
if req:
from invenio.legacy.webuser import collect_user_info
user_info = collect_user_info(req)
keys = user_info.keys()
keys.sort()
max_key = max([len(key) for key in keys])
ret = ""
fmt = "%% %is: %%s\n" % max_key
for key in keys:
if RE_PWD.search(key):
continue
if key in ('uri', 'referer'):
ret += fmt % (key, "<%s>" % user_info[key])
else:
ret += fmt % (key, user_info[key])
if ret.endswith('\n'):
return ret[:-1]
else:
return ret
else:
return "No client information available"
def get_tracestack():
"""
    If an exception has been caught, return the system tracestack; otherwise
    return the tracestack of what is currently on the stack.
"""
if traceback.format_tb(sys.exc_info()[2]):
delimiter = "\n"
tracestack_pretty = "Traceback: \n%s" % \
delimiter.join(traceback.format_tb(sys.exc_info()[2]))
else:
## force traceback except for this call
tracestack = traceback.extract_stack()[:-1]
tracestack_pretty = "%sForced traceback (most recent call last)" % \
(' '*4, )
for trace_tuple in tracestack:
tracestack_pretty += """
File "%(file)s", line %(line)s, in %(function)s
%(text)s""" % {
'file': trace_tuple[0],
'line': trace_tuple[1],
'function': trace_tuple[2],
'text': trace_tuple[3] is not None and \
str(trace_tuple[3]) or ""}
return tracestack_pretty
def register_emergency(msg, recipients=None):
"""Launch an emergency. This means to send email messages to each
address in 'recipients'. By default recipients will be obtained via
get_emergency_recipients() which loads settings from
CFG_SITE_EMERGENCY_EMAIL_ADDRESSES
"""
from invenio.ext.email import send_email
if not recipients:
recipients = get_emergency_recipients()
recipients = set(recipients)
recipients.add(cfg['CFG_SITE_ADMIN_EMAIL'])
for address_str in recipients:
send_email(cfg['CFG_SITE_SUPPORT_EMAIL'], address_str, "Emergency notification", msg)
def get_emergency_recipients(recipient_cfg=None):
"""Parse a list of appropriate emergency email recipients from
CFG_SITE_EMERGENCY_EMAIL_ADDRESSES, or from a provided dictionary
comprised of 'time constraint' => 'comma separated list of addresses'
CFG_SITE_EMERGENCY_EMAIL_ADDRESSES format example:
CFG_SITE_EMERGENCY_EMAIL_ADDRESSES = {
'Sunday 22:00-06:00': '[email protected]',
'06:00-18:00': '[email protected],[email protected]',
'18:00-06:00': '[email protected]',
'*': '[email protected]'}
"""
from invenio.utils.date import parse_runtime_limit
if recipient_cfg is None:
recipient_cfg = cfg['CFG_SITE_EMERGENCY_EMAIL_ADDRESSES']
recipients = set()
for time_condition, address_str in recipient_cfg.items():
        if time_condition and time_condition != '*':
(current_range, future_range) = parse_runtime_limit(time_condition)
if not current_range[0] <= datetime.datetime.now() <= current_range[1]:
continue
recipients.update([address_str])
return list(recipients)
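# Example (hypothetical config, as in the docstring above): at Monday noon
# both the '06:00-18:00' entry and the catch-all '*' entry apply, so
# '[email protected],[email protected]' and '[email protected]' are
# returned (each configured value is kept as one comma-separated string).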
def find_all_values_to_hide(local_variables, analyzed_stack=None):
"""Return all the potential password to hyde."""
## Let's add at least the DB password.
if analyzed_stack is None:
ret = set([cfg['CFG_DATABASE_PASS']])
analyzed_stack = set()
else:
ret = set()
for key, value in iteritems(local_variables):
if id(value) in analyzed_stack:
## Let's avoid loops
continue
analyzed_stack.add(id(value))
if RE_PWD.search(key):
ret.add(str(value))
if isinstance(value, dict):
ret |= find_all_values_to_hide(value, analyzed_stack)
if '' in ret:
## Let's discard the empty string in case there is an empty password,
## or otherwise anything will be separated by '<*****>' in the output
## :-)
ret.remove('')
return ret
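# Example (hypothetical locals): for {'user': 'joe', 'db_pwd': 'secret',
# 'cfg': {'pass_key': 'hunter2'}} the result would contain 'secret',
# 'hunter2' and CFG_DATABASE_PASS, so they can be masked in tracebacks.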
def get_pretty_traceback(req=None, exc_info=None, skip_frames=0):
"""
Given an optional request object and an optional exc_info,
returns a text string representing many details about an exception.
"""
if exc_info is None:
exc_info = sys.exc_info()
if exc_info[0]:
## We found an exception.
## We want to extract the name of the Exception
exc_name = exc_info[0].__name__
exc_value = str(exc_info[1])
filename, line_no, function_name = _get_filename_and_line(exc_info)
## Let's record when and where and what
www_data = "%(time)s -> %(name)s: %(value)s (%(file)s:%(line)s:%(function)s)" % {
'time': time.strftime("%Y-%m-%d %H:%M:%S"),
'name': exc_name,
'value': exc_value,
'file': filename,
'line': line_no,
'function': function_name }
## Let's retrieve contextual user related info, if any
try:
client_data = get_pretty_wide_client_info(req)
except Exception as err:
client_data = "Error in retrieving " \
"contextual information: %s" % err
## Let's extract the traceback:
tracestack_data_stream = StringIO()
print("\n** Traceback details \n", file=tracestack_data_stream)
traceback.print_exc(file=tracestack_data_stream)
stack = [frame[0] for frame in inspect.trace()]
#stack = [frame[0] for frame in inspect.getouterframes(exc_info[2])][skip_frames:]
try:
stack.reverse()
print("\n** Stack frame details", file=tracestack_data_stream)
values_to_hide = set()
for frame in stack:
try:
print(file=tracestack_data_stream)
print("Frame %s in %s at line %s" % (
frame.f_code.co_name,
frame.f_code.co_filename,
frame.f_lineno), file=tracestack_data_stream)
## Dereferencing f_locals
## See: http://utcc.utoronto.ca/~cks/space/blog/python/FLocalsAndTraceFunctions
local_values = frame.f_locals
try:
values_to_hide |= find_all_values_to_hide(local_values)
code = open(frame.f_code.co_filename).readlines()
first_line = max(1, frame.f_lineno-3)
last_line = min(len(code), frame.f_lineno+3)
print("-" * 79, file=tracestack_data_stream)
for line in xrange(first_line, last_line+1):
code_line = code[line-1].rstrip()
if line == frame.f_lineno:
print("----> %4i %s" % (line, code_line), file=tracestack_data_stream)
else:
print(" %4i %s" % (line, code_line), file=tracestack_data_stream)
print("-" * 79, file=tracestack_data_stream)
except:
pass
for key, value in local_values.items():
print("\t%20s = " % key, end=' ', file=tracestack_data_stream)
try:
value = repr(value)
except Exception as err:
## We shall gracefully accept errors when repr() of
## a value fails (e.g. when we are trying to repr() a
## variable that was not fully initialized as the
## exception was raised during its __init__ call).
value = "ERROR: when representing the value: %s" % (err)
try:
print(_truncate_dynamic_string(value), file=tracestack_data_stream)
except:
print("<ERROR WHILE PRINTING VALUE>", file=tracestack_data_stream)
finally:
del frame
finally:
del stack
tracestack_data = tracestack_data_stream.getvalue()
for to_hide in values_to_hide:
## Let's hide passwords
tracestack_data = tracestack_data.replace(to_hide, '<*****>')
## Okay, start printing:
output = StringIO()
print("* %s" % www_data, file=output)
print("\n** User details", file=output)
print(client_data, file=output)
if tracestack_data:
print(tracestack_data, file=output)
return output.getvalue()
else:
return ""
def register_exception(stream='error',
req=None,
prefix='',
suffix='',
alert_admin=False,
subject=''):
"""
Log error exception to invenio.err and warning exception to invenio.log.
Errors will be logged together with client information (if req is
given).
Note: For sanity reasons, dynamic params such as PREFIX, SUFFIX and
    local stack variables are checked for length, and only the first 500
    chars of their values are printed.
@param stream: 'error' or 'warning'
@param req: mod_python request
@param prefix: a message to be printed before the exception in
the log
    @param suffix: a message to be printed after the exception in
    the log
    @param alert_admin: whether to send the exception to the administrator via
    email. Note this parameter is bypassed when
    CFG_SITE_ADMIN_EMAIL_EXCEPTIONS is set to a value different from 1
@param subject: overrides the email subject
@return: 1 if successfully wrote to stream, 0 if not
"""
try:
## Let's extract exception information
exc_info = sys.exc_info()
exc_name = exc_info[0].__name__
output = get_pretty_traceback(
req=req, exc_info=exc_info, skip_frames=2)
if output:
## Okay, start printing:
log_stream = StringIO()
email_stream = StringIO()
print('\n', end=' ', file=email_stream)
## If a prefix was requested let's print it
if prefix:
#prefix = _truncate_dynamic_string(prefix)
print(prefix + '\n', file=log_stream)
print(prefix + '\n', file=email_stream)
print(output, file=log_stream)
print(output, file=email_stream)
## If a suffix was requested let's print it
if suffix:
#suffix = _truncate_dynamic_string(suffix)
print(suffix, file=log_stream)
print(suffix, file=email_stream)
log_text = log_stream.getvalue()
email_text = email_stream.getvalue()
if email_text.endswith('\n'):
email_text = email_text[:-1]
## Preparing the exception dump
if stream=='error':
logger_method = current_app.logger.error
else:
logger_method = current_app.logger.info
## We now have the whole trace
written_to_log = False
try:
## Let's try to write into the log.
logger_method(log_text)
written_to_log = True
except:
written_to_log = False
filename, line_no, function_name = _get_filename_and_line(exc_info)
## let's log the exception and see whether we should report it.
log = HstEXCEPTION.get_or_create(exc_name, filename, line_no)
if log.exception_should_be_notified and (
cfg['CFG_SITE_ADMIN_EMAIL_EXCEPTIONS'] > 1 or
(alert_admin and
cfg['CFG_SITE_ADMIN_EMAIL_EXCEPTIONS'] > 0) or
not written_to_log):
## If requested or if it's impossible to write in the log
from invenio.ext.email import send_email
if not subject:
subject = 'Exception (%s:%s:%s)' % (
filename, line_no, function_name)
subject = '%s at %s' % (subject, cfg['CFG_SITE_URL'])
email_text = "\n%s\n%s" % (log.pretty_notification_info,
email_text)
if not written_to_log:
email_text += """\
Note that this email was sent to you because it has been impossible to log
this exception into %s""" % os.path.join(cfg['CFG_LOGDIR'], 'invenio.' + stream)
send_email(
cfg['CFG_SITE_ADMIN_EMAIL'],
cfg['CFG_SITE_ADMIN_EMAIL'],
subject=subject,
content=email_text)
return 1
else:
return 0
except Exception as err:
print("Error in registering exception to '%s': '%s'" % (
cfg['CFG_LOGDIR'] + '/invenio.' + stream, err), file=sys.stderr)
return 0
def raise_exception(exception_type=Exception, msg='', stream='error',
req=None, prefix='', suffix='', alert_admin=False,
subject=''):
"""
Log error exception to invenio.err and warning exception to invenio.log.
Errors will be logged together with client information (if req is
given).
It does not require a previously risen exception.
Note: For sanity reasons, dynamic params such as PREFIX, SUFFIX and
    local stack variables are checked for length, and only the first 500
    chars of their values are printed.
@param exception_type: exception type to be used internally
@param msg: error message
@param stream: 'error' or 'warning'
@param req: mod_python request
@param prefix: a message to be printed before the exception in
the log
    @param suffix: a message to be printed after the exception in
    the log
    @param alert_admin: whether to send the exception to the administrator via
    email. Note this parameter is bypassed when
    CFG_SITE_ADMIN_EMAIL_EXCEPTIONS is set to a value different from 1
@param subject: overrides the email subject
@return: 1 if successfully wrote to stream, 0 if not
"""
try:
raise exception_type(msg)
except:
return register_exception(stream=stream,
req=req,
prefix=prefix,
suffix=suffix,
alert_admin=alert_admin,
subject=subject)
def send_error_report_to_admin(header, url, time_msg,
browser, client, error,
sys_error, traceback_msg):
"""
Sends an email to the admin with client info and tracestack
"""
from_addr = '%s Alert Engine <%s>' % (
cfg['CFG_SITE_NAME'], cfg['CFG_WEBALERT_ALERT_ENGINE_EMAIL'])
to_addr = cfg['CFG_SITE_ADMIN_EMAIL']
body = """
The following error was seen by a user and sent to you.
%(contact)s
%(header)s
%(url)s
%(time)s
%(browser)s
%(client)s
%(error)s
%(sys_error)s
%(traceback)s
Please see the %(logdir)s/invenio.err for traceback details.""" % {
'header': header,
'url': url,
'time': time_msg,
'browser': browser,
'client': client,
'error': error,
'sys_error': sys_error,
'traceback': traceback_msg,
'logdir': cfg['CFG_LOGDIR'],
'contact': "Please contact %s quoting the following information:" %
(cfg['CFG_SITE_SUPPORT_EMAIL'], )}
from invenio.ext.email import send_email
send_email(from_addr, to_addr, subject="Error notification", content=body)
def _get_filename_and_line(exc_info):
"""Return the filename, the line and the function_name where
the exception happened."""
tb = exc_info[2]
exception_info = traceback.extract_tb(tb)[-1]
filename = os.path.basename(exception_info[0])
line_no = exception_info[1]
function_name = exception_info[2]
return filename, line_no, function_name
def _truncate_dynamic_string(val, maxlength=500):
"""
Return at most MAXLENGTH characters of VAL. Useful for
sanitizing dynamic variable values in the output.
"""
out = repr(val)
if len(out) > maxlength:
out = out[:maxlength] + ' [...]'
return out
def wrap_warn():
import warnings
from functools import wraps
def wrapper(showwarning):
@wraps(showwarning)
def new_showwarning(message=None, category=None, filename=None,
lineno=None, file=None, line=None):
current_app.logger.warning("* %(time)s -> WARNING: %(category)s: %(message)s (%(file)s:%(line)s)\n" % {
'time': time.strftime("%Y-%m-%d %H:%M:%S"),
'category': category,
'message': message,
'file': filename,
'line': lineno} + "** Traceback details\n" +
str(traceback.format_stack()) + "\n")
return new_showwarning
warnings.showwarning = wrapper(warnings.showwarning)
| gpl-2.0 |
hwu25/AppPkg | Applications/Python/Python-2.7.2/Lib/test/test_isinstance.py | 14 | 10083 | # Tests some corner cases with isinstance() and issubclass(). While these
# tests use new style classes and properties, they actually do whitebox
# testing of error conditions uncovered when using extension types.
import unittest
from test import test_support
import sys
class TestIsInstanceExceptions(unittest.TestCase):
# Test to make sure that an AttributeError when accessing the instance's
# class's bases is masked. This was actually a bug in Python 2.2 and
# 2.2.1 where the exception wasn't caught but it also wasn't being cleared
# (leading to an "undetected error" in the debug build). Set up is,
# isinstance(inst, cls) where:
#
# - inst isn't an InstanceType
# - cls isn't a ClassType, a TypeType, or a TupleType
# - cls has a __bases__ attribute
# - inst has a __class__ attribute
    # - inst.__class__ has no __bases__ attribute
#
# Sounds complicated, I know, but this mimics a situation where an
# extension type raises an AttributeError when its __bases__ attribute is
# gotten. In that case, isinstance() should return False.
def test_class_has_no_bases(self):
class I(object):
def getclass(self):
# This must return an object that has no __bases__ attribute
return None
__class__ = property(getclass)
class C(object):
def getbases(self):
return ()
__bases__ = property(getbases)
self.assertEqual(False, isinstance(I(), C()))
# Like above except that inst.__class__.__bases__ raises an exception
# other than AttributeError
def test_bases_raises_other_than_attribute_error(self):
class E(object):
def getbases(self):
raise RuntimeError
__bases__ = property(getbases)
class I(object):
def getclass(self):
return E()
__class__ = property(getclass)
class C(object):
def getbases(self):
return ()
__bases__ = property(getbases)
self.assertRaises(RuntimeError, isinstance, I(), C())
# Here's a situation where getattr(cls, '__bases__') raises an exception.
# If that exception is not AttributeError, it should not get masked
def test_dont_mask_non_attribute_error(self):
class I: pass
class C(object):
def getbases(self):
raise RuntimeError
__bases__ = property(getbases)
self.assertRaises(RuntimeError, isinstance, I(), C())
# Like above, except that getattr(cls, '__bases__') raises an
# AttributeError, which /should/ get masked as a TypeError
def test_mask_attribute_error(self):
class I: pass
class C(object):
def getbases(self):
raise AttributeError
__bases__ = property(getbases)
self.assertRaises(TypeError, isinstance, I(), C())
# These tests are similar to above, but tickle certain code paths in
# issubclass() instead of isinstance() -- really PyObject_IsSubclass()
# vs. PyObject_IsInstance().
class TestIsSubclassExceptions(unittest.TestCase):
def test_dont_mask_non_attribute_error(self):
class C(object):
def getbases(self):
raise RuntimeError
__bases__ = property(getbases)
class S(C): pass
self.assertRaises(RuntimeError, issubclass, C(), S())
def test_mask_attribute_error(self):
class C(object):
def getbases(self):
raise AttributeError
__bases__ = property(getbases)
class S(C): pass
self.assertRaises(TypeError, issubclass, C(), S())
# Like above, but test the second branch, where the __bases__ of the
# second arg (the cls arg) is tested. This means the first arg must
# return a valid __bases__, and it's okay for it to be a normal --
# unrelated by inheritance -- class.
def test_dont_mask_non_attribute_error_in_cls_arg(self):
class B: pass
class C(object):
def getbases(self):
raise RuntimeError
__bases__ = property(getbases)
self.assertRaises(RuntimeError, issubclass, B, C())
def test_mask_attribute_error_in_cls_arg(self):
class B: pass
class C(object):
def getbases(self):
raise AttributeError
__bases__ = property(getbases)
self.assertRaises(TypeError, issubclass, B, C())
# meta classes for creating abstract classes and instances
class AbstractClass(object):
def __init__(self, bases):
self.bases = bases
def getbases(self):
return self.bases
__bases__ = property(getbases)
def __call__(self):
return AbstractInstance(self)
class AbstractInstance(object):
def __init__(self, klass):
self.klass = klass
def getclass(self):
return self.klass
__class__ = property(getclass)
# abstract classes
AbstractSuper = AbstractClass(bases=())
AbstractChild = AbstractClass(bases=(AbstractSuper,))
# normal classes
class Super:
pass
class Child(Super):
pass
# new-style classes
class NewSuper(object):
pass
class NewChild(NewSuper):
pass
class TestIsInstanceIsSubclass(unittest.TestCase):
# Tests to ensure that isinstance and issubclass work on abstract
# classes and instances. Before the 2.2 release, TypeErrors were
# raised when boolean values should have been returned. The bug was
    # triggered by mixing 'normal' classes and instances with
# 'abstract' classes and instances. This case tries to test all
# combinations.
def test_isinstance_normal(self):
# normal instances
self.assertEqual(True, isinstance(Super(), Super))
self.assertEqual(False, isinstance(Super(), Child))
self.assertEqual(False, isinstance(Super(), AbstractSuper))
self.assertEqual(False, isinstance(Super(), AbstractChild))
self.assertEqual(True, isinstance(Child(), Super))
self.assertEqual(False, isinstance(Child(), AbstractSuper))
def test_isinstance_abstract(self):
# abstract instances
self.assertEqual(True, isinstance(AbstractSuper(), AbstractSuper))
self.assertEqual(False, isinstance(AbstractSuper(), AbstractChild))
self.assertEqual(False, isinstance(AbstractSuper(), Super))
self.assertEqual(False, isinstance(AbstractSuper(), Child))
self.assertEqual(True, isinstance(AbstractChild(), AbstractChild))
self.assertEqual(True, isinstance(AbstractChild(), AbstractSuper))
self.assertEqual(False, isinstance(AbstractChild(), Super))
self.assertEqual(False, isinstance(AbstractChild(), Child))
def test_subclass_normal(self):
# normal classes
self.assertEqual(True, issubclass(Super, Super))
self.assertEqual(False, issubclass(Super, AbstractSuper))
self.assertEqual(False, issubclass(Super, Child))
self.assertEqual(True, issubclass(Child, Child))
self.assertEqual(True, issubclass(Child, Super))
self.assertEqual(False, issubclass(Child, AbstractSuper))
def test_subclass_abstract(self):
# abstract classes
self.assertEqual(True, issubclass(AbstractSuper, AbstractSuper))
self.assertEqual(False, issubclass(AbstractSuper, AbstractChild))
self.assertEqual(False, issubclass(AbstractSuper, Child))
self.assertEqual(True, issubclass(AbstractChild, AbstractChild))
self.assertEqual(True, issubclass(AbstractChild, AbstractSuper))
self.assertEqual(False, issubclass(AbstractChild, Super))
self.assertEqual(False, issubclass(AbstractChild, Child))
def test_subclass_tuple(self):
# test with a tuple as the second argument classes
self.assertEqual(True, issubclass(Child, (Child,)))
self.assertEqual(True, issubclass(Child, (Super,)))
self.assertEqual(False, issubclass(Super, (Child,)))
self.assertEqual(True, issubclass(Super, (Child, Super)))
self.assertEqual(False, issubclass(Child, ()))
self.assertEqual(True, issubclass(Super, (Child, (Super,))))
self.assertEqual(True, issubclass(NewChild, (NewChild,)))
self.assertEqual(True, issubclass(NewChild, (NewSuper,)))
self.assertEqual(False, issubclass(NewSuper, (NewChild,)))
self.assertEqual(True, issubclass(NewSuper, (NewChild, NewSuper)))
self.assertEqual(False, issubclass(NewChild, ()))
self.assertEqual(True, issubclass(NewSuper, (NewChild, (NewSuper,))))
self.assertEqual(True, issubclass(int, (long, (float, int))))
if test_support.have_unicode:
self.assertEqual(True, issubclass(str, (unicode, (Child, NewChild, basestring))))
def test_subclass_recursion_limit(self):
# make sure that issubclass raises RuntimeError before the C stack is
# blown
self.assertRaises(RuntimeError, blowstack, issubclass, str, str)
def test_isinstance_recursion_limit(self):
        # make sure that isinstance raises RuntimeError before the C stack is
        # blown
self.assertRaises(RuntimeError, blowstack, isinstance, '', str)
def blowstack(fxn, arg, compare_to):
# Make sure that calling isinstance with a deeply nested tuple for its
# argument will raise RuntimeError eventually.
tuple_arg = (compare_to,)
for cnt in xrange(sys.getrecursionlimit()+5):
tuple_arg = (tuple_arg,)
fxn(arg, tuple_arg)
def test_main():
test_support.run_unittest(
TestIsInstanceExceptions,
TestIsSubclassExceptions,
TestIsInstanceIsSubclass
)
if __name__ == '__main__':
test_main()
| bsd-2-clause |
parkrrr/skybot | plugins/snopes.py | 22 | 1057 | import re
from util import hook, http
search_url = "http://search.atomz.com/search/?sp_a=00062d45-sp00000000"
@hook.command
def snopes(inp):
".snopes <topic> -- searches snopes for an urban legend about <topic>"
search_page = http.get_html(search_url, sp_q=inp, sp_c="1")
result_urls = search_page.xpath("//a[@target='_self']/@href")
if not result_urls:
return "no matching pages found"
snopes_page = http.get_html(result_urls[0])
snopes_text = snopes_page.text_content()
claim = re.search(r"Claim: .*", snopes_text).group(0).strip()
status = re.search(r"Status: .*", snopes_text)
if status is not None:
status = status.group(0).strip()
else: # new-style statuses
status = "Status: %s." % re.search(r"FALSE|TRUE|MIXTURE|UNDETERMINED",
snopes_text).group(0).title()
claim = re.sub(r"[\s\xa0]+", " ", claim) # compress whitespace
status = re.sub(r"[\s\xa0]+", " ", status)
return "%s %s %s" % (claim, status, result_urls[0])
| unlicense |
ldoktor/autotest | frontend/afe/model_logic.py | 4 | 43717 | """
Extensions to Django's model logic.
"""
import re
import django.core.exceptions
from django.db import models as dbmodels, backend, connection
from django.db.models.sql import query
import django.db.models.sql.where
from django.utils import datastructures
from autotest.frontend.afe import readonly_connection
class ValidationError(Exception):
"""\
Data validation error in adding or updating an object. The associated
value is a dictionary mapping field names to error strings.
"""
def _wrap_with_readonly(method):
def wrapper_method(*args, **kwargs):
readonly_connection.connection().set_django_connection()
try:
return method(*args, **kwargs)
finally:
readonly_connection.connection().unset_django_connection()
wrapper_method.__name__ = method.__name__
return wrapper_method
def _quote_name(name):
"""Shorthand for connection.ops.quote_name()."""
return connection.ops.quote_name(name)
def _wrap_generator_with_readonly(generator):
"""
We have to wrap generators specially. Assume it performs
the query on the first call to next().
"""
def wrapper_generator(*args, **kwargs):
generator_obj = generator(*args, **kwargs)
readonly_connection.connection().set_django_connection()
try:
first_value = generator_obj.next()
finally:
readonly_connection.connection().unset_django_connection()
yield first_value
while True:
yield generator_obj.next()
wrapper_generator.__name__ = generator.__name__
return wrapper_generator
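# Note: only the first next() call runs on the read-only connection; later
# rows are assumed to come from the already-executed cursor, which is why the
# default Django connection can be restored right after the first value.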
def _make_queryset_readonly(queryset):
"""
Wrap all methods that do database queries with a readonly connection.
"""
db_query_methods = ['count', 'get', 'get_or_create', 'latest', 'in_bulk',
'delete']
for method_name in db_query_methods:
method = getattr(queryset, method_name)
wrapped_method = _wrap_with_readonly(method)
setattr(queryset, method_name, wrapped_method)
queryset.iterator = _wrap_generator_with_readonly(queryset.iterator)
class ReadonlyQuerySet(dbmodels.query.QuerySet):
"""
QuerySet object that performs all database queries with the read-only
connection.
"""
def __init__(self, model=None, *args, **kwargs):
super(ReadonlyQuerySet, self).__init__(model, *args, **kwargs)
_make_queryset_readonly(self)
def values(self, *fields):
return self._clone(klass=ReadonlyValuesQuerySet,
setup=True, _fields=fields)
class ReadonlyValuesQuerySet(dbmodels.query.ValuesQuerySet):
def __init__(self, model=None, *args, **kwargs):
super(ReadonlyValuesQuerySet, self).__init__(model, *args, **kwargs)
_make_queryset_readonly(self)
class ExtendedManager(dbmodels.Manager):
"""\
Extended manager supporting subquery filtering.
"""
class CustomQuery(query.Query):
def __init__(self, *args, **kwargs):
super(ExtendedManager.CustomQuery, self).__init__(*args, **kwargs)
self._custom_joins = []
def clone(self, klass=None, **kwargs):
obj = super(ExtendedManager.CustomQuery, self).clone(klass)
obj._custom_joins = list(self._custom_joins)
return obj
def combine(self, rhs, connector):
super(ExtendedManager.CustomQuery, self).combine(rhs, connector)
if hasattr(rhs, '_custom_joins'):
self._custom_joins.extend(rhs._custom_joins)
def add_custom_join(self, table, condition, join_type,
condition_values=(), alias=None):
if alias is None:
alias = table
join_dict = dict(table=table,
condition=condition,
condition_values=condition_values,
join_type=join_type,
alias=alias)
self._custom_joins.append(join_dict)
@classmethod
def convert_query(self, query_set):
"""
Convert the query set's "query" attribute to a CustomQuery.
"""
# Make a copy of the query set
query_set = query_set.all()
query_set.query = query_set.query.clone(
klass=ExtendedManager.CustomQuery,
_custom_joins=[])
return query_set
class _WhereClause(object):
"""Object allowing us to inject arbitrary SQL into Django queries.
By using this instead of extra(where=...), we can still freely combine
queries with & and |.
"""
def __init__(self, clause, values=()):
self._clause = clause
self._values = values
def as_sql(self, qn=None, connection=None):
return self._clause, self._values
def relabel_aliases(self, change_map):
return
def add_join(self, query_set, join_table, join_key, join_condition='',
join_condition_values=(), join_from_key=None, alias=None,
suffix='', exclude=False, force_left_join=False):
"""Add a join to query_set.
Join looks like this:
(INNER|LEFT) JOIN <join_table> AS <alias>
ON (<this table>.<join_from_key> = <join_table>.<join_key>
and <join_condition>)
@param join_table table to join to
@param join_key field referencing back to this model to use for the join
@param join_condition extra condition for the ON clause of the join
@param join_condition_values values to substitute into join_condition
@param join_from_key column on this model to join from.
@param alias alias to use for for join
@param suffix suffix to add to join_table for the join alias, if no
alias is provided
@param exclude if true, exclude rows that match this join (will use a
LEFT OUTER JOIN and an appropriate WHERE condition)
@param force_left_join - if true, a LEFT OUTER JOIN will be used
instead of an INNER JOIN regardless of other options
"""
join_from_table = query_set.model._meta.db_table
if join_from_key is None:
join_from_key = self.model._meta.pk.name
if alias is None:
alias = join_table + suffix
full_join_key = _quote_name(alias) + '.' + _quote_name(join_key)
full_join_condition = '%s = %s.%s' % (full_join_key,
_quote_name(join_from_table),
_quote_name(join_from_key))
if join_condition:
full_join_condition += ' AND (' + join_condition + ')'
if exclude or force_left_join:
join_type = query_set.query.LOUTER
else:
join_type = query_set.query.INNER
query_set = self.CustomQuery.convert_query(query_set)
query_set.query.add_custom_join(join_table,
full_join_condition,
join_type,
condition_values=join_condition_values,
alias=alias)
if exclude:
query_set = query_set.extra(where=[full_join_key + ' IS NULL'])
return query_set
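    # Illustrative sketch only (not taken from the original callers): assuming
    # a hypothetical Host model managed by an ExtendedManager and a pivot
    # table "afe_hosts_labels" with a "host_id" column, a custom join might
    # look like the following.  Note the join condition references the join
    # alias, which defaults to join_table + suffix:
    #
    #   query = Host.objects.all()
    #   query = Host.objects.add_join(
    #       query, 'afe_hosts_labels', join_key='host_id',
    #       join_condition='afe_hosts_labels_label10.label_id = %s',
    #       join_condition_values=(10,), suffix='_label10')
    #
    # All model, table and column names above are assumptions.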
def _info_for_many_to_one_join(self, field, join_to_query, alias):
"""
@param field: the ForeignKey field on the related model
@param join_to_query: the query over the related model that we're
joining to
@param alias: alias of joined table
"""
info = {}
rhs_table = join_to_query.model._meta.db_table
info['rhs_table'] = rhs_table
info['rhs_column'] = field.column
info['lhs_column'] = field.rel.get_related_field().column
rhs_where = join_to_query.query.where
rhs_where.relabel_aliases({rhs_table: alias})
compiler = join_to_query.query.get_compiler(using=join_to_query.db)
where_clause, values = rhs_where.as_sql(
compiler.quote_name_unless_alias,
compiler.connection)
info['where_clause'] = where_clause
info['values'] = values
return info
def _info_for_many_to_many_join(self, m2m_field, join_to_query, alias,
m2m_is_on_this_model):
"""
@param m2m_field: a Django field representing the M2M relationship.
It uses a pivot table with the following structure:
this model table <---> M2M pivot table <---> joined model table
@param join_to_query: the query over the related model that we're
joining to.
@param alias: alias of joined table
"""
if m2m_is_on_this_model:
# referenced field on this model
lhs_id_field = self.model._meta.pk
# foreign key on the pivot table referencing lhs_id_field
m2m_lhs_column = m2m_field.m2m_column_name()
            # foreign key on the pivot table referencing rhs_id_field
m2m_rhs_column = m2m_field.m2m_reverse_name()
# referenced field on related model
rhs_id_field = m2m_field.rel.get_related_field()
else:
lhs_id_field = m2m_field.rel.get_related_field()
m2m_lhs_column = m2m_field.m2m_reverse_name()
m2m_rhs_column = m2m_field.m2m_column_name()
rhs_id_field = join_to_query.model._meta.pk
info = {}
info['rhs_table'] = m2m_field.m2m_db_table()
info['rhs_column'] = m2m_lhs_column
info['lhs_column'] = lhs_id_field.column
# select the ID of related models relevant to this join. we can only do
# a single join, so we need to gather this information up front and
# include it in the join condition.
rhs_ids = join_to_query.values_list(rhs_id_field.attname, flat=True)
assert len(rhs_ids) == 1, ('Many-to-many custom field joins can only '
'match a single related object.')
rhs_id = rhs_ids[0]
info['where_clause'] = '%s.%s = %s' % (_quote_name(alias),
_quote_name(m2m_rhs_column),
rhs_id)
info['values'] = ()
return info
def join_custom_field(self, query_set, join_to_query, alias,
left_join=True):
"""Join to a related model to create a custom field in the given query.
This method is used to construct a custom field on the given query based
        on a many-valued relationship. join_to_query should be a simple query
(no joins) on the related model which returns at most one related row
per instance of this model.
For many-to-one relationships, the joined table contains the matching
        row from the related model if one is related, NULL otherwise.
For many-to-many relationships, the joined table contains the matching
row if it's related, NULL otherwise.
"""
relationship_type, field = self.determine_relationship(
join_to_query.model)
if relationship_type == self.MANY_TO_ONE:
info = self._info_for_many_to_one_join(field, join_to_query, alias)
elif relationship_type == self.M2M_ON_RELATED_MODEL:
info = self._info_for_many_to_many_join(
m2m_field=field, join_to_query=join_to_query, alias=alias,
m2m_is_on_this_model=False)
        elif relationship_type == self.M2M_ON_THIS_MODEL:
info = self._info_for_many_to_many_join(
m2m_field=field, join_to_query=join_to_query, alias=alias,
m2m_is_on_this_model=True)
return self.add_join(query_set, info['rhs_table'], info['rhs_column'],
join_from_key=info['lhs_column'],
join_condition=info['where_clause'],
join_condition_values=info['values'],
alias=alias,
force_left_join=left_join)
def key_on_joined_table(self, join_to_query):
"""Get a non-null column on the table joined for the given query.
This analyzes the join that would be produced if join_to_query were
passed to join_custom_field.
"""
relationship_type, field = self.determine_relationship(
join_to_query.model)
if relationship_type == self.MANY_TO_ONE:
return join_to_query.model._meta.pk.column
return field.m2m_column_name() # any column on the M2M table will do
def add_where(self, query_set, where, values=()):
query_set = query_set.all()
query_set.query.where.add(self._WhereClause(where, values),
django.db.models.sql.where.AND)
return query_set
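    # Usage sketch (table and column names are assumptions):
    #
    #   query = manager.add_where(query, 'afe_jobs.id > %s', (100,))
    #
    # Unlike extra(where=...), the resulting query still combines freely
    # with further filter() calls via & and |.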
def _get_quoted_field(self, table, field):
return _quote_name(table) + '.' + _quote_name(field)
def get_key_on_this_table(self, key_field=None):
if key_field is None:
# default to primary key
key_field = self.model._meta.pk.column
return self._get_quoted_field(self.model._meta.db_table, key_field)
def escape_user_sql(self, sql):
return sql.replace('%', '%%')
def _custom_select_query(self, query_set, selects):
compiler = query_set.query.get_compiler(using=query_set.db)
sql, params = compiler.as_sql()
from_ = sql[sql.find(' FROM'):]
if query_set.query.distinct:
distinct = 'DISTINCT '
else:
distinct = ''
sql_query = ('SELECT ' + distinct + ','.join(selects) + from_)
cursor = readonly_connection.connection().cursor()
cursor.execute(sql_query, params)
return cursor.fetchall()
def _is_relation_to(self, field, model_class):
return field.rel and field.rel.to is model_class
MANY_TO_ONE = object()
M2M_ON_RELATED_MODEL = object()
M2M_ON_THIS_MODEL = object()
def determine_relationship(self, related_model):
"""
Determine the relationship between this model and related_model.
related_model must have some sort of many-valued relationship to this
manager's model.
@returns (relationship_type, field), where relationship_type is one of
MANY_TO_ONE, M2M_ON_RELATED_MODEL, M2M_ON_THIS_MODEL, and field
is the Django field object for the relationship.
"""
# look for a foreign key field on related_model relating to this model
for field in related_model._meta.fields:
if self._is_relation_to(field, self.model):
return self.MANY_TO_ONE, field
# look for an M2M field on related_model relating to this model
for field in related_model._meta.many_to_many:
if self._is_relation_to(field, self.model):
return self.M2M_ON_RELATED_MODEL, field
# maybe this model has the many-to-many field
for field in self.model._meta.many_to_many:
if self._is_relation_to(field, related_model):
return self.M2M_ON_THIS_MODEL, field
raise ValueError('%s has no relation to %s' %
(related_model, self.model))
def _get_pivot_iterator(self, base_objects_by_id, related_model):
"""
Determine the relationship between this model and related_model, and
return a pivot iterator.
@param base_objects_by_id: dict of instances of this model indexed by
their IDs
@returns a pivot iterator, which yields a tuple (base_object,
related_object) for each relationship between a base object and a
related object. all base_object instances come from base_objects_by_id.
Note -- this depends on Django model internals.
"""
relationship_type, field = self.determine_relationship(related_model)
if relationship_type == self.MANY_TO_ONE:
return self._many_to_one_pivot(base_objects_by_id,
related_model, field)
elif relationship_type == self.M2M_ON_RELATED_MODEL:
return self._many_to_many_pivot(
base_objects_by_id, related_model, field.m2m_db_table(),
field.m2m_reverse_name(), field.m2m_column_name())
else:
assert relationship_type == self.M2M_ON_THIS_MODEL
return self._many_to_many_pivot(
base_objects_by_id, related_model, field.m2m_db_table(),
field.m2m_column_name(), field.m2m_reverse_name())
def _many_to_one_pivot(self, base_objects_by_id, related_model,
foreign_key_field):
"""
@returns a pivot iterator - see _get_pivot_iterator()
"""
filter_data = {foreign_key_field.name + '__pk__in':
base_objects_by_id.keys()}
for related_object in related_model.objects.filter(**filter_data):
# lookup base object in the dict, rather than grabbing it from the
# related object. we need to return instances from the dict, not
# fresh instances of the same models (and grabbing model instances
# from the related models incurs a DB query each time).
base_object_id = getattr(related_object, foreign_key_field.attname)
base_object = base_objects_by_id[base_object_id]
yield base_object, related_object
def _query_pivot_table(self, base_objects_by_id, pivot_table,
pivot_from_field, pivot_to_field):
"""
        @param base_objects_by_id dict of self.model instances indexed by ID;
               its keys select which rows to include
@param pivot_table the name of the pivot table
@param pivot_from_field a field name on pivot_table referencing
self.model
@param pivot_to_field a field name on pivot_table referencing the
related model.
        @returns a list of (base_id, related_id) ID pairs
"""
query = """
SELECT %(from_field)s, %(to_field)s
FROM %(table)s
WHERE %(from_field)s IN (%(id_list)s)
""" % dict(from_field=pivot_from_field,
to_field=pivot_to_field,
table=pivot_table,
id_list=','.join(str(id_) for id_
in base_objects_by_id.iterkeys()))
cursor = readonly_connection.connection().cursor()
cursor.execute(query)
return cursor.fetchall()
def _many_to_many_pivot(self, base_objects_by_id, related_model,
pivot_table, pivot_from_field, pivot_to_field):
"""
@param pivot_table: see _query_pivot_table
@param pivot_from_field: see _query_pivot_table
@param pivot_to_field: see _query_pivot_table
@returns a pivot iterator - see _get_pivot_iterator()
"""
id_pivot = self._query_pivot_table(base_objects_by_id, pivot_table,
pivot_from_field, pivot_to_field)
all_related_ids = list(set(related_id for base_id, related_id
in id_pivot))
related_objects_by_id = related_model.objects.in_bulk(all_related_ids)
for base_id, related_id in id_pivot:
yield base_objects_by_id[base_id], related_objects_by_id[related_id]
def populate_relationships(self, base_objects, related_model,
related_list_name):
"""
For each instance of this model in base_objects, add a field named
related_list_name listing all the related objects of type related_model.
related_model must be in a many-to-one or many-to-many relationship with
this model.
@param base_objects - list of instances of this model
@param related_model - model class related to this model
@param related_list_name - attribute name in which to store the related
object list.
"""
if not base_objects:
# if we don't bail early, we'll get a SQL error later
return
base_objects_by_id = dict((base_object._get_pk_val(), base_object)
for base_object in base_objects)
pivot_iterator = self._get_pivot_iterator(base_objects_by_id,
related_model)
for base_object in base_objects:
setattr(base_object, related_list_name, [])
for base_object, related_object in pivot_iterator:
getattr(base_object, related_list_name).append(related_object)
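    # A minimal usage sketch, assuming hypothetical Host and Label models
    # where Label is many-to-many related to Host:
    #
    #   hosts = list(Host.objects.all())
    #   Host.objects.populate_relationships(hosts, Label, 'label_list')
    #   for host in hosts:
    #       print host.hostname, [label.name for label in host.label_list]
    #
    # The related objects are fetched in bulk rather than once per host.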
class ModelWithInvalidQuerySet(dbmodels.query.QuerySet):
"""
QuerySet that handles delete() properly for models with an "invalid" bit
"""
def delete(self):
for model in self:
model.delete()
class ModelWithInvalidManager(ExtendedManager):
"""
Manager for objects with an "invalid" bit
"""
def get_query_set(self):
return ModelWithInvalidQuerySet(self.model)
class ValidObjectsManager(ModelWithInvalidManager):
"""
Manager returning only objects with invalid=False.
"""
def get_query_set(self):
queryset = super(ValidObjectsManager, self).get_query_set()
return queryset.filter(invalid=False)
class ModelExtensions(object):
"""\
Mixin with convenience functions for models, built on top of the
default Django model functions.
"""
# TODO: at least some of these functions really belong in a custom
# Manager class
field_dict = None
# subclasses should override if they want to support smart_get() by name
name_field = None
@classmethod
def get_field_dict(cls):
if cls.field_dict is None:
cls.field_dict = {}
for field in cls._meta.fields:
cls.field_dict[field.name] = field
return cls.field_dict
@classmethod
def clean_foreign_keys(cls, data):
"""\
        - Convert foreign key fields in data from <field>_id to just
          <field>.
        - Replace foreign key objects with their IDs.
This method modifies data in-place.
"""
for field in cls._meta.fields:
if not field.rel:
continue
if (field.attname != field.name and
field.attname in data):
data[field.name] = data[field.attname]
del data[field.attname]
if field.name not in data:
continue
value = data[field.name]
if isinstance(value, dbmodels.Model):
data[field.name] = value._get_pk_val()
@classmethod
def _convert_booleans(cls, data):
"""
Ensure BooleanFields actually get bool values. The Django MySQL
backend returns ints for BooleanFields, which is almost always not
a problem, but it can be annoying in certain situations.
"""
for field in cls._meta.fields:
if type(field) == dbmodels.BooleanField and field.name in data:
data[field.name] = bool(data[field.name])
# TODO(showard) - is there a way to not have to do this?
@classmethod
def provide_default_values(cls, data):
"""\
Provide default values for fields with default values which have
nothing passed in.
For CharField and TextField fields with "blank=True", if nothing
is passed, we fill in an empty string value, even if there's no
default set.
"""
new_data = dict(data)
field_dict = cls.get_field_dict()
for name, obj in field_dict.iteritems():
if data.get(name) is not None:
continue
if obj.default is not dbmodels.fields.NOT_PROVIDED:
new_data[name] = obj.default
elif (isinstance(obj, dbmodels.CharField) or
isinstance(obj, dbmodels.TextField)):
new_data[name] = ''
return new_data
@classmethod
def convert_human_readable_values(cls, data, to_human_readable=False):
"""\
Performs conversions on user-supplied field data, to make it
easier for users to pass human-readable data.
For all fields that have choice sets, convert their values
from human-readable strings to enum values, if necessary. This
allows users to pass strings instead of the corresponding
integer values.
For all foreign key fields, call smart_get with the supplied
data. This allows the user to pass either an ID value or
the name of the object as a string.
If to_human_readable=True, perform the inverse - i.e. convert
numeric values to human readable values.
This method modifies data in-place.
"""
field_dict = cls.get_field_dict()
for field_name in data:
if field_name not in field_dict or data[field_name] is None:
continue
field_obj = field_dict[field_name]
# convert enum values
if field_obj.choices:
for choice_data in field_obj.choices:
# choice_data is (value, name)
if to_human_readable:
from_val, to_val = choice_data
else:
to_val, from_val = choice_data
if from_val == data[field_name]:
data[field_name] = to_val
break
# convert foreign key values
elif field_obj.rel:
dest_obj = field_obj.rel.to.smart_get(data[field_name],
valid_only=False)
if to_human_readable:
if dest_obj.name_field is not None:
data[field_name] = getattr(dest_obj,
dest_obj.name_field)
else:
data[field_name] = dest_obj
@classmethod
def validate_field_names(cls, data):
'Checks for extraneous fields in data.'
errors = {}
field_dict = cls.get_field_dict()
for field_name in data:
if field_name not in field_dict:
errors[field_name] = 'No field of this name'
return errors
@classmethod
def prepare_data_args(cls, data, kwargs):
'Common preparation for add_object and update_object'
data = dict(data) # don't modify the default keyword arg
data.update(kwargs)
# must check for extraneous field names here, while we have the
# data in a dict
errors = cls.validate_field_names(data)
if errors:
raise ValidationError(errors)
cls.convert_human_readable_values(data)
return data
def _validate_unique(self):
"""\
Validate that unique fields are unique. Django manipulators do
this too, but they're a huge pain to use manually. Trust me.
"""
errors = {}
cls = type(self)
field_dict = self.get_field_dict()
manager = cls.get_valid_manager()
for field_name, field_obj in field_dict.iteritems():
if not field_obj.unique:
continue
value = getattr(self, field_name)
if value is None and field_obj.auto_created:
# don't bother checking autoincrement fields about to be
# generated
continue
existing_objs = manager.filter(**{field_name : value})
num_existing = existing_objs.count()
if num_existing == 0:
continue
if num_existing == 1 and existing_objs[0].id == self.id:
continue
errors[field_name] = (
'This value must be unique (%s)' % (value))
return errors
def _validate(self):
"""
First coerces all fields on this instance to their proper Python types.
Then runs validation on every field. Returns a dictionary of
field_name -> error_list.
Based on validate() from django.db.models.Model in Django 0.96, which
was removed in Django 1.0. It should reappear in a later version. See:
http://code.djangoproject.com/ticket/6845
"""
error_dict = {}
for f in self._meta.fields:
try:
python_value = f.to_python(
getattr(self, f.attname, f.get_default()))
except django.core.exceptions.ValidationError, e:
error_dict[f.name] = str(e)
continue
if not f.blank and not python_value:
error_dict[f.name] = 'This field is required.'
continue
setattr(self, f.attname, python_value)
return error_dict
def do_validate(self):
errors = self._validate()
unique_errors = self._validate_unique()
for field_name, error in unique_errors.iteritems():
errors.setdefault(field_name, error)
if errors:
raise ValidationError(errors)
# actually (externally) useful methods follow
@classmethod
def add_object(cls, data={}, **kwargs):
"""\
Returns a new object created with the given data (a dictionary
mapping field names to values). Merges any extra keyword args
into data.
"""
data = cls.prepare_data_args(data, kwargs)
data = cls.provide_default_values(data)
obj = cls(**data)
obj.do_validate()
obj.save()
return obj
def update_object(self, data={}, **kwargs):
"""\
Updates the object with the given data (a dictionary mapping
field names to values). Merges any extra keyword args into
data.
"""
data = self.prepare_data_args(data, kwargs)
for field_name, value in data.iteritems():
setattr(self, field_name, value)
self.do_validate()
self.save()
# see query_objects()
_SPECIAL_FILTER_KEYS = ('query_start', 'query_limit', 'sort_by',
'extra_args', 'extra_where', 'no_distinct')
@classmethod
def _extract_special_params(cls, filter_data):
"""
@returns a tuple of dicts (special_params, regular_filters), where
special_params contains the parameters we handle specially and
regular_filters is the remaining data to be handled by Django.
"""
regular_filters = dict(filter_data)
special_params = {}
for key in cls._SPECIAL_FILTER_KEYS:
if key in regular_filters:
special_params[key] = regular_filters.pop(key)
return special_params, regular_filters
@classmethod
def apply_presentation(cls, query, filter_data):
"""
Apply presentation parameters -- sorting and paging -- to the given
query.
@returns new query with presentation applied
"""
special_params, _ = cls._extract_special_params(filter_data)
sort_by = special_params.get('sort_by', None)
if sort_by:
assert isinstance(sort_by, list) or isinstance(sort_by, tuple)
query = query.extra(order_by=sort_by)
query_start = special_params.get('query_start', None)
query_limit = special_params.get('query_limit', None)
if query_start is not None:
if query_limit is None:
raise ValueError('Cannot pass query_start without query_limit')
# query_limit is passed as a page size
query_limit += query_start
return query[query_start:query_limit]
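    # For example, query_start=20 with query_limit=10 slices query[20:30]:
    # query_limit acts as a page size, not as an absolute end index.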
@classmethod
def query_objects(cls, filter_data, valid_only=True, initial_query=None,
apply_presentation=True):
"""\
Returns a QuerySet object for querying the given model_class
with the given filter_data. Optional special arguments in
filter_data include:
        -query_start: index of first result to return
-query_limit: maximum number of results to return
-sort_by: list of fields to sort on. prefixing a '-' onto a
field name changes the sort to descending order.
-extra_args: keyword args to pass to query.extra() (see Django
DB layer documentation)
-extra_where: extra WHERE clause to append
-no_distinct: if True, a DISTINCT will not be added to the SELECT
"""
special_params, regular_filters = cls._extract_special_params(
filter_data)
if initial_query is None:
if valid_only:
initial_query = cls.get_valid_manager()
else:
initial_query = cls.objects
query = initial_query.filter(**regular_filters)
use_distinct = not special_params.get('no_distinct', False)
if use_distinct:
query = query.distinct()
extra_args = special_params.get('extra_args', {})
extra_where = special_params.get('extra_where', None)
if extra_where:
# escape %'s
extra_where = cls.objects.escape_user_sql(extra_where)
extra_args.setdefault('where', []).append(extra_where)
if extra_args:
query = query.extra(**extra_args)
query = query._clone(klass=ReadonlyQuerySet)
if apply_presentation:
query = cls.apply_presentation(query, filter_data)
return query
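    # Illustrative call, assuming a model class MyModel that mixes in
    # ModelExtensions; the 'status' field is hypothetical:
    #
    #   rows = MyModel.query_objects({
    #       'status': 'Ready',     # plain Django filter
    #       'sort_by': ['-id'],    # special: descending sort
    #       'query_start': 0,      # special: paging offset
    #       'query_limit': 50,     # special: page size
    #   })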
@classmethod
def query_count(cls, filter_data, initial_query=None):
"""\
        Like query_objects, but retrieve only the count of results.
"""
filter_data.pop('query_start', None)
filter_data.pop('query_limit', None)
query = cls.query_objects(filter_data, initial_query=initial_query)
return query.count()
@classmethod
def clean_object_dicts(cls, field_dicts):
"""\
Take a list of dicts corresponding to object (as returned by
query.values()) and clean the data to be more suitable for
returning to the user.
"""
for field_dict in field_dicts:
cls.clean_foreign_keys(field_dict)
cls._convert_booleans(field_dict)
cls.convert_human_readable_values(field_dict,
to_human_readable=True)
@classmethod
def list_objects(cls, filter_data, initial_query=None):
"""\
Like query_objects, but return a list of dictionaries.
"""
query = cls.query_objects(filter_data, initial_query=initial_query)
extra_fields = query.query.extra_select.keys()
field_dicts = [model_object.get_object_dict(extra_fields=extra_fields)
for model_object in query]
return field_dicts
@classmethod
def smart_get(cls, id_or_name, valid_only=True):
"""\
smart_get(integer) -> get object by ID
smart_get(string) -> get object by name_field
"""
if valid_only:
manager = cls.get_valid_manager()
else:
manager = cls.objects
if isinstance(id_or_name, (int, long)):
return manager.get(pk=id_or_name)
if isinstance(id_or_name, basestring) and hasattr(cls, 'name_field'):
return manager.get(**{cls.name_field : id_or_name})
raise ValueError(
'Invalid positional argument: %s (%s)' % (id_or_name,
type(id_or_name)))
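    # Example, assuming a hypothetical Label model with name_field = 'name':
    #
    #   Label.smart_get(42)        # lookup by primary key
    #   Label.smart_get('burnin')  # lookup by the name_field column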
@classmethod
def smart_get_bulk(cls, id_or_name_list):
invalid_inputs = []
result_objects = []
for id_or_name in id_or_name_list:
try:
result_objects.append(cls.smart_get(id_or_name))
except cls.DoesNotExist:
invalid_inputs.append(id_or_name)
if invalid_inputs:
raise cls.DoesNotExist('The following %ss do not exist: %s'
% (cls.__name__.lower(),
', '.join(invalid_inputs)))
return result_objects
def get_object_dict(self, extra_fields=None):
"""\
Return a dictionary mapping fields to this object's values. @param
extra_fields: list of extra attribute names to include, in addition to
the fields defined on this object.
"""
fields = self.get_field_dict().keys()
if extra_fields:
fields += extra_fields
object_dict = dict((field_name, getattr(self, field_name))
for field_name in fields)
self.clean_object_dicts([object_dict])
self._postprocess_object_dict(object_dict)
return object_dict
def _postprocess_object_dict(self, object_dict):
"""For subclasses to override."""
pass
@classmethod
def get_valid_manager(cls):
return cls.objects
def _record_attributes(self, attributes):
"""
See on_attribute_changed.
"""
assert not isinstance(attributes, basestring)
self._recorded_attributes = dict((attribute, getattr(self, attribute))
for attribute in attributes)
def _check_for_updated_attributes(self):
"""
See on_attribute_changed.
"""
for attribute, original_value in self._recorded_attributes.iteritems():
new_value = getattr(self, attribute)
if original_value != new_value:
self.on_attribute_changed(attribute, original_value)
self._record_attributes(self._recorded_attributes.keys())
def on_attribute_changed(self, attribute, old_value):
"""
Called whenever an attribute is updated. To be overridden.
To use this method, you must:
* call _record_attributes() from __init__() (after making the super
call) with a list of attributes for which you want to be notified upon
change.
* call _check_for_updated_attributes() from save().
"""
pass
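    # A minimal subclass sketch of the protocol described above; the Job
    # model and its "status" field are assumptions for illustration:
    #
    #   class Job(dbmodels.Model, ModelExtensions):
    #       def __init__(self, *args, **kwargs):
    #           super(Job, self).__init__(*args, **kwargs)
    #           self._record_attributes(['status'])
    #
    #       def save(self, *args, **kwargs):
    #           super(Job, self).save(*args, **kwargs)
    #           self._check_for_updated_attributes()
    #
    #       def on_attribute_changed(self, attribute, old_value):
    #           print 'attribute %s changed from %r' % (attribute, old_value)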
class ModelWithInvalid(ModelExtensions):
"""
Overrides model methods save() and delete() to support invalidation in
place of actual deletion. Subclasses must have a boolean "invalid"
field.
"""
def save(self, *args, **kwargs):
first_time = (self.id is None)
if first_time:
# see if this object was previously added and invalidated
my_name = getattr(self, self.name_field)
filters = {self.name_field : my_name, 'invalid' : True}
try:
old_object = self.__class__.objects.get(**filters)
self.resurrect_object(old_object)
except self.DoesNotExist:
# no existing object
pass
super(ModelWithInvalid, self).save(*args, **kwargs)
def resurrect_object(self, old_object):
"""
Called when self is about to be saved for the first time and is actually
"undeleting" a previously deleted object. Can be overridden by
subclasses to copy data as desired from the deleted entry (but this
superclass implementation must normally be called).
"""
self.id = old_object.id
def clean_object(self):
"""
This method is called when an object is marked invalid.
Subclasses should override this to clean up relationships that
should no longer exist if the object were deleted.
"""
pass
def delete(self):
self.invalid = self.invalid
assert not self.invalid
self.invalid = True
self.save()
self.clean_object()
@classmethod
def get_valid_manager(cls):
return cls.valid_objects
class Manipulator(object):
"""
Force default manipulators to look only at valid objects -
otherwise they will match against invalid objects when checking
uniqueness.
"""
@classmethod
def _prepare(cls, model):
super(ModelWithInvalid.Manipulator, cls)._prepare(model)
cls.manager = model.valid_objects
class ModelWithAttributes(object):
"""
Mixin class for models that have an attribute model associated with them.
The attribute model is assumed to have its value field named "value".
"""
def _get_attribute_model_and_args(self, attribute):
"""
Subclasses should override this to return a tuple (attribute_model,
keyword_args), where attribute_model is a model class and keyword_args
is a dict of args to pass to attribute_model.objects.get() to get an
instance of the given attribute on this object.
"""
raise NotImplementedError
def set_attribute(self, attribute, value):
attribute_model, get_args = self._get_attribute_model_and_args(
attribute)
attribute_object, _ = attribute_model.objects.get_or_create(**get_args)
attribute_object.value = value
attribute_object.save()
def delete_attribute(self, attribute):
attribute_model, get_args = self._get_attribute_model_and_args(
attribute)
try:
attribute_model.objects.get(**get_args).delete()
except attribute_model.DoesNotExist:
pass
def set_or_delete_attribute(self, attribute, value):
if value is None:
self.delete_attribute(attribute)
else:
self.set_attribute(attribute, value)
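    # Sketch of the override contract, assuming a hypothetical HostAttribute
    # model with "host" and "attribute" columns plus the required "value"
    # field:
    #
    #   def _get_attribute_model_and_args(self, attribute):
    #       return HostAttribute, dict(host=self, attribute=attribute)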
class ModelWithHashManager(dbmodels.Manager):
"""Manager for use with the ModelWithHash abstract model class"""
def create(self, **kwargs):
raise Exception('ModelWithHash manager should use get_or_create() '
'instead of create()')
def get_or_create(self, **kwargs):
kwargs['the_hash'] = self.model._compute_hash(**kwargs)
return super(ModelWithHashManager, self).get_or_create(**kwargs)
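    # Usage sketch: subclasses implement _compute_hash(), and callers always
    # go through get_or_create(); the model name below is hypothetical:
    #
    #   spec, created = KernelSpec.objects.get_or_create(url=some_url)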
class ModelWithHash(dbmodels.Model):
"""Superclass with methods for dealing with a hash column"""
the_hash = dbmodels.CharField(max_length=40, unique=True)
objects = ModelWithHashManager()
class Meta:
abstract = True
@classmethod
def _compute_hash(cls, **kwargs):
raise NotImplementedError('Subclasses must override _compute_hash()')
def save(self, force_insert=False, **kwargs):
"""Prevents saving the model in most cases
We want these models to be immutable, so the generic save() operation
        will not work. These models should be instantiated through the
        model.objects.get_or_create() method instead.
The exception is that save(force_insert=True) will be allowed, since
that creates a new row. However, the preferred way to make instances of
these models is through the get_or_create() method.
"""
        # Allow a forced insert to happen; if it's a duplicate, the unique
        # constraint will catch it later anyways.
        if not force_insert:
            raise Exception('ModelWithHash is immutable')
super(ModelWithHash, self).save(force_insert=force_insert, **kwargs)
| gpl-2.0 |
djangosporti/python-oauth2 | tests/test_oauth.py | 301 | 53269 | # -*- coding: utf-8 -*-
"""
The MIT License
Copyright (c) 2009 Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import os
import unittest
import oauth2 as oauth
import random
import time
import urllib
import urlparse
from types import ListType
import mock
import httplib2
# Fix for python2.5 compatibility
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
sys.path[0:0] = [os.path.join(os.path.dirname(__file__), ".."),]
class TestError(unittest.TestCase):
def test_message(self):
try:
raise oauth.Error
except oauth.Error, e:
self.assertEqual(e.message, 'OAuth error occurred.')
msg = 'OMG THINGS BROKE!!!!'
try:
raise oauth.Error(msg)
except oauth.Error, e:
self.assertEqual(e.message, msg)
def test_str(self):
try:
raise oauth.Error
except oauth.Error, e:
self.assertEquals(str(e), 'OAuth error occurred.')
class TestGenerateFunctions(unittest.TestCase):
def test_build_auth_header(self):
header = oauth.build_authenticate_header()
self.assertEqual(header['WWW-Authenticate'], 'OAuth realm=""')
self.assertEqual(len(header), 1)
realm = 'http://example.myrealm.com/'
header = oauth.build_authenticate_header(realm)
self.assertEqual(header['WWW-Authenticate'], 'OAuth realm="%s"' %
realm)
self.assertEqual(len(header), 1)
def test_build_xoauth_string(self):
consumer = oauth.Consumer('consumer_token', 'consumer_secret')
token = oauth.Token('user_token', 'user_secret')
url = "https://mail.google.com/mail/b/[email protected]/imap/"
xoauth_string = oauth.build_xoauth_string(url, consumer, token)
method, oauth_url, oauth_string = xoauth_string.split(' ')
self.assertEqual("GET", method)
self.assertEqual(url, oauth_url)
returned = {}
parts = oauth_string.split(',')
for part in parts:
var, val = part.split('=')
returned[var] = val.strip('"')
self.assertEquals('HMAC-SHA1', returned['oauth_signature_method'])
self.assertEquals('user_token', returned['oauth_token'])
self.assertEquals('consumer_token', returned['oauth_consumer_key'])
self.assertTrue('oauth_signature' in returned, 'oauth_signature')
def test_escape(self):
string = 'http://whatever.com/~someuser/?test=test&other=other'
self.assert_('~' in oauth.escape(string))
string = '../../../../../../../etc/passwd'
self.assert_('../' not in oauth.escape(string))
def test_gen_nonce(self):
nonce = oauth.generate_nonce()
self.assertEqual(len(nonce), 8)
nonce = oauth.generate_nonce(20)
self.assertEqual(len(nonce), 20)
def test_gen_verifier(self):
verifier = oauth.generate_verifier()
self.assertEqual(len(verifier), 8)
verifier = oauth.generate_verifier(16)
self.assertEqual(len(verifier), 16)
def test_gen_timestamp(self):
exp = int(time.time())
now = oauth.generate_timestamp()
self.assertEqual(exp, now)
class TestConsumer(unittest.TestCase):
def setUp(self):
self.key = 'my-key'
self.secret = 'my-secret'
self.consumer = oauth.Consumer(key=self.key, secret=self.secret)
def test_init(self):
self.assertEqual(self.consumer.key, self.key)
self.assertEqual(self.consumer.secret, self.secret)
def test_basic(self):
self.assertRaises(ValueError, lambda: oauth.Consumer(None, None))
self.assertRaises(ValueError, lambda: oauth.Consumer('asf', None))
self.assertRaises(ValueError, lambda: oauth.Consumer(None, 'dasf'))
def test_str(self):
res = dict(parse_qsl(str(self.consumer)))
self.assertTrue('oauth_consumer_key' in res)
self.assertTrue('oauth_consumer_secret' in res)
self.assertEquals(res['oauth_consumer_key'], self.consumer.key)
self.assertEquals(res['oauth_consumer_secret'], self.consumer.secret)
class TestToken(unittest.TestCase):
def setUp(self):
self.key = 'my-key'
self.secret = 'my-secret'
self.token = oauth.Token(self.key, self.secret)
def test_basic(self):
self.assertRaises(ValueError, lambda: oauth.Token(None, None))
self.assertRaises(ValueError, lambda: oauth.Token('asf', None))
self.assertRaises(ValueError, lambda: oauth.Token(None, 'dasf'))
def test_init(self):
self.assertEqual(self.token.key, self.key)
self.assertEqual(self.token.secret, self.secret)
self.assertEqual(self.token.callback, None)
self.assertEqual(self.token.callback_confirmed, None)
self.assertEqual(self.token.verifier, None)
def test_set_callback(self):
self.assertEqual(self.token.callback, None)
self.assertEqual(self.token.callback_confirmed, None)
cb = 'http://www.example.com/my-callback'
self.token.set_callback(cb)
self.assertEqual(self.token.callback, cb)
self.assertEqual(self.token.callback_confirmed, 'true')
self.token.set_callback(None)
self.assertEqual(self.token.callback, None)
# TODO: The following test should probably not pass, but it does
# To fix this, check for None and unset 'true' in set_callback
# Additionally, should a confirmation truly be done of the callback?
self.assertEqual(self.token.callback_confirmed, 'true')
def test_set_verifier(self):
self.assertEqual(self.token.verifier, None)
v = oauth.generate_verifier()
self.token.set_verifier(v)
self.assertEqual(self.token.verifier, v)
self.token.set_verifier()
self.assertNotEqual(self.token.verifier, v)
self.token.set_verifier('')
self.assertEqual(self.token.verifier, '')
def test_get_callback_url(self):
self.assertEqual(self.token.get_callback_url(), None)
self.token.set_verifier()
self.assertEqual(self.token.get_callback_url(), None)
cb = 'http://www.example.com/my-callback?save=1&return=true'
v = oauth.generate_verifier()
self.token.set_callback(cb)
self.token.set_verifier(v)
url = self.token.get_callback_url()
verifier_str = '&oauth_verifier=%s' % v
self.assertEqual(url, '%s%s' % (cb, verifier_str))
cb = 'http://www.example.com/my-callback-no-query'
v = oauth.generate_verifier()
self.token.set_callback(cb)
self.token.set_verifier(v)
url = self.token.get_callback_url()
verifier_str = '?oauth_verifier=%s' % v
self.assertEqual(url, '%s%s' % (cb, verifier_str))
def test_to_string(self):
string = 'oauth_token_secret=%s&oauth_token=%s' % (self.secret,
self.key)
self.assertEqual(self.token.to_string(), string)
self.token.set_callback('http://www.example.com/my-callback')
string += '&oauth_callback_confirmed=true'
self.assertEqual(self.token.to_string(), string)
def _compare_tokens(self, new):
self.assertEqual(self.token.key, new.key)
self.assertEqual(self.token.secret, new.secret)
# TODO: What about copying the callback to the new token?
# self.assertEqual(self.token.callback, new.callback)
self.assertEqual(self.token.callback_confirmed,
new.callback_confirmed)
# TODO: What about copying the verifier to the new token?
# self.assertEqual(self.token.verifier, new.verifier)
    def test_to_string_simple(self):
tok = oauth.Token('tooken', 'seecret')
self.assertEqual(str(tok), 'oauth_token_secret=seecret&oauth_token=tooken')
def test_from_string(self):
self.assertRaises(ValueError, lambda: oauth.Token.from_string(''))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('blahblahblah'))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('blah=blah'))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token_secret=asfdasf'))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token_secret='))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=asfdasf'))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token='))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=&oauth_token_secret='))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=tooken%26oauth_token_secret=seecret'))
string = self.token.to_string()
new = oauth.Token.from_string(string)
self._compare_tokens(new)
self.token.set_callback('http://www.example.com/my-callback')
string = self.token.to_string()
new = oauth.Token.from_string(string)
self._compare_tokens(new)
class ReallyEqualMixin:
def failUnlessReallyEqual(self, a, b, msg=None):
self.failUnlessEqual(a, b, msg=msg)
self.failUnlessEqual(type(a), type(b), msg="a :: %r, b :: %r, %r" % (a, b, msg))
class TestFuncs(unittest.TestCase):
def test_to_unicode(self):
self.failUnlessRaises(TypeError, oauth.to_unicode, '\xae')
self.failUnlessRaises(TypeError, oauth.to_unicode_optional_iterator, '\xae')
self.failUnlessRaises(TypeError, oauth.to_unicode_optional_iterator, ['\xae'])
self.failUnlessEqual(oauth.to_unicode(':-)'), u':-)')
self.failUnlessEqual(oauth.to_unicode(u'\u00ae'), u'\u00ae')
self.failUnlessEqual(oauth.to_unicode('\xc2\xae'), u'\u00ae')
self.failUnlessEqual(oauth.to_unicode_optional_iterator([':-)']), [u':-)'])
self.failUnlessEqual(oauth.to_unicode_optional_iterator([u'\u00ae']), [u'\u00ae'])
class TestRequest(unittest.TestCase, ReallyEqualMixin):
def test_setter(self):
url = "http://example.com"
method = "GET"
req = oauth.Request(method)
self.assertTrue(not hasattr(req, 'url') or req.url is None)
self.assertTrue(not hasattr(req, 'normalized_url') or req.normalized_url is None)
def test_deleter(self):
url = "http://example.com"
method = "GET"
req = oauth.Request(method, url)
try:
del req.url
url = req.url
self.fail("AttributeError should have been raised on empty url.")
except AttributeError:
pass
except Exception, e:
self.fail(str(e))
def test_url(self):
url1 = "http://example.com:80/foo.php"
url2 = "https://example.com:443/foo.php"
exp1 = "http://example.com/foo.php"
exp2 = "https://example.com/foo.php"
method = "GET"
req = oauth.Request(method, url1)
self.assertEquals(req.normalized_url, exp1)
self.assertEquals(req.url, url1)
req = oauth.Request(method, url2)
self.assertEquals(req.normalized_url, exp2)
self.assertEquals(req.url, url2)
def test_bad_url(self):
request = oauth.Request()
try:
request.url = "ftp://example.com"
self.fail("Invalid URL scheme was accepted.")
except ValueError:
pass
def test_unset_consumer_and_token(self):
consumer = oauth.Consumer('my_consumer_key', 'my_consumer_secret')
token = oauth.Token('my_key', 'my_secret')
request = oauth.Request("GET", "http://example.com/fetch.php")
request.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer,
token)
self.assertEquals(consumer.key, request['oauth_consumer_key'])
self.assertEquals(token.key, request['oauth_token'])
def test_no_url_set(self):
consumer = oauth.Consumer('my_consumer_key', 'my_consumer_secret')
token = oauth.Token('my_key', 'my_secret')
request = oauth.Request()
try:
try:
request.sign_request(oauth.SignatureMethod_HMAC_SHA1(),
consumer, token)
except TypeError:
self.fail("Signature method didn't check for a normalized URL.")
except ValueError:
pass
def test_url_query(self):
url = "https://www.google.com/m8/feeds/contacts/default/full/?alt=json&max-contacts=10"
normalized_url = urlparse.urlunparse(urlparse.urlparse(url)[:3] + (None, None, None))
method = "GET"
req = oauth.Request(method, url)
self.assertEquals(req.url, url)
self.assertEquals(req.normalized_url, normalized_url)
def test_get_parameter(self):
url = "http://example.com"
method = "GET"
params = {'oauth_consumer' : 'asdf'}
req = oauth.Request(method, url, parameters=params)
self.assertEquals(req.get_parameter('oauth_consumer'), 'asdf')
self.assertRaises(oauth.Error, req.get_parameter, 'blah')
def test_get_nonoauth_parameters(self):
oauth_params = {
'oauth_consumer': 'asdfasdfasdf'
}
other_params = {
u'foo': u'baz',
u'bar': u'foo',
u'multi': [u'FOO',u'BAR'],
u'uni_utf8': u'\xae',
u'uni_unicode': u'\u00ae',
u'uni_unicode_2': u'åÅøØ',
}
params = oauth_params
params.update(other_params)
req = oauth.Request("GET", "http://example.com", params)
self.assertEquals(other_params, req.get_nonoauth_parameters())
def test_to_header(self):
realm = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", realm, params)
header, value = req.to_header(realm).items()[0]
parts = value.split('OAuth ')
vars = parts[1].split(', ')
self.assertTrue(len(vars), (len(params) + 1))
res = {}
for v in vars:
var, val = v.split('=')
res[var] = urllib.unquote(val.strip('"'))
self.assertEquals(realm, res['realm'])
del res['realm']
self.assertTrue(len(res), len(params))
for key, val in res.items():
self.assertEquals(val, params.get(key))
def test_to_postdata_nonascii(self):
realm = "http://sp.example.com/"
params = {
'nonasciithing': u'q\xbfu\xe9 ,aasp u?..a.s',
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", realm, params)
self.failUnlessReallyEqual(req.to_postdata(), 'nonasciithing=q%C2%BFu%C3%A9%20%2Caasp%20u%3F..a.s&oauth_nonce=4572616e48616d6d65724c61686176&oauth_timestamp=137131200&oauth_consumer_key=0685bd9184jfhq22&oauth_signature_method=HMAC-SHA1&oauth_version=1.0&oauth_token=ad180jjd733klru7&oauth_signature=wOJIO9A2W5mFwDgiDvZbTSMK%252FPY%253D')
def test_to_postdata(self):
realm = "http://sp.example.com/"
params = {
'multi': ['FOO','BAR'],
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", realm, params)
flat = [('multi','FOO'),('multi','BAR')]
del params['multi']
flat.extend(params.items())
kf = lambda x: x[0]
self.assertEquals(sorted(flat, key=kf), sorted(parse_qsl(req.to_postdata()), key=kf))
def test_to_url(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", url, params)
exp = urlparse.urlparse("%s?%s" % (url, urllib.urlencode(params)))
res = urlparse.urlparse(req.to_url())
self.assertEquals(exp.scheme, res.scheme)
self.assertEquals(exp.netloc, res.netloc)
self.assertEquals(exp.path, res.path)
a = parse_qs(exp.query)
b = parse_qs(res.query)
self.assertEquals(a, b)
def test_to_url_with_query(self):
url = "https://www.google.com/m8/feeds/contacts/default/full/?alt=json&max-contacts=10"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", url, params)
# Note: the url above already has query parameters, so append new ones with &
exp = urlparse.urlparse("%s&%s" % (url, urllib.urlencode(params)))
res = urlparse.urlparse(req.to_url())
self.assertEquals(exp.scheme, res.scheme)
self.assertEquals(exp.netloc, res.netloc)
self.assertEquals(exp.path, res.path)
a = parse_qs(exp.query)
b = parse_qs(res.query)
self.assertTrue('alt' in b)
self.assertTrue('max-contacts' in b)
self.assertEquals(b['alt'], ['json'])
self.assertEquals(b['max-contacts'], ['10'])
self.assertEquals(a, b)
def test_signature_base_string_nonascii_nonutf8(self):
consumer = oauth.Consumer('consumer_token', 'consumer_secret')
url = u'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc\u2766,+CA'
req = oauth.Request("GET", url)
self.failUnlessReallyEqual(req.normalized_url, u'http://api.simplegeo.com/1.0/places/address.json')
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'WhufgeZKyYpKsI70GZaiDaYwl6g=')
url = 'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc\xe2\x9d\xa6,+CA'
req = oauth.Request("GET", url)
self.failUnlessReallyEqual(req.normalized_url, u'http://api.simplegeo.com/1.0/places/address.json')
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'WhufgeZKyYpKsI70GZaiDaYwl6g=')
url = 'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc%E2%9D%A6,+CA'
req = oauth.Request("GET", url)
self.failUnlessReallyEqual(req.normalized_url, u'http://api.simplegeo.com/1.0/places/address.json')
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'WhufgeZKyYpKsI70GZaiDaYwl6g=')
url = u'http://api.simplegeo.com:80/1.0/places/address.json?q=monkeys&category=animal&address=41+Decatur+St,+San+Francisc%E2%9D%A6,+CA'
req = oauth.Request("GET", url)
self.failUnlessReallyEqual(req.normalized_url, u'http://api.simplegeo.com/1.0/places/address.json')
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'WhufgeZKyYpKsI70GZaiDaYwl6g=')
def test_signature_base_string_with_query(self):
url = "https://www.google.com/m8/feeds/contacts/default/full/?alt=json&max-contacts=10"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", url, params)
self.assertEquals(req.normalized_url, 'https://www.google.com/m8/feeds/contacts/default/full/')
self.assertEquals(req.url, 'https://www.google.com/m8/feeds/contacts/default/full/?alt=json&max-contacts=10')
normalized_params = parse_qsl(req.get_normalized_parameters())
self.assertTrue(len(normalized_params), len(params) + 2)
normalized_params = dict(normalized_params)
for key, value in params.iteritems():
if key == 'oauth_signature':
continue
self.assertEquals(value, normalized_params[key])
self.assertEquals(normalized_params['alt'], 'json')
self.assertEquals(normalized_params['max-contacts'], '10')
def test_get_normalized_parameters_empty(self):
url = "http://sp.example.com/?empty="
req = oauth.Request("GET", url)
res = req.get_normalized_parameters()
expected='empty='
self.assertEquals(expected, res)
def test_get_normalized_parameters_duplicate(self):
url = "http://example.com/v2/search/videos?oauth_nonce=79815175&oauth_timestamp=1295397962&oauth_consumer_key=mykey&oauth_signature_method=HMAC-SHA1&q=car&oauth_version=1.0&offset=10&oauth_signature=spWLI%2FGQjid7sQVd5%2FarahRxzJg%3D"
req = oauth.Request("GET", url)
res = req.get_normalized_parameters()
expected='oauth_consumer_key=mykey&oauth_nonce=79815175&oauth_signature_method=HMAC-SHA1&oauth_timestamp=1295397962&oauth_version=1.0&offset=10&q=car'
self.assertEquals(expected, res)
def test_get_normalized_parameters_from_url(self):
# example copied from
# https://github.com/ciaranj/node-oauth/blob/master/tests/oauth.js
        # which in turn says that it was copied from
# http://oauth.net/core/1.0/#sig_base_example .
url = "http://photos.example.net/photos?file=vacation.jpg&oauth_consumer_key=dpf43f3p2l4k3l03&oauth_nonce=kllo9940pd9333jh&oauth_signature_method=HMAC-SHA1&oauth_timestamp=1191242096&oauth_token=nnch734d00sl2jdk&oauth_version=1.0&size=original"
req = oauth.Request("GET", url)
res = req.get_normalized_parameters()
expected = 'file=vacation.jpg&oauth_consumer_key=dpf43f3p2l4k3l03&oauth_nonce=kllo9940pd9333jh&oauth_signature_method=HMAC-SHA1&oauth_timestamp=1191242096&oauth_token=nnch734d00sl2jdk&oauth_version=1.0&size=original'
self.assertEquals(expected, res)
def test_signing_base(self):
# example copied from
# https://github.com/ciaranj/node-oauth/blob/master/tests/oauth.js
        # which in turn says that it was copied from
# http://oauth.net/core/1.0/#sig_base_example .
url = "http://photos.example.net/photos?file=vacation.jpg&oauth_consumer_key=dpf43f3p2l4k3l03&oauth_nonce=kllo9940pd9333jh&oauth_signature_method=HMAC-SHA1&oauth_timestamp=1191242096&oauth_token=nnch734d00sl2jdk&oauth_version=1.0&size=original"
req = oauth.Request("GET", url)
sm = oauth.SignatureMethod_HMAC_SHA1()
consumer = oauth.Consumer('dpf43f3p2l4k3l03', 'foo')
key, raw = sm.signing_base(req, consumer, None)
expected = 'GET&http%3A%2F%2Fphotos.example.net%2Fphotos&file%3Dvacation.jpg%26oauth_consumer_key%3Ddpf43f3p2l4k3l03%26oauth_nonce%3Dkllo9940pd9333jh%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D1191242096%26oauth_token%3Dnnch734d00sl2jdk%26oauth_version%3D1.0%26size%3Doriginal'
self.assertEquals(expected, raw)
def test_get_normalized_parameters(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'multi': ['FOO','BAR', u'\u00ae', '\xc2\xae'],
'multi_same': ['FOO','FOO'],
'uni_utf8_bytes': '\xc2\xae',
'uni_unicode_object': u'\u00ae'
}
req = oauth.Request("GET", url, params)
res = req.get_normalized_parameters()
expected='multi=BAR&multi=FOO&multi=%C2%AE&multi=%C2%AE&multi_same=FOO&multi_same=FOO&oauth_consumer_key=0685bd9184jfhq22&oauth_nonce=4572616e48616d6d65724c61686176&oauth_signature_method=HMAC-SHA1&oauth_timestamp=137131200&oauth_token=ad180jjd733klru7&oauth_version=1.0&uni_unicode_object=%C2%AE&uni_utf8_bytes=%C2%AE'
self.assertEquals(expected, res)
def test_get_normalized_parameters_ignores_auth_signature(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_signature': "some-random-signature-%d" % random.randint(1000, 2000),
'oauth_token': "ad180jjd733klru7",
}
req = oauth.Request("GET", url, params)
res = req.get_normalized_parameters()
self.assertNotEquals(urllib.urlencode(sorted(params.items())), res)
foo = params.copy()
del foo["oauth_signature"]
self.assertEqual(urllib.urlencode(sorted(foo.items())), res)
def test_set_signature_method(self):
consumer = oauth.Consumer('key', 'secret')
client = oauth.Client(consumer)
class Blah:
pass
try:
client.set_signature_method(Blah())
self.fail("Client.set_signature_method() accepted invalid method.")
except ValueError:
pass
m = oauth.SignatureMethod_HMAC_SHA1()
client.set_signature_method(m)
self.assertEquals(m, client.method)
def test_get_normalized_string_escapes_spaces_properly(self):
url = "http://sp.example.com/"
params = {
"some_random_data": random.randint(100, 1000),
"data": "This data with a random number (%d) has spaces!" % random.randint(1000, 2000),
}
req = oauth.Request("GET", url, params)
res = req.get_normalized_parameters()
expected = urllib.urlencode(sorted(params.items())).replace('+', '%20')
self.assertEqual(expected, res)
@mock.patch('oauth2.Request.make_timestamp')
@mock.patch('oauth2.Request.make_nonce')
def test_request_nonutf8_bytes(self, mock_make_nonce, mock_make_timestamp):
mock_make_nonce.return_value = 5
mock_make_timestamp.return_value = 6
tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
con = oauth.Consumer(key="con-test-key", secret="con-test-secret")
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_token': tok.key,
'oauth_consumer_key': con.key
}
# If someone passes a sequence of bytes which is not ascii for
# url, we'll raise an exception as early as possible.
url = "http://sp.example.com/\x92" # It's actually cp1252-encoding...
self.assertRaises(TypeError, oauth.Request, method="GET", url=url, parameters=params)
        # And if they pass a unicode, then we'll use it.
url = u'http://sp.example.com/\u2019'
req = oauth.Request(method="GET", url=url, parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'cMzvCkhvLL57+sTIxLITTHfkqZk=')
# And if it is a utf-8-encoded-then-percent-encoded non-ascii
# thing, we'll decode it and use it.
url = "http://sp.example.com/%E2%80%99"
req = oauth.Request(method="GET", url=url, parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'yMLKOyNKC/DkyhUOb8DLSvceEWE=')
# Same thing with the params.
url = "http://sp.example.com/"
# If someone passes a sequence of bytes which is not ascii in
# params, we'll raise an exception as early as possible.
        params['non_oauth_thing'] = '\xae' # It's actually cp1252-encoded
self.assertRaises(TypeError, oauth.Request, method="GET", url=url, parameters=params)
# And if they pass a unicode, then we'll use it.
params['non_oauth_thing'] = u'\u2019'
req = oauth.Request(method="GET", url=url, parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_signature'], '0GU50m0v60CVDB5JnoBXnvvvKx4=')
# And if it is a utf-8-encoded non-ascii thing, we'll decode
# it and use it.
params['non_oauth_thing'] = '\xc2\xae'
req = oauth.Request(method="GET", url=url, parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_signature'], 'pqOCu4qvRTiGiXB8Z61Jsey0pMM=')
# Also if there are non-utf8 bytes in the query args.
url = "http://sp.example.com/?q=\x92" # cp1252
self.assertRaises(TypeError, oauth.Request, method="GET", url=url, parameters=params)
def test_request_hash_of_body(self):
tok = oauth.Token(key="token", secret="tok-test-secret")
con = oauth.Consumer(key="consumer", secret="con-test-secret")
# Example 1a from Appendix A.1 of
# http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
        # Except that we get a different result than they do.
params = {
'oauth_version': "1.0",
'oauth_token': tok.key,
'oauth_nonce': 10288510250934,
'oauth_timestamp': 1236874155,
'oauth_consumer_key': con.key
}
url = u"http://www.example.com/resource"
req = oauth.Request(method="PUT", url=url, parameters=params, body="Hello World!", is_form_encoded=False)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_body_hash'], 'Lve95gjOVATpfV8EL5X4nxwjKHE=')
self.failUnlessReallyEqual(req['oauth_signature'], 't+MX8l/0S8hdbVQL99nD0X1fPnM=')
# oauth-bodyhash.html A.1 has
# '08bUFF%2Fjmp59mWB7cSgCYBUpJ0U%3D', but I don't see how that
# is possible.
# Example 1b
params = {
'oauth_version': "1.0",
'oauth_token': tok.key,
'oauth_nonce': 10369470270925,
'oauth_timestamp': 1236874236,
'oauth_consumer_key': con.key
}
req = oauth.Request(method="PUT", url=url, parameters=params, body="Hello World!", is_form_encoded=False)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_body_hash'], 'Lve95gjOVATpfV8EL5X4nxwjKHE=')
self.failUnlessReallyEqual(req['oauth_signature'], 'CTFmrqJIGT7NsWJ42OrujahTtTc=')
# Appendix A.2
params = {
'oauth_version': "1.0",
'oauth_token': tok.key,
'oauth_nonce': 8628868109991,
'oauth_timestamp': 1238395022,
'oauth_consumer_key': con.key
}
req = oauth.Request(method="GET", url=url, parameters=params, is_form_encoded=False)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, None)
self.failUnlessReallyEqual(req['oauth_body_hash'], '2jmj7l5rSw0yVb/vlWAYkK/YBwk=')
self.failUnlessReallyEqual(req['oauth_signature'], 'Zhl++aWSP0O3/hYQ0CuBc7jv38I=')
def test_sign_request(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200"
}
tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
con = oauth.Consumer(key="con-test-key", secret="con-test-secret")
params['oauth_token'] = tok.key
params['oauth_consumer_key'] = con.key
req = oauth.Request(method="GET", url=url, parameters=params)
methods = {
'DX01TdHws7OninCLK9VztNTH1M4=': oauth.SignatureMethod_HMAC_SHA1(),
'con-test-secret&tok-test-secret': oauth.SignatureMethod_PLAINTEXT()
}
for exp, method in methods.items():
req.sign_request(method, con, tok)
self.assertEquals(req['oauth_signature_method'], method.name)
self.assertEquals(req['oauth_signature'], exp)
# Also if there are non-ascii chars in the URL.
url = "http://sp.example.com/\xe2\x80\x99" # utf-8 bytes
req = oauth.Request(method="GET", url=url, parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, tok)
self.assertEquals(req['oauth_signature'], 'loFvp5xC7YbOgd9exIO6TxB7H4s=')
url = u'http://sp.example.com/\u2019' # Python unicode object
req = oauth.Request(method="GET", url=url, parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, tok)
self.assertEquals(req['oauth_signature'], 'loFvp5xC7YbOgd9exIO6TxB7H4s=')
# Also if there are non-ascii chars in the query args.
url = "http://sp.example.com/?q=\xe2\x80\x99" # utf-8 bytes
req = oauth.Request(method="GET", url=url, parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, tok)
self.assertEquals(req['oauth_signature'], 'IBw5mfvoCsDjgpcsVKbyvsDqQaU=')
url = u'http://sp.example.com/?q=\u2019' # Python unicode object
req = oauth.Request(method="GET", url=url, parameters=params)
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), con, tok)
self.assertEquals(req['oauth_signature'], 'IBw5mfvoCsDjgpcsVKbyvsDqQaU=')
def test_from_request(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", url, params)
headers = req.to_header()
# Test from the headers
req = oauth.Request.from_request("GET", url, headers)
self.assertEquals(req.method, "GET")
self.assertEquals(req.url, url)
self.assertEquals(params, req.copy())
# Test with bad OAuth headers
bad_headers = {
'Authorization' : 'OAuth this is a bad header'
}
self.assertRaises(oauth.Error, oauth.Request.from_request, "GET",
url, bad_headers)
# Test getting from query string
qs = urllib.urlencode(params)
req = oauth.Request.from_request("GET", url, query_string=qs)
exp = parse_qs(qs, keep_blank_values=False)
for k, v in exp.iteritems():
exp[k] = urllib.unquote(v[0])
self.assertEquals(exp, req.copy())
        # Test that a from_request() call with no parameters returns None
req = oauth.Request.from_request("GET", url)
self.assertEquals(None, req)
def test_from_token_and_callback(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
req = oauth.Request.from_token_and_callback(tok)
self.assertFalse('oauth_callback' in req)
self.assertEquals(req['oauth_token'], tok.key)
req = oauth.Request.from_token_and_callback(tok, callback=url)
self.assertTrue('oauth_callback' in req)
self.assertEquals(req['oauth_callback'], url)
def test_from_consumer_and_token(self):
url = "http://sp.example.com/"
tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
tok.set_verifier('this_is_a_test_verifier')
con = oauth.Consumer(key="con-test-key", secret="con-test-secret")
req = oauth.Request.from_consumer_and_token(con, token=tok,
http_method="GET", http_url=url)
self.assertEquals(req['oauth_token'], tok.key)
self.assertEquals(req['oauth_consumer_key'], con.key)
self.assertEquals(tok.verifier, req['oauth_verifier'])
class SignatureMethod_Bad(oauth.SignatureMethod):
name = "BAD"
def signing_base(self, request, consumer, token):
return ""
def sign(self, request, consumer, token):
return "invalid-signature"
class TestServer(unittest.TestCase):
def setUp(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'multi': ['FOO','BAR'],
'foo': 59
}
self.consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
self.token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = self.token.key
params['oauth_consumer_key'] = self.consumer.key
self.request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
self.request.sign_request(signature_method, self.consumer, self.token)
def test_init(self):
server = oauth.Server(signature_methods={'HMAC-SHA1' : oauth.SignatureMethod_HMAC_SHA1()})
self.assertTrue('HMAC-SHA1' in server.signature_methods)
self.assertTrue(isinstance(server.signature_methods['HMAC-SHA1'],
oauth.SignatureMethod_HMAC_SHA1))
server = oauth.Server()
self.assertEquals(server.signature_methods, {})
def test_add_signature_method(self):
server = oauth.Server()
res = server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
self.assertTrue(len(res) == 1)
self.assertTrue('HMAC-SHA1' in res)
self.assertTrue(isinstance(res['HMAC-SHA1'],
oauth.SignatureMethod_HMAC_SHA1))
res = server.add_signature_method(oauth.SignatureMethod_PLAINTEXT())
self.assertTrue(len(res) == 2)
self.assertTrue('PLAINTEXT' in res)
self.assertTrue(isinstance(res['PLAINTEXT'],
oauth.SignatureMethod_PLAINTEXT))
def test_verify_request(self):
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
parameters = server.verify_request(self.request, self.consumer,
self.token)
self.assertTrue('bar' in parameters)
self.assertTrue('foo' in parameters)
self.assertTrue('multi' in parameters)
self.assertEquals(parameters['bar'], 'blerg')
self.assertEquals(parameters['foo'], 59)
self.assertEquals(parameters['multi'], ['FOO','BAR'])
def test_build_authenticate_header(self):
server = oauth.Server()
headers = server.build_authenticate_header('example.com')
self.assertTrue('WWW-Authenticate' in headers)
self.assertEquals('OAuth realm="example.com"',
headers['WWW-Authenticate'])
def test_no_version(self):
url = "http://sp.example.com/"
params = {
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'multi': ['FOO','BAR'],
'foo': 59
}
self.consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
self.token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = self.token.key
params['oauth_consumer_key'] = self.consumer.key
self.request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
self.request.sign_request(signature_method, self.consumer, self.token)
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
parameters = server.verify_request(self.request, self.consumer,
self.token)
def test_invalid_version(self):
url = "http://sp.example.com/"
params = {
'oauth_version': '222.9922',
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'multi': ['foo','bar'],
'foo': 59
}
consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = token.key
params['oauth_consumer_key'] = consumer.key
request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
request.sign_request(signature_method, consumer, token)
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
self.assertRaises(oauth.Error, server.verify_request, request, consumer, token)
def test_invalid_signature_method(self):
url = "http://sp.example.com/"
params = {
'oauth_version': '1.0',
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'multi': ['FOO','BAR'],
'foo': 59
}
consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = token.key
params['oauth_consumer_key'] = consumer.key
request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = SignatureMethod_Bad()
request.sign_request(signature_method, consumer, token)
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
self.assertRaises(oauth.Error, server.verify_request, request,
consumer, token)
def test_missing_signature(self):
url = "http://sp.example.com/"
params = {
'oauth_version': '1.0',
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'multi': ['FOO','BAR'],
'foo': 59
}
consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = token.key
params['oauth_consumer_key'] = consumer.key
request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
request.sign_request(signature_method, consumer, token)
del request['oauth_signature']
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
self.assertRaises(oauth.MissingSignature, server.verify_request,
request, consumer, token)
# Request Token: http://oauth-sandbox.sevengoslings.net/request_token
# Auth: http://oauth-sandbox.sevengoslings.net/authorize
# Access Token: http://oauth-sandbox.sevengoslings.net/access_token
# Two-legged: http://oauth-sandbox.sevengoslings.net/two_legged
# Three-legged: http://oauth-sandbox.sevengoslings.net/three_legged
# Key: bd37aed57e15df53
# Secret: 0e9e6413a9ef49510a4f68ed02cd
class TestClient(unittest.TestCase):
# oauth_uris = {
# 'request_token': '/request_token.php',
# 'access_token': '/access_token.php'
# }
oauth_uris = {
'request_token': '/request_token',
'authorize': '/authorize',
'access_token': '/access_token',
'two_legged': '/two_legged',
'three_legged': '/three_legged'
}
consumer_key = 'bd37aed57e15df53'
consumer_secret = '0e9e6413a9ef49510a4f68ed02cd'
host = 'http://oauth-sandbox.sevengoslings.net'
def setUp(self):
self.consumer = oauth.Consumer(key=self.consumer_key,
secret=self.consumer_secret)
self.body = {
'foo': 'bar',
'bar': 'foo',
'multi': ['FOO','BAR'],
'blah': 599999
}
def _uri(self, type):
uri = self.oauth_uris.get(type)
if uri is None:
raise KeyError("%s is not a valid OAuth URI type." % type)
return "%s%s" % (self.host, uri)
def create_simple_multipart_data(self, data):
boundary = '---Boundary-%d' % random.randint(1,1000)
crlf = '\r\n'
items = []
for key, value in data.iteritems():
items += [
'--'+boundary,
'Content-Disposition: form-data; name="%s"'%str(key),
'',
str(value),
]
items += ['', '--'+boundary+'--', '']
content_type = 'multipart/form-data; boundary=%s' % boundary
return content_type, crlf.join(items)
def test_init(self):
class Blah():
pass
try:
client = oauth.Client(Blah())
self.fail("Client.__init__() accepted invalid Consumer.")
except ValueError:
pass
consumer = oauth.Consumer('token', 'secret')
try:
client = oauth.Client(consumer, Blah())
self.fail("Client.__init__() accepted invalid Token.")
except ValueError:
pass
def test_access_token_get(self):
"""Test getting an access token via GET."""
client = oauth.Client(self.consumer, None)
resp, content = client.request(self._uri('request_token'), "GET")
self.assertEquals(int(resp['status']), 200)
def test_access_token_post(self):
"""Test getting an access token via POST."""
client = oauth.Client(self.consumer, None)
resp, content = client.request(self._uri('request_token'), "POST")
self.assertEquals(int(resp['status']), 200)
res = dict(parse_qsl(content))
self.assertTrue('oauth_token' in res)
self.assertTrue('oauth_token_secret' in res)
def _two_legged(self, method):
client = oauth.Client(self.consumer, None)
return client.request(self._uri('two_legged'), method,
body=urllib.urlencode(self.body))
def test_two_legged_post(self):
"""A test of a two-legged OAuth POST request."""
resp, content = self._two_legged("POST")
self.assertEquals(int(resp['status']), 200)
def test_two_legged_get(self):
"""A test of a two-legged OAuth GET request."""
resp, content = self._two_legged("GET")
self.assertEquals(int(resp['status']), 200)
@mock.patch('httplib2.Http.request')
def test_multipart_post_does_not_alter_body(self, mockHttpRequest):
random_result = random.randint(1,100)
data = {
'rand-%d'%random.randint(1,100):random.randint(1,100),
}
content_type, body = self.create_simple_multipart_data(data)
client = oauth.Client(self.consumer, None)
uri = self._uri('two_legged')
def mockrequest(cl, ur, **kw):
self.failUnless(cl is client)
self.failUnless(ur is uri)
self.failUnlessEqual(frozenset(kw.keys()), frozenset(['method', 'body', 'redirections', 'connection_type', 'headers']))
self.failUnlessEqual(kw['body'], body)
self.failUnlessEqual(kw['connection_type'], None)
self.failUnlessEqual(kw['method'], 'POST')
self.failUnlessEqual(kw['redirections'], httplib2.DEFAULT_MAX_REDIRECTS)
self.failUnless(isinstance(kw['headers'], dict))
return random_result
mockHttpRequest.side_effect = mockrequest
result = client.request(uri, 'POST', headers={'Content-Type':content_type}, body=body)
self.assertEqual(result, random_result)
@mock.patch('httplib2.Http.request')
def test_url_with_query_string(self, mockHttpRequest):
uri = 'http://example.com/foo/bar/?show=thundercats&character=snarf'
client = oauth.Client(self.consumer, None)
random_result = random.randint(1,100)
def mockrequest(cl, ur, **kw):
self.failUnless(cl is client)
self.failUnlessEqual(frozenset(kw.keys()), frozenset(['method', 'body', 'redirections', 'connection_type', 'headers']))
self.failUnlessEqual(kw['body'], '')
self.failUnlessEqual(kw['connection_type'], None)
self.failUnlessEqual(kw['method'], 'GET')
self.failUnlessEqual(kw['redirections'], httplib2.DEFAULT_MAX_REDIRECTS)
self.failUnless(isinstance(kw['headers'], dict))
req = oauth.Request.from_consumer_and_token(self.consumer, None,
http_method='GET', http_url=uri, parameters={})
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), self.consumer, None)
expected = parse_qsl(urlparse.urlparse(req.to_url()).query)
actual = parse_qsl(urlparse.urlparse(ur).query)
self.failUnlessEqual(len(expected), len(actual))
actual = dict(actual)
for key, value in expected:
if key not in ('oauth_signature', 'oauth_nonce', 'oauth_timestamp'):
self.failUnlessEqual(actual[key], value)
return random_result
mockHttpRequest.side_effect = mockrequest
client.request(uri, 'GET')
@mock.patch('httplib2.Http.request')
@mock.patch('oauth2.Request.from_consumer_and_token')
def test_multiple_values_for_a_key(self, mockReqConstructor, mockHttpRequest):
client = oauth.Client(self.consumer, None)
request = oauth.Request("GET", "http://example.com/fetch.php", parameters={'multi': ['1', '2']})
mockReqConstructor.return_value = request
client.request('http://whatever', 'POST', body='multi=1&multi=2')
self.failUnlessEqual(mockReqConstructor.call_count, 1)
self.failUnlessEqual(mockReqConstructor.call_args[1]['parameters'], {'multi': ['1', '2']})
self.failUnless('multi=1' in mockHttpRequest.call_args[1]['body'])
self.failUnless('multi=2' in mockHttpRequest.call_args[1]['body'])
if __name__ == "__main__":
unittest.main()
| mit |
2014c2g3/0623exam | static/Brython3.1.0-20150301-090019/Lib/weakref_1.py | 769 | 11495 | """Weak reference support for Python.
This module is an implementation of PEP 205:
http://www.python.org/dev/peps/pep-0205/
"""
# Naming convention: Variables named "wr" are weak reference objects;
# they are called this instead of "ref" to avoid name collisions with
# the module-global ref() function imported from _weakref.
from _weakref import (
getweakrefcount,
getweakrefs,
ref,
proxy,
CallableProxyType,
ProxyType,
ReferenceType)
from _weakrefset import WeakSet, _IterationGuard
import collections # Import after _weakref to avoid circular import.
ProxyTypes = (ProxyType, CallableProxyType)
__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
"WeakKeyDictionary", "ReferenceType", "ProxyType",
"CallableProxyType", "ProxyTypes", "WeakValueDictionary",
"WeakSet"]
class WeakValueDictionary(collections.MutableMapping):
"""Mapping class that references values weakly.
Entries in the dictionary will be discarded when no strong
    reference to the value exists anymore.
"""
# We inherit the constructor without worrying about the input
# dictionary; since it uses our .update() method, we get the right
# checks (if the other dictionary is a WeakValueDictionary,
# objects are unwrapped on the way out, and we always wrap on the
# way in).
def __init__(self, *args, **kw):
def remove(wr, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(wr.key)
else:
del self.data[wr.key]
self._remove = remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
self.data = d = {}
self.update(*args, **kw)
def _commit_removals(self):
l = self._pending_removals
d = self.data
# We shouldn't encounter any KeyError, because this method should
# always be called *before* mutating the dict.
while l:
del d[l.pop()]
def __getitem__(self, key):
o = self.data[key]()
if o is None:
raise KeyError(key)
else:
return o
def __delitem__(self, key):
if self._pending_removals:
self._commit_removals()
del self.data[key]
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __contains__(self, key):
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def __repr__(self):
return "<WeakValueDictionary at %s>" % id(self)
def __setitem__(self, key, value):
if self._pending_removals:
self._commit_removals()
self.data[key] = KeyedRef(value, self._remove, key)
def copy(self):
new = WeakValueDictionary()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[key] = o
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[deepcopy(key, memo)] = o
return new
def get(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
return default
else:
o = wr()
if o is None:
                # This should only happen when the referent has died but the
                # callback hasn't removed the entry yet; treat the key as absent.
return default
else:
return o
def items(self):
with _IterationGuard(self):
for k, wr in self.data.items():
v = wr()
if v is not None:
yield k, v
def keys(self):
with _IterationGuard(self):
for k, wr in self.data.items():
if wr() is not None:
yield k
__iter__ = keys
def itervaluerefs(self):
"""Return an iterator that yields the weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
with _IterationGuard(self):
for wr in self.data.values():
yield wr
def values(self):
with _IterationGuard(self):
for wr in self.data.values():
obj = wr()
if obj is not None:
yield obj
def popitem(self):
if self._pending_removals:
self._commit_removals()
while True:
key, wr = self.data.popitem()
o = wr()
if o is not None:
return key, o
def pop(self, key, *args):
if self._pending_removals:
self._commit_removals()
try:
o = self.data.pop(key)()
except KeyError:
if args:
return args[0]
raise
if o is None:
raise KeyError(key)
else:
return o
def setdefault(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
if self._pending_removals:
self._commit_removals()
self.data[key] = KeyedRef(default, self._remove, key)
return default
else:
return wr()
def update(self, dict=None, **kwargs):
if self._pending_removals:
self._commit_removals()
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, o in dict.items():
d[key] = KeyedRef(o, self._remove, key)
if len(kwargs):
self.update(kwargs)
def valuerefs(self):
"""Return a list of weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
return list(self.data.values())
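    # For example, the still-live values can be harvested by calling each
    # reference and discarding the dead ones:
    #
    #     live = [o for o in (r() for r in d.valuerefs()) if o is not None]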
class KeyedRef(ref):
"""Specialized reference that includes a key corresponding to the value.
This is used in the WeakValueDictionary to avoid having to create
a function object for each key stored in the mapping. A shared
callback object can use the 'key' attribute of a KeyedRef instead
of getting a reference to the key from an enclosing scope.
"""
__slots__ = "key",
def __new__(type, ob, callback, key):
self = ref.__new__(type, ob, callback)
self.key = key
return self
def __init__(self, ob, callback, key):
super().__init__(ob, callback)
class WeakKeyDictionary(collections.MutableMapping):
""" Mapping class that references keys weakly.
Entries in the dictionary will be discarded when there is no
longer a strong reference to the key. This can be used to
associate additional data with an object owned by other parts of
an application without adding attributes to those objects. This
can be especially useful with objects that override attribute
accesses.
"""
def __init__(self, dict=None):
self.data = {}
def remove(k, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(k)
else:
del self.data[k]
self._remove = remove
# A list of dead weakrefs (keys to be removed)
self._pending_removals = []
self._iterating = set()
if dict is not None:
self.update(dict)
def _commit_removals(self):
# NOTE: We don't need to call this method before mutating the dict,
# because a dead weakref never compares equal to a live weakref,
# even if they happened to refer to equal objects.
# However, it means keys may already have been removed.
l = self._pending_removals
d = self.data
while l:
try:
del d[l.pop()]
except KeyError:
pass
def __delitem__(self, key):
del self.data[ref(key)]
def __getitem__(self, key):
return self.data[ref(key)]
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __repr__(self):
return "<WeakKeyDictionary at %s>" % id(self)
def __setitem__(self, key, value):
self.data[ref(key, self._remove)] = value
def copy(self):
new = WeakKeyDictionary()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = value
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = deepcopy(value, memo)
return new
def get(self, key, default=None):
return self.data.get(ref(key),default)
def __contains__(self, key):
try:
wr = ref(key)
except TypeError:
return False
return wr in self.data
def items(self):
with _IterationGuard(self):
for wr, value in self.data.items():
key = wr()
if key is not None:
yield key, value
def keys(self):
with _IterationGuard(self):
for wr in self.data:
obj = wr()
if obj is not None:
yield obj
__iter__ = keys
def values(self):
with _IterationGuard(self):
for wr, value in self.data.items():
if wr() is not None:
yield value
def keyrefs(self):
"""Return a list of weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return list(self.data)
def popitem(self):
while True:
key, value = self.data.popitem()
o = key()
if o is not None:
return o, value
def pop(self, key, *args):
return self.data.pop(ref(key), *args)
def setdefault(self, key, default=None):
return self.data.setdefault(ref(key, self._remove),default)
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, value in dict.items():
d[ref(key, self._remove)] = value
if len(kwargs):
self.update(kwargs)
| gpl-3.0 |
zhukaixy/kbengine | kbe/src/lib/python/Lib/test/test_getopt.py | 173 | 6968 | # test_getopt.py
# David Goodger <[email protected]> 2000-08-19
from test.support import verbose, run_doctest, run_unittest, EnvironmentVarGuard
import unittest
import getopt
sentinel = object()
class GetoptTests(unittest.TestCase):
def setUp(self):
self.env = EnvironmentVarGuard()
if "POSIXLY_CORRECT" in self.env:
del self.env["POSIXLY_CORRECT"]
def tearDown(self):
self.env.__exit__()
del self.env
def assertError(self, *args, **kwargs):
self.assertRaises(getopt.GetoptError, *args, **kwargs)
def test_short_has_arg(self):
self.assertTrue(getopt.short_has_arg('a', 'a:'))
self.assertFalse(getopt.short_has_arg('a', 'a'))
self.assertError(getopt.short_has_arg, 'a', 'b')
def test_long_has_args(self):
has_arg, option = getopt.long_has_args('abc', ['abc='])
self.assertTrue(has_arg)
self.assertEqual(option, 'abc')
has_arg, option = getopt.long_has_args('abc', ['abc'])
self.assertFalse(has_arg)
self.assertEqual(option, 'abc')
has_arg, option = getopt.long_has_args('abc', ['abcd'])
self.assertFalse(has_arg)
self.assertEqual(option, 'abcd')
self.assertError(getopt.long_has_args, 'abc', ['def'])
self.assertError(getopt.long_has_args, 'abc', [])
self.assertError(getopt.long_has_args, 'abc', ['abcd','abcde'])
def test_do_shorts(self):
opts, args = getopt.do_shorts([], 'a', 'a', [])
self.assertEqual(opts, [('-a', '')])
self.assertEqual(args, [])
opts, args = getopt.do_shorts([], 'a1', 'a:', [])
self.assertEqual(opts, [('-a', '1')])
self.assertEqual(args, [])
#opts, args = getopt.do_shorts([], 'a=1', 'a:', [])
#self.assertEqual(opts, [('-a', '1')])
#self.assertEqual(args, [])
opts, args = getopt.do_shorts([], 'a', 'a:', ['1'])
self.assertEqual(opts, [('-a', '1')])
self.assertEqual(args, [])
opts, args = getopt.do_shorts([], 'a', 'a:', ['1', '2'])
self.assertEqual(opts, [('-a', '1')])
self.assertEqual(args, ['2'])
self.assertError(getopt.do_shorts, [], 'a1', 'a', [])
self.assertError(getopt.do_shorts, [], 'a', 'a:', [])
def test_do_longs(self):
opts, args = getopt.do_longs([], 'abc', ['abc'], [])
self.assertEqual(opts, [('--abc', '')])
self.assertEqual(args, [])
opts, args = getopt.do_longs([], 'abc=1', ['abc='], [])
self.assertEqual(opts, [('--abc', '1')])
self.assertEqual(args, [])
opts, args = getopt.do_longs([], 'abc=1', ['abcd='], [])
self.assertEqual(opts, [('--abcd', '1')])
self.assertEqual(args, [])
opts, args = getopt.do_longs([], 'abc', ['ab', 'abc', 'abcd'], [])
self.assertEqual(opts, [('--abc', '')])
self.assertEqual(args, [])
# Much like the preceding, except with a non-alpha character ("-") in
# option name that precedes "="; failed in
# http://python.org/sf/126863
opts, args = getopt.do_longs([], 'foo=42', ['foo-bar', 'foo=',], [])
self.assertEqual(opts, [('--foo', '42')])
self.assertEqual(args, [])
self.assertError(getopt.do_longs, [], 'abc=1', ['abc'], [])
self.assertError(getopt.do_longs, [], 'abc', ['abc='], [])
def test_getopt(self):
# note: the empty string between '-a' and '--beta' is significant:
# it simulates an empty string option argument ('-a ""') on the
# command line.
cmdline = ['-a', '1', '-b', '--alpha=2', '--beta', '-a', '3', '-a',
'', '--beta', 'arg1', 'arg2']
opts, args = getopt.getopt(cmdline, 'a:b', ['alpha=', 'beta'])
self.assertEqual(opts, [('-a', '1'), ('-b', ''),
('--alpha', '2'), ('--beta', ''),
('-a', '3'), ('-a', ''), ('--beta', '')])
# Note ambiguity of ('-b', '') and ('-a', '') above. This must be
# accounted for in the code that calls getopt().
self.assertEqual(args, ['arg1', 'arg2'])
self.assertError(getopt.getopt, cmdline, 'a:b', ['alpha', 'beta'])
def test_gnu_getopt(self):
# Test handling of GNU style scanning mode.
cmdline = ['-a', 'arg1', '-b', '1', '--alpha', '--beta=2']
# GNU style
opts, args = getopt.gnu_getopt(cmdline, 'ab:', ['alpha', 'beta='])
self.assertEqual(args, ['arg1'])
self.assertEqual(opts, [('-a', ''), ('-b', '1'),
('--alpha', ''), ('--beta', '2')])
# recognize "-" as an argument
opts, args = getopt.gnu_getopt(['-a', '-', '-b', '-'], 'ab:', [])
self.assertEqual(args, ['-'])
self.assertEqual(opts, [('-a', ''), ('-b', '-')])
# Posix style via +
opts, args = getopt.gnu_getopt(cmdline, '+ab:', ['alpha', 'beta='])
self.assertEqual(opts, [('-a', '')])
self.assertEqual(args, ['arg1', '-b', '1', '--alpha', '--beta=2'])
# Posix style via POSIXLY_CORRECT
self.env["POSIXLY_CORRECT"] = "1"
opts, args = getopt.gnu_getopt(cmdline, 'ab:', ['alpha', 'beta='])
self.assertEqual(opts, [('-a', '')])
self.assertEqual(args, ['arg1', '-b', '1', '--alpha', '--beta=2'])
def test_libref_examples(self):
s = """
Examples from the Library Reference: Doc/lib/libgetopt.tex
An example using only Unix style options:
>>> import getopt
>>> args = '-a -b -cfoo -d bar a1 a2'.split()
>>> args
['-a', '-b', '-cfoo', '-d', 'bar', 'a1', 'a2']
>>> optlist, args = getopt.getopt(args, 'abc:d:')
>>> optlist
[('-a', ''), ('-b', ''), ('-c', 'foo'), ('-d', 'bar')]
>>> args
['a1', 'a2']
Using long option names is equally easy:
>>> s = '--condition=foo --testing --output-file abc.def -x a1 a2'
>>> args = s.split()
>>> args
['--condition=foo', '--testing', '--output-file', 'abc.def', '-x', 'a1', 'a2']
>>> optlist, args = getopt.getopt(args, 'x', [
... 'condition=', 'output-file=', 'testing'])
>>> optlist
[('--condition', 'foo'), ('--testing', ''), ('--output-file', 'abc.def'), ('-x', '')]
>>> args
['a1', 'a2']
"""
import types
m = types.ModuleType("libreftest", s)
run_doctest(m, verbose)
def test_issue4629(self):
longopts, shortopts = getopt.getopt(['--help='], '', ['help='])
self.assertEqual(longopts, [('--help', '')])
longopts, shortopts = getopt.getopt(['--help=x'], '', ['help='])
self.assertEqual(longopts, [('--help', 'x')])
self.assertRaises(getopt.GetoptError, getopt.getopt, ['--help='], '', ['help'])
def test_main():
run_unittest(GetoptTests)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
SanchayanMaity/gem5 | tests/configs/realview-switcheroo-full.py | 18 | 2450 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.objects import *
from arm_generic import *
import switcheroo
root = LinuxArmFSSwitcheroo(
mem_class=DDR3_1600_x64,
cpu_classes=(AtomicSimpleCPU, TimingSimpleCPU, MinorCPU, DerivO3CPU)
).create_root()
# Setup a custom test method that uses the switcheroo tester that
# switches between CPU models.
run_test = switcheroo.run_test
| bsd-3-clause |
goldenbull/grpc | src/python/grpcio/tests/unit/_crust_over_core_over_links_face_interface_test.py | 5 | 6763 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Face compliance of the crust-over-core-over-gRPC-links stack."""
import collections
import unittest
import six
from grpc._adapter import _intermediary_low
from grpc._links import invocation
from grpc._links import service
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.core import implementations as core_implementations
from grpc.framework.crust import implementations as crust_implementations
from grpc.framework.foundation import logging_pool
from grpc.framework.interfaces.links import utilities
from tests.unit import test_common as grpc_test_common
from tests.unit.framework.common import test_constants
from tests.unit.framework.interfaces.face import test_cases
from tests.unit.framework.interfaces.face import test_interfaces
class _SerializationBehaviors(
collections.namedtuple(
'_SerializationBehaviors',
('request_serializers', 'request_deserializers', 'response_serializers',
'response_deserializers',))):
pass
def _serialization_behaviors_from_test_methods(test_methods):
request_serializers = {}
request_deserializers = {}
response_serializers = {}
response_deserializers = {}
for (group, method), test_method in six.iteritems(test_methods):
request_serializers[group, method] = test_method.serialize_request
request_deserializers[group, method] = test_method.deserialize_request
response_serializers[group, method] = test_method.serialize_response
response_deserializers[group, method] = test_method.deserialize_response
return _SerializationBehaviors(
request_serializers, request_deserializers, response_serializers,
response_deserializers)
class _Implementation(test_interfaces.Implementation):
def instantiate(
self, methods, method_implementations, multi_method_implementation):
pool = logging_pool.pool(test_constants.POOL_SIZE)
servicer = crust_implementations.servicer(
method_implementations, multi_method_implementation, pool)
serialization_behaviors = _serialization_behaviors_from_test_methods(
methods)
invocation_end_link = core_implementations.invocation_end_link()
service_end_link = core_implementations.service_end_link(
servicer, test_constants.DEFAULT_TIMEOUT,
test_constants.MAXIMUM_TIMEOUT)
service_grpc_link = service.service_link(
serialization_behaviors.request_deserializers,
serialization_behaviors.response_serializers)
port = service_grpc_link.add_port('[::]:0', None)
channel = _intermediary_low.Channel('localhost:%d' % port, None)
invocation_grpc_link = invocation.invocation_link(
channel, b'localhost', None,
serialization_behaviors.request_serializers,
serialization_behaviors.response_deserializers)
invocation_end_link.join_link(invocation_grpc_link)
invocation_grpc_link.join_link(invocation_end_link)
service_grpc_link.join_link(service_end_link)
service_end_link.join_link(service_grpc_link)
service_end_link.start()
invocation_end_link.start()
invocation_grpc_link.start()
service_grpc_link.start()
generic_stub = crust_implementations.generic_stub(invocation_end_link, pool)
# TODO(nathaniel): Add a "groups" attribute to _digest.TestServiceDigest.
group = next(iter(methods))[0]
# TODO(nathaniel): Add a "cardinalities_by_group" attribute to
# _digest.TestServiceDigest.
cardinalities = {
method: method_object.cardinality()
for (group, method), method_object in six.iteritems(methods)}
dynamic_stub = crust_implementations.dynamic_stub(
invocation_end_link, group, cardinalities, pool)
return generic_stub, {group: dynamic_stub}, (
invocation_end_link, invocation_grpc_link, service_grpc_link,
service_end_link, pool)
def destantiate(self, memo):
(invocation_end_link, invocation_grpc_link, service_grpc_link,
service_end_link, pool) = memo
invocation_end_link.stop(0).wait()
invocation_grpc_link.stop()
service_grpc_link.begin_stop()
service_end_link.stop(0).wait()
service_grpc_link.end_stop()
invocation_end_link.join_link(utilities.NULL_LINK)
invocation_grpc_link.join_link(utilities.NULL_LINK)
service_grpc_link.join_link(utilities.NULL_LINK)
service_end_link.join_link(utilities.NULL_LINK)
pool.shutdown(wait=True)
def invocation_metadata(self):
return grpc_test_common.INVOCATION_INITIAL_METADATA
def initial_metadata(self):
return grpc_test_common.SERVICE_INITIAL_METADATA
def terminal_metadata(self):
return grpc_test_common.SERVICE_TERMINAL_METADATA
def code(self):
return beta_interfaces.StatusCode.OK
def details(self):
return grpc_test_common.DETAILS
def metadata_transmitted(self, original_metadata, transmitted_metadata):
return original_metadata is None or grpc_test_common.metadata_transmitted(
original_metadata, transmitted_metadata)
def load_tests(loader, tests, pattern):
return unittest.TestSuite(
tests=tuple(
loader.loadTestsFromTestCase(test_case_class)
for test_case_class in test_cases.test_cases(_Implementation())))
if __name__ == '__main__':
unittest.main(verbosity=2)
| bsd-3-clause |
mujiansu/pip | pip/_vendor/requests/packages/chardet/gb2312prober.py | 2994 | 1681 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import GB2312DistributionAnalysis
from .mbcssm import GB2312SMModel
class GB2312Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(GB2312SMModel)
self._mDistributionAnalyzer = GB2312DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "GB2312"
| mit |
matthdsm/bioconda-recipes | recipes/peptide-shaker/1.16.16/peptide-shaker.py | 45 | 3272 | #!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
# Program Parameters
#
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
jar_file = 'PeptideShaker-1.16.16.jar'
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
"""Return the symlink-resolved, canonicalized directory-portion of path."""
return os.path.dirname(os.path.realpath(path))
def java_executable():
"""Return the executable name of the Java interpreter."""
java_home = getenv('JAVA_HOME')
java_bin = os.path.join('bin', 'java')
if java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
return 'java'
def jvm_opts(argv):
"""Construct list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
    The return value is a 4-tuple of the form:
      (memory_options, prop_options, passthrough_options, exec_dir)
    where the first three are lists of strings and exec_dir is a string or None.
"""
mem_opts = []
prop_opts = []
pass_args = []
exec_dir = None
for arg in argv:
if arg.startswith('-D'):
prop_opts.append(arg)
elif arg.startswith('-XX'):
prop_opts.append(arg)
elif arg.startswith('-Xm'):
mem_opts.append(arg)
elif arg.startswith('--exec_dir='):
exec_dir = arg.split('=')[1].strip('"').strip("'")
if not os.path.exists(exec_dir):
shutil.copytree(real_dirname(sys.argv[0]), exec_dir, symlinks=False, ignore=None)
else:
pass_args.append(arg)
# In the original shell script the test coded below read:
# if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
# To reproduce the behaviour of the above shell code fragment
# it is important to explictly check for equality with None
# in the second condition, so a null envar value counts as True!
if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
mem_opts = default_jvm_mem_opts
return (mem_opts, prop_opts, pass_args, exec_dir)
def main():
java = java_executable()
"""
PeptideShaker updates files relative to the path of the jar file.
In a multiuser setting, the option --exec_dir="exec_dir"
can be used as the location for the peptide-shaker distribution.
    If the exec_dir does not exist,
we copy the jar file, lib, and resources to the exec_dir directory.
"""
(mem_opts, prop_opts, pass_args, exec_dir) = jvm_opts(sys.argv[1:])
jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])
if pass_args != [] and pass_args[0].startswith('eu'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = os.path.join(jar_dir, jar_file)
java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
main()
| mit |
nguy/brawl4d | LMA/controller.py | 1 | 10240 | """ Support for LMA data display in brawl4d.
These are meant to be lightweight wrappers to coordinate data formats
understood by the lmatools package.
"""
import time
import numpy as np
from lmatools.flashsort.autosort.LMAarrayFile import LMAdataFile
from stormdrain.bounds import Bounds, BoundsFilter
from stormdrain.data import NamedArrayDataset, indexed
from stormdrain.pipeline import Branchpoint, coroutine, ItemModifier
from stormdrain.support.matplotlib.artistupdaters import PanelsScatterController
from stormdrain.support.matplotlib.poly_lasso import LassoPayloadController
class LMAAnimator(object):
def __init__(self, duration, variable='time'):
self.tstart = time.time()
self.duration = duration
def draw_frame(self, animator, time_fraction):
pass
def init_draw(self, animator):
pass
class LMAController(object):
""" Manages bounds object with LMA-specific criteria. Convenience functions for loading LMA data.
"""
z_alt_mapping = {'z':('alt', (lambda v: (v[0]*1.0e3 - 1.0e3, v[1]*1.0e3 + 1.0e3)) ) }
def __init__(self, *args, **kwargs):
super(LMAController, self).__init__(*args, **kwargs)
self.bounds = Bounds(chi2=(0.0, 1.0), stations=(6, 99))
self.default_color_bounds = Bounds(parent=self.bounds, charge=(-1,1))
self.datasets = set()
self.flash_datasets = set()
def pipeline_for_dataset(self, d, panels,
names4d=('lon', 'lat', 'alt', 'time'),
transform_mapping=None,
scatter_kwargs = {}
):
""" Set 4d_names to the spatial coordinate names in d that provide
longitude, latitude, altitude, and time. Default of
lon, lat, alt, and time which are assumed to be in deg, deg, meters, seconds
entries in the scatter_kwargs dictionary are passed as kwargs to the matplotlib
scatter call.
"""
# Set up dataset -> time-height bound filter -> brancher
branch = Branchpoint([])
brancher = branch.broadcast()
# strictly speaking, z in the map projection and MSL alt aren't the same - z is somewhat distorted by the projection.
# therefore, add some padding. filtered again later after projection.
quality_filter = BoundsFilter(target=brancher, bounds=self.bounds).filter()
if transform_mapping is None:
transform_mapping = self.z_alt_mapping
        # Use 'time', which is the name in panels.bounds, and not names4d[3],
        # which is linked to 'time' by transform_mapping if necessary
bound_filter = BoundsFilter(target=quality_filter, bounds=panels.bounds,
restrict_to=('time'), transform_mapping=transform_mapping)
filterer = bound_filter.filter()
d.target = filterer
# Set up brancher -> coordinate transform -> final_filter -> mutli-axis scatter updater
scatter_ctrl = PanelsScatterController(
panels=panels,
color_field=names4d[3],
default_color_bounds=self.default_color_bounds,
**scatter_kwargs)
scatter_outlet_broadcaster = scatter_ctrl.branchpoint
scatter_updater = scatter_outlet_broadcaster.broadcast()
final_bound_filter = BoundsFilter(target=scatter_updater, bounds=panels.bounds)
final_filterer = final_bound_filter.filter()
cs_transformer = panels.cs.project_points(
target=final_filterer,
x_coord='x', y_coord='y', z_coord='z',
lat_coord=names4d[1], lon_coord=names4d[0], alt_coord=names4d[2],
distance_scale_factor=1.0e-3)
branch.targets.add(cs_transformer)
# return each broadcaster so that other things can tap into results of transformation of this dataset
return branch, scatter_ctrl
@coroutine
def flash_stat_printer(self, min_points=10):
while True:
ev, fl = (yield)
template = "{0} of {1} flashes have > {3} points. Their average area = {2:5.1f} km^2"
N = len(fl)
good = (fl['n_points'] >= min_points)
N_good = len(fl[good])
area = np.mean(fl['area'][good])
print template.format(N_good, N, area, min_points)
def flash_stats_for_dataset(self, d, selection_broadcaster):
flash_stat_branchpoint = Branchpoint([self.flash_stat_printer()])
flash_stat_brancher = flash_stat_branchpoint.broadcast()
@coroutine
def flash_data_for_selection(target, flash_id_key = 'flash_id'):
""" Accepts an array of event data from the pipeline, and sends
event and flash data.
"""
while True:
ev = (yield) # array of event data
fl_dat = d.flash_data
flash_ids = set(ev[flash_id_key])
flashes = np.fromiter(
(fl for fl in fl_dat if fl[flash_id_key] in flash_ids),
dtype=fl_dat.dtype)
target.send((ev, flashes))
selection_broadcaster.targets.add(flash_data_for_selection(flash_stat_brancher))
return flash_stat_branchpoint
@indexed()
def read_dat(self, *args, **kwargs):
""" All args and kwargs are passed to the LMAdataFile object from lmatools"""
lma = LMAdataFile(*args, **kwargs)
stn = lma.stations # adds stations to lma.data as a side-effect
d = NamedArrayDataset(lma.data)
self.datasets.add(d)
return d
def load_dat_to_panels(self, panels, *args, **kwargs):
""" All args and kwargs are passed to the LMAdataFile object from lmatools"""
d = self.read_dat(*args, **kwargs)
post_filter_brancher, scatter_ctrl = self.pipeline_for_dataset(d, panels)
branch_to_scatter_artists = scatter_ctrl.branchpoint
# ask for a copy of the array from each selection operation, so that
# it's saved and ready for any lasso operations
charge_lasso = LassoChargeController(
target=ItemModifier(
target=d.update(field_names=['charge']),
item_name='charge').modify())
branch_to_scatter_artists.targets.add(charge_lasso.cache_segment.cache_segment())
return d, post_filter_brancher, scatter_ctrl, charge_lasso
@indexed(index_name='hdf_row_idx')
def read_hdf5(self, LMAfileHDF):
try:
import tables
except ImportError:
print "couldn't import pytables"
return None
from hdf5_lma import HDF5Dataset
# get the HDF5 table name
LMAh5 = tables.openFile(LMAfileHDF, 'r')
table_names = LMAh5.root.events._v_children.keys()
table_path = '/events/' + table_names[0]
LMAh5.close()
d = HDF5Dataset(LMAfileHDF, table_path=table_path, mode='a')
self.datasets.add(d)
if d.flash_table is not None:
print "found flash data"
return d
def load_hdf5_to_panels(self, panels, LMAfileHDF, scatter_kwargs={}):
d = self.read_hdf5(LMAfileHDF)
post_filter_brancher, scatter_ctrl = self.pipeline_for_dataset(d, panels, scatter_kwargs=scatter_kwargs)
branch_to_scatter_artists = scatter_ctrl.branchpoint
charge_lasso = LassoChargeController(
target=ItemModifier(
target=d.update(index_name='hdf_row_idx',
field_names=['charge']),
item_name='charge').modify())
branch_to_scatter_artists.targets.add(charge_lasso.cache_segment.cache_segment())
return d, post_filter_brancher, scatter_ctrl, charge_lasso
def load_hdf5_flashes_to_panels(self, panels, hdf5dataset, min_points=10):
""" Set up a flash dataset display. The sole argument is usually the HDF5
LMA dataset returned by a call to self.load_hdf5_to_panels """
from hdf5_lma import HDF5FlashDataset
if hdf5dataset.flash_table is not None:
point_count_dtype = hdf5dataset.flash_data['n_points'].dtype
self.bounds.n_points = (min_points, np.iinfo(point_count_dtype))
flash_d = HDF5FlashDataset(hdf5dataset)
transform_mapping = {}
transform_mapping['time'] = ('start', (lambda v: (v[0], v[1])) )
transform_mapping['lat'] = ('init_lat', (lambda v: (v[0], v[1])) )
transform_mapping['lon'] = ('init_lon', (lambda v: (v[0], v[1])) )
transform_mapping['z'] = ('init_alt', (lambda v: (v[0]*1.0e3 - 1.0e3, v[1]*1.0e3 + 1.0e3)) )
flash_post_filter_brancher, flash_scatter_ctrl = self.pipeline_for_dataset(flash_d, panels,
transform_mapping=transform_mapping,
names4d=('init_lon', 'init_lat', 'init_alt', 'start') )
for art in flash_scatter_ctrl.artist_outlet_controllers:
# there is no time variable, but the artist updater is set to expect
# time. Patch that up.
if art.coords == ('time', 'z'):
art.coords = ('start', 'z')
# Draw flash markers in a different style
art.artist.set_edgecolor('k')
self.flash_datasets.add(flash_d)
return flash_d, flash_post_filter_brancher, flash_scatter_ctrl
class LassoChargeController(LassoPayloadController):
""" The "charge" attribute is one of {-1, 0, 1} to set
negative, unclassified, or positive charge, or None
to do nothing.
"""
charge = LassoPayloadController.Payload() | bsd-2-clause |
DirtyUnicorns/android_external_chromium-org | tools/telemetry_tools/telemetry_bootstrap.py | 24 | 5468 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Bootstrap Chrome Telemetry by downloading all its files from SVN servers.
Requires a DEPS file to specify which directories on which SVN servers
are required to run Telemetry. Format of that DEPS file is a subset of the
normal DEPS file format[1]; currently only the "deps" dictionary is
supported.
Fetches all files in the specified directories using WebDAV (SVN is WebDAV under
the hood).
[1] http://dev.chromium.org/developers/how-tos/depottools#TOC-DEPS-file
"""
import imp
import logging
import os
import urllib
import urlparse
# Dummy module for DAVclient.
davclient = None
# Link to file containing the 'davclient' WebDAV client library.
_DAVCLIENT_URL = ('https://src.chromium.org/chrome/trunk/src/tools/'
'telemetry/third_party/davclient/davclient.py')
def _DownloadAndImportDAVClientModule():
"""Dynamically import davclient helper library."""
global davclient
davclient_src = urllib.urlopen(_DAVCLIENT_URL).read()
davclient = imp.new_module('davclient')
exec davclient_src in davclient.__dict__
class DAVClientWrapper():
"""Knows how to retrieve subdirectories and files from WebDAV/SVN servers."""
def __init__(self, root_url):
"""Initialize SVN server root_url, save files to local dest_dir.
Args:
root_url: string url of SVN/WebDAV server
"""
self.root_url = root_url
self.client = davclient.DAVClient(root_url)
@staticmethod
def __norm_path_keys(dict_with_path_keys):
"""Returns a dictionary with os.path.normpath called on every key."""
return dict((os.path.normpath(k), v) for (k, v) in
dict_with_path_keys.items())
def GetDirList(self, path):
"""Returns string names of all files and subdirs of path on the server."""
props = self.__norm_path_keys(self.client.propfind(path, depth=1))
# remove this path
del props[os.path.normpath(path)]
return [os.path.basename(p) for p in props.keys()]
def IsFile(self, path):
"""Returns True if the path is a file on the server, False if directory."""
props = self.__norm_path_keys(self.client.propfind(path, depth=1))
return props[os.path.normpath(path)]['resourcetype'] is None
def Traverse(self, src_path, dst_path):
"""Walks the directory hierarchy pointed to by src_path download all files.
Recursively walks src_path and saves all files and subfolders into
dst_path.
Args:
src_path: string path on SVN server to save (absolute path on server).
dst_path: string local path (relative or absolute) to save to.
"""
if self.IsFile(src_path):
if not os.path.exists(os.path.dirname(dst_path)):
logging.info('Creating %s', os.path.dirname(dst_path))
os.makedirs(os.path.dirname(dst_path))
if os.path.isfile(dst_path):
logging.info('Skipping %s', dst_path)
else:
logging.info('Saving %s to %s', self.root_url + src_path, dst_path)
urllib.urlretrieve(self.root_url + src_path, dst_path)
return
else:
for subdir in self.GetDirList(src_path):
self.Traverse(os.path.join(src_path, subdir),
os.path.join(dst_path, subdir))
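# Minimal sketch of driving DAVClientWrapper directly (assumes the davclient
# module has been imported and that the host below is reachable):
#
#   _DownloadAndImportDAVClientModule()
#   dav = DAVClientWrapper('https://src.chromium.org')
#   for name in dav.GetDirList('/chrome/trunk/src/tools'):
#       print name, dav.IsFile('/chrome/trunk/src/tools/' + name)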
def ListAllDepsPaths(deps_file):
"""Recursively returns a list of all paths indicated in this deps file.
Note that this discards information about where path dependencies come from,
so this is only useful in the context of a Chromium source checkout that has
already fetched all dependencies.
Args:
deps_file: File containing deps information to be evaluated, in the
format given in the header of this file.
Returns:
A list of string paths starting under src that are required by the
given deps file, and all of its sub-dependencies. This amounts to
the keys of the 'deps' dictionary.
"""
deps = {}
deps_includes = {}
chrome_root = os.path.dirname(__file__)
while os.path.basename(chrome_root) != 'src':
chrome_root = os.path.abspath(os.path.join(chrome_root, os.pardir))
exec open(deps_file).read()
deps_paths = deps.keys()
for path in deps_includes.keys():
# Need to localize the paths.
path = os.path.join(chrome_root, os.pardir, path)
deps_paths += ListAllDepsPaths(path)
return deps_paths
def DownloadDeps(destination_dir, url):
"""Saves all the dependencies in deps_path.
Opens and reads url, assuming the contents are in the simple DEPS-like file
format specified in the header of this file, then download all
files/directories listed to the destination_dir.
Args:
destination_dir: String path to directory to download files into.
url: URL containing deps information to be evaluated.
"""
logging.warning('Downloading deps from %s...', url)
# TODO(wiltzius): Add a parameter for which revision to pull.
_DownloadAndImportDAVClientModule()
deps = {}
deps_includes = {}
exec urllib.urlopen(url).read()
for dst_path, src_path in deps.iteritems():
full_dst_path = os.path.join(destination_dir, dst_path)
parsed_url = urlparse.urlparse(src_path)
root_url = parsed_url.scheme + '://' + parsed_url.netloc
dav_client = DAVClientWrapper(root_url)
dav_client.Traverse(parsed_url.path, full_dst_path)
for url in deps_includes.values():
DownloadDeps(destination_dir, url)
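# Hypothetical invocation (the real callers live elsewhere in the Chromium
# tree; URL and destination are placeholders):
#
#   DownloadDeps('/tmp/telemetry_deps',
#                'https://src.chromium.org/chrome/trunk/src/tools/'
#                'telemetry/bootstrap_deps')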
| bsd-3-clause |
troya2/pjsip | pjsip-apps/src/swig/python/test.py | 44 | 3447 | import pjsua2 as pj
import sys
import time
#
# Basic data structure test, to make sure basic struct
# and array operations work
#
def ua_data_test():
#
# AuthCredInfo
#
print "UA data types test.."
the_realm = "pjsip.org"
ci = pj.AuthCredInfo()
ci.realm = the_realm
ci.dataType = 20
ci2 = ci
assert ci.dataType == 20
assert ci2.realm == the_realm
#
# UaConfig
# See here how we manipulate std::vector
#
uc = pj.UaConfig()
uc.maxCalls = 10
uc.userAgent = "Python"
uc.nameserver = pj.StringVector(["10.0.0.1", "10.0.0.2"])
uc.nameserver.append("NS1")
uc2 = uc
assert uc2.maxCalls == 10
assert uc2.userAgent == "Python"
assert len(uc2.nameserver) == 3
assert uc2.nameserver[0] == "10.0.0.1"
assert uc2.nameserver[1] == "10.0.0.2"
assert uc2.nameserver[2] == "NS1"
print " Dumping nameservers: ",
for s in uc2.nameserver:
print s,
print ""
#
# Exception test
#
def ua_run_test_exception():
print "Exception test.."
ep = pj.Endpoint()
ep.libCreate()
got_exception = False
try:
ep.natDetectType()
except pj.Error, e:
got_exception = True
print " Got exception: status=%u, reason=%s,\n title=%s,\n srcFile=%s, srcLine=%d" % \
(e.status, e.reason, e.title, e.srcFile, e.srcLine)
assert e.status == 370050
assert e.reason.find("PJNATH_ESTUNINSERVER") >= 0
assert e.title == "pjsua_detect_nat_type()"
assert got_exception
#
# Custom log writer
#
class MyLogWriter(pj.LogWriter):
def write(self, entry):
print "This is Python:", entry.msg
#
# Testing log writer callback
#
def ua_run_log_test():
print "Logging test.."
ep_cfg = pj.EpConfig()
lw = MyLogWriter()
ep_cfg.logConfig.writer = lw
ep_cfg.logConfig.decor = ep_cfg.logConfig.decor & ~(pj.PJ_LOG_HAS_CR | pj.PJ_LOG_HAS_NEWLINE)
ep = pj.Endpoint()
ep.libCreate()
ep.libInit(ep_cfg)
ep.libDestroy()
#
# Simple create, init, start, and destroy sequence
#
def ua_run_ua_test():
print "UA test run.."
ep_cfg = pj.EpConfig()
ep = pj.Endpoint()
ep.libCreate()
ep.libInit(ep_cfg)
ep.libStart()
print "************* Endpoint started ok, now shutting down... *************"
ep.libDestroy()
#
# Tone generator
#
def ua_tonegen_test():
print "UA tonegen test.."
ep_cfg = pj.EpConfig()
ep = pj.Endpoint()
ep.libCreate()
ep.libInit(ep_cfg)
ep.libStart()
tonegen = pj.ToneGenerator()
tonegen.createToneGenerator()
tone = pj.ToneDesc()
tone.freq1 = 400
tone.freq2 = 600
tone.on_msec = 1000
tone.off_msec = 1000
tones = pj.ToneDescVector()
tones.append(tone)
digit = pj.ToneDigit()
digit.digit = '0'
digit.on_msec = 1000
digit.off_msec = 1000
digits = pj.ToneDigitVector()
digits.append(digit)
adm = ep.audDevManager()
spk = adm.getPlaybackDevMedia()
tonegen.play(tones, True)
tonegen.startTransmit(spk)
time.sleep(5)
tonegen.stop()
tonegen.playDigits(digits, True)
time.sleep(5)
dm = tonegen.getDigitMap()
print dm[0].digit
dm[0].freq1 = 400
dm[0].freq2 = 600
tonegen.setDigitMap(dm)
tonegen.stop()
tonegen.playDigits(digits, True)
time.sleep(5)
tonegen = None
ep.libDestroy()
#
# main()
#
if __name__ == "__main__":
ua_data_test()
ua_run_test_exception()
ua_run_log_test()
ua_run_ua_test()
ua_tonegen_test()
sys.exit(0)
| gpl-2.0 |
ashokpant/clandmark | python_interface/bin/flandmark_demo.py | 6 | 2152 | import numpy as np
import os
from fnmatch import fnmatch
from py_flandmark import PyFlandmark
from PIL import Image
import ImageDraw
import matplotlib.pyplot as plt
def rgb2gray(rgb):
"""
converts an RGB array to its grayscale variant
according to the ITU-R BT.601 luma formula from Wikipedia
(this helper is not shipped with numpy/PIL)
"""
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
def read_bbox_from_txt(file_name):
"""
returns 2x2 matrix coordinates of
left upper and right lower corners
of rectangle that contains face stored
in columns of matrix
"""
f = open(file_name)
str = f.read().replace(',', ' ')
f.close()
ret = np.array(map(int,str.split()) ,dtype=np.int32)
ret = ret.reshape((2,2), order='F')
return ret
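# Worked example of the .det layout assumed above: a file containing
# "10, 20, 110, 140" reshapes column-major (order='F') into
#
#   array([[ 10, 110],
#          [ 20, 140]])
#
# i.e. column 0 holds the upper-left corner (x=10, y=20) and column 1 the
# lower-right corner (x=110, y=140).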
DIR = '../../../data/Images/'
JPGS = [f for f in os.listdir(DIR) if fnmatch(f, '*.jpg')]
flmrk = PyFlandmark("../../../data/flandmark_model.xml", False)
for jpg_name in JPGS:
file_name = jpg_name[:-4]
img = Image.open(DIR + jpg_name)
arr = rgb2gray(np.asarray(img))
bbox = read_bbox_from_txt(DIR + jpg_name[:-4] + '.det')
d_landmarks = flmrk.detect(arr, bbox)
n = d_landmarks.shape[1]
print "test detect method"
im = Image.fromarray(arr)
img_dr = ImageDraw.Draw(im)
img_dr.rectangle([tuple(bbox[:,0]), tuple(bbox[:,1])], outline="#FF00FF")
r = 2.
for i in xrange(n):
x = d_landmarks[0,i]
y = d_landmarks[1,i]
img_dr.ellipse((x-r, y-r, x+r, y+r), fill=0.)
plt.imshow(np.asarray(im), cmap = plt.get_cmap('gray'))
plt.show()
print "test detect method"
frame = flmrk.get_normalized_frame(arr, bbox)[0]
frame = frame.astype(np.double)
im = Image.fromarray(frame)
plt.imshow(np.asarray(im), cmap = plt.get_cmap('gray'))
plt.show()
print "test detect_base method"
landmarks = flmrk.detect_base(frame)
im = Image.fromarray(frame)
img_dr = ImageDraw.Draw(im)
r = 2.
for i in xrange(n):
x = landmarks[0,i]
y = landmarks[1,i]
img_dr.ellipse((x-r, y-r, x+r, y+r), fill=0.)
plt.imshow(np.asarray(im), cmap = plt.get_cmap('gray'))
plt.show()
print "test psi method"
psi = flmrk.get_psi(frame, landmarks.astype(np.int32), bbox)
#flmrk.get_psi(d_landmarks, arr, bbox)
break | gpl-3.0 |
mzdanieltest/pex | pex/interpreter.py | 52 | 12996 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""pex support for interacting with interpreters."""
from __future__ import absolute_import
import os
import re
import subprocess
import sys
from collections import defaultdict
from pkg_resources import Distribution, Requirement, find_distributions
from .base import maybe_requirement
from .compatibility import string
from .tracer import TRACER
try:
from numbers import Integral
except ImportError:
Integral = (int, long)
# Determine in the most platform-compatible way possible the identity of the interpreter
# and its known packages.
ID_PY = b"""
import sys
if hasattr(sys, 'pypy_version_info'):
subversion = 'PyPy'
elif sys.platform.startswith('java'):
subversion = 'Jython'
else:
subversion = 'CPython'
print("%s %s %s %s" % (
subversion,
sys.version_info[0],
sys.version_info[1],
sys.version_info[2]))
setuptools_path = None
try:
import pkg_resources
except ImportError:
sys.exit(0)
requirements = {}
for item in sys.path:
for dist in pkg_resources.find_distributions(item):
requirements[str(dist.as_requirement())] = dist.location
for requirement_str, location in requirements.items():
rs = requirement_str.split('==', 2)
if len(rs) == 2:
print('%s %s %s' % (rs[0], rs[1], location))
"""
class PythonIdentity(object):
class Error(Exception): pass
class InvalidError(Error): pass
class UnknownRequirement(Error): pass
# TODO(wickman) Support interpreter-specific versions, e.g. PyPy-2.2.1
HASHBANGS = {
'CPython': 'python%(major)d.%(minor)d',
'Jython': 'jython',
'PyPy': 'pypy',
}
@classmethod
def get_subversion(cls):
if hasattr(sys, 'pypy_version_info'):
subversion = 'PyPy'
elif sys.platform.startswith('java'):
subversion = 'Jython'
else:
subversion = 'CPython'
return subversion
@classmethod
def get(cls):
return cls(cls.get_subversion(), sys.version_info[0], sys.version_info[1], sys.version_info[2])
@classmethod
def from_id_string(cls, id_string):
values = id_string.split()
if len(values) != 4:
raise cls.InvalidError("Invalid id string: %s" % id_string)
return cls(str(values[0]), int(values[1]), int(values[2]), int(values[3]))
@classmethod
def from_path(cls, dirname):
interp, version = dirname.split('-')
major, minor, patch = version.split('.')
return cls(str(interp), int(major), int(minor), int(patch))
def __init__(self, interpreter, major, minor, patch):
for var in (major, minor, patch):
assert isinstance(var, Integral)
self._interpreter = interpreter
self._version = (major, minor, patch)
@property
def interpreter(self):
return self._interpreter
@property
def version(self):
return self._version
@property
def requirement(self):
return self.distribution.as_requirement()
@property
def distribution(self):
return Distribution(project_name=self._interpreter, version='.'.join(map(str, self._version)))
@classmethod
def parse_requirement(cls, requirement, default_interpreter='CPython'):
if isinstance(requirement, Requirement):
return requirement
elif isinstance(requirement, string):
try:
requirement = Requirement.parse(requirement)
except ValueError:
try:
requirement = Requirement.parse('%s%s' % (default_interpreter, requirement))
except ValueError:
raise ValueError('Unknown requirement string: %s' % requirement)
return requirement
else:
raise ValueError('Unknown requirement type: %r' % (requirement,))
def matches(self, requirement):
"""Given a Requirement, check if this interpreter matches."""
try:
requirement = self.parse_requirement(requirement, self._interpreter)
except ValueError as e:
raise self.UnknownRequirement(str(e))
return self.distribution in requirement
def hashbang(self):
hashbang_string = self.HASHBANGS.get(self.interpreter, 'CPython') % {
'major': self._version[0],
'minor': self._version[1],
'patch': self._version[2],
}
return '#!/usr/bin/env %s' % hashbang_string
@property
def python(self):
# return the python version in the format of the 'python' key for distributions
# specifically, '2.6', '2.7', '3.2', etc.
return '%d.%d' % (self.version[0:2])
def __str__(self):
return '%s-%s.%s.%s' % (self._interpreter,
self._version[0], self._version[1], self._version[2])
def __repr__(self):
return 'PythonIdentity(%r, %s, %s, %s)' % (
self._interpreter, self._version[0], self._version[1], self._version[2])
def __eq__(self, other):
return all([isinstance(other, PythonIdentity),
self.interpreter == other.interpreter,
self.version == other.version])
def __hash__(self):
return hash((self._interpreter, self._version))
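# Minimal sketch of exercising PythonIdentity (illustrative only; the values
# returned depend on the interpreter that runs this module, and the bare
# version string relies on the 'CPython' default expansion documented above):
def _identity_example():
  ident = PythonIdentity.get()
  req = PythonIdentity.parse_requirement('>=2.6')  # becomes 'CPython>=2.6'
  return str(ident), ident.hashbang(), ident.matches(req)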
class PythonInterpreter(object):
REGEXEN = (
re.compile(r'jython$'),
# NB: OSX ships python binaries named Python so we allow for capital-P.
re.compile(r'[Pp]ython$'),
re.compile(r'python[23]\.[0-9]$'),
re.compile(r'pypy$'),
re.compile(r'pypy-1\.[0-9]$'),
)
CACHE = {} # memoize executable => PythonInterpreter
try:
# Versions of distribute prior to the setuptools merge would automatically replace
# 'setuptools' requirements with 'distribute'. It provided the 'replacement' kwarg
# to toggle this, but it was removed post-merge.
COMPATIBLE_SETUPTOOLS = Requirement.parse('setuptools>=1.0', replacement=False)
except TypeError:
COMPATIBLE_SETUPTOOLS = Requirement.parse('setuptools>=1.0')
class Error(Exception): pass
class IdentificationError(Error): pass
class InterpreterNotFound(Error): pass
@classmethod
def get(cls):
return cls.from_binary(sys.executable)
@classmethod
def all(cls, paths=None):
if paths is None:
paths = os.getenv('PATH', '').split(':')
return cls.filter(cls.find(paths))
@classmethod
def _parse_extras(cls, output_lines):
def iter_lines():
for line in output_lines:
try:
dist_name, dist_version, location = line.split()
except ValueError:
raise cls.IdentificationError('Could not identify requirement: %s' % line)
yield ((dist_name, dist_version), location)
return dict(iter_lines())
@classmethod
def _from_binary_internal(cls, path_extras):
def iter_extras():
for item in sys.path + list(path_extras):
for dist in find_distributions(item):
if dist.version:
yield ((dist.key, dist.version), dist.location)
return cls(sys.executable, PythonIdentity.get(), dict(iter_extras()))
@classmethod
def _from_binary_external(cls, binary, path_extras):
environ = cls.sanitized_environment()
environ['PYTHONPATH'] = ':'.join(path_extras)
po = subprocess.Popen(
[binary],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
env=environ)
so, _ = po.communicate(ID_PY)
output = so.decode('utf8').splitlines()
if len(output) == 0:
raise cls.IdentificationError('Could not establish identity of %s' % binary)
identity, extras = output[0], output[1:]
return cls(
binary,
PythonIdentity.from_id_string(identity),
extras=cls._parse_extras(extras))
@classmethod
def expand_path(cls, path):
if os.path.isfile(path):
return [path]
elif os.path.isdir(path):
return [os.path.join(path, fn) for fn in os.listdir(path)]
return []
@classmethod
def from_env(cls, hashbang):
"""Resolve a PythonInterpreter as /usr/bin/env would.
:param hashbang: A string, e.g. "python3.3" representing some binary on the $PATH.
"""
paths = os.getenv('PATH', '').split(':')
for path in paths:
for fn in cls.expand_path(path):
basefile = os.path.basename(fn)
if hashbang == basefile:
try:
return cls.from_binary(fn)
except Exception as e:
TRACER.log('Could not identify %s: %s' % (fn, e))
@classmethod
def from_binary(cls, binary, path_extras=None):
path_extras = path_extras or ()
if binary not in cls.CACHE:
if binary == sys.executable:
cls.CACHE[binary] = cls._from_binary_internal(path_extras)
else:
cls.CACHE[binary] = cls._from_binary_external(binary, path_extras)
return cls.CACHE[binary]
@classmethod
def find(cls, paths):
"""
Given a list of files or directories, try to detect python interpreters amongst them.
Returns a list of PythonInterpreter objects.
"""
pythons = []
for path in paths:
for fn in cls.expand_path(path):
basefile = os.path.basename(fn)
if any(matcher.match(basefile) is not None for matcher in cls.REGEXEN):
try:
pythons.append(cls.from_binary(fn))
except Exception as e:
TRACER.log('Could not identify %s: %s' % (fn, e))
continue
return pythons
@classmethod
def filter(cls, pythons):
"""
Given a map of python interpreters in the format provided by PythonInterpreter.find(),
filter out duplicate versions and versions we would prefer not to use.
Returns a map in the same format as find.
"""
good = []
MAJOR, MINOR, SUBMINOR = range(3)
def version_filter(version):
return (version[MAJOR] == 2 and version[MINOR] >= 6 or
version[MAJOR] == 3 and version[MINOR] >= 2)
all_versions = set(interpreter.identity.version for interpreter in pythons)
good_versions = filter(version_filter, all_versions)
for version in good_versions:
# For each candidate, use the latest version we find on the filesystem.
candidates = defaultdict(list)
for interp in pythons:
if interp.identity.version == version:
candidates[interp.identity.interpreter].append(interp)
for interp_class in candidates:
candidates[interp_class].sort(
key=lambda interp: os.path.getmtime(interp.binary), reverse=True)
good.append(candidates[interp_class].pop(0))
return good
@classmethod
def sanitized_environment(cls):
# N.B. This is merely a hack because sysconfig.py on the default OS X
# installation of 2.6/2.7 breaks.
env_copy = os.environ.copy()
env_copy.pop('MACOSX_DEPLOYMENT_TARGET', None)
return env_copy
@classmethod
def replace(cls, requirement):
self = cls.get()
if self.identity.matches(requirement):
return False
for pi in cls.all():
if pi.identity.matches(requirement):
break
else:
raise cls.InterpreterNotFound('Could not find interpreter matching filter!')
os.execve(pi.binary, [pi.binary] + sys.argv, cls.sanitized_environment())
def __init__(self, binary, identity, extras=None):
"""Construct a PythonInterpreter.
You should probably PythonInterpreter.from_binary instead.
:param binary: The full path of the python binary.
:param identity: The :class:`PythonIdentity` of the PythonInterpreter.
:param extras: A mapping from (dist.key, dist.version) to dist.location
of the extras associated with this interpreter.
"""
self._binary = os.path.realpath(binary)
self._extras = extras or {}
self._identity = identity
def with_extra(self, key, version, location):
extras = self._extras.copy()
extras[(key, version)] = location
return self.__class__(self._binary, self._identity, extras)
@property
def extras(self):
return self._extras.copy()
@property
def binary(self):
return self._binary
@property
def identity(self):
return self._identity
@property
def python(self):
return self._identity.python
@property
def version(self):
return self._identity.version
@property
def version_string(self):
return str(self._identity)
def satisfies(self, capability):
if not isinstance(capability, list):
raise TypeError('Capability must be a list, got %s' % type(capability))
return not any(self.get_location(req) is None for req in capability)
def get_location(self, req):
req = maybe_requirement(req)
for dist, location in self.extras.items():
dist_name, dist_version = dist
if req.key == dist_name and dist_version in req:
return location
def __hash__(self):
return hash((self._binary, self._identity))
def __eq__(self, other):
if not isinstance(other, PythonInterpreter):
return False
return (self._binary, self._identity) == (other._binary, other._identity)
def __lt__(self, other):
if not isinstance(other, PythonInterpreter):
return False
return self.version < other.version
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__, self._binary, self._identity, self._extras)
| apache-2.0 |
apache/bloodhound | trac/trac/tests/core.py | 2 | 13792 | # -*- coding: utf-8 -*-
#
# Copyright (C)2005-2009 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <[email protected]>
from trac.core import *
import unittest
class ITest(Interface):
def test():
"""Dummy function."""
class IOtherTest(Interface):
def other_test():
"""Other dummy function."""
class ComponentTestCase(unittest.TestCase):
def setUp(self):
from trac.core import ComponentManager, ComponentMeta
self.compmgr = ComponentManager()
# Make sure we have no external components hanging around in the
# component registry
self.old_registry = ComponentMeta._registry
ComponentMeta._registry = {}
def tearDown(self):
# Restore the original component registry
from trac.core import ComponentMeta
ComponentMeta._registry = self.old_registry
def test_base_class_not_registered(self):
"""
Make sure that the Component base class does not appear in the component
registry.
"""
from trac.core import ComponentMeta
assert Component not in ComponentMeta._components
self.assertRaises(TracError, self.compmgr.__getitem__, Component)
def test_abstract_component_not_registered(self):
"""
Make sure that a Component class marked as abstract does not appear in
the component registry.
"""
from trac.core import ComponentMeta
class AbstractComponent(Component):
abstract = True
assert AbstractComponent not in ComponentMeta._components
self.assertRaises(TracError, self.compmgr.__getitem__,
AbstractComponent)
def test_unregistered_component(self):
"""
Make sure the component manager refuses to manage classes not derived
from `Component`.
"""
class NoComponent(object):
pass
self.assertRaises(TracError, self.compmgr.__getitem__, NoComponent)
def test_component_registration(self):
"""
Verify that classes derived from `Component` are managed by the
component manager.
"""
class ComponentA(Component):
pass
assert self.compmgr[ComponentA]
assert ComponentA(self.compmgr)
def test_component_identity(self):
"""
Make sure instantiating a component multiple times just returns the
same instance again.
"""
class ComponentA(Component):
pass
c1 = ComponentA(self.compmgr)
c2 = ComponentA(self.compmgr)
assert c1 is c2, 'Expected same component instance'
c2 = self.compmgr[ComponentA]
assert c1 is c2, 'Expected same component instance'
def test_component_initializer(self):
"""
Makes sure that a component's `__init__` method gets called.
"""
class ComponentA(Component):
def __init__(self):
self.data = 'test'
self.assertEqual('test', ComponentA(self.compmgr).data)
ComponentA(self.compmgr).data = 'newtest'
self.assertEqual('newtest', ComponentA(self.compmgr).data)
def test_inherited_component_initializer(self):
"""
Makes sure that the `__init__` method of a component's super-class
gets called if the component doesn't override it.
"""
class ComponentA(Component):
def __init__(self):
self.data = 'foo'
class ComponentB(ComponentA):
def __init__(self):
self.data = 'bar'
class ComponentC(ComponentB):
pass
self.assertEqual('bar', ComponentC(self.compmgr).data)
ComponentC(self.compmgr).data = 'baz'
self.assertEqual('baz', ComponentC(self.compmgr).data)
def test_implements_called_outside_classdef(self):
"""
Verify that calling implements() outside a class definition raises an
`AssertionError`.
"""
try:
implements()
except AssertionError:
pass
else:
self.fail('Expected AssertionError')
def test_implements_multiple(self):
"""
Verify that a component "implementing" an interface more than once
(e.g. through inheritance) is not called more than once from an
extension point.
"""
log = []
class Parent(Component):
abstract = True
implements(ITest)
class Child(Parent):
implements(ITest)
def test(self):
log.append("call")
class Other(Component):
tests = ExtensionPoint(ITest)
for test in Other(self.compmgr).tests:
test.test()
self.assertEqual(["call"], log)
def test_attribute_access(self):
"""
Verify that accessing undefined attributes on components raises an
`AttributeError`.
"""
class ComponentA(Component):
pass
comp = ComponentA(self.compmgr)
try:
comp.foo
self.fail('Expected AttributeError')
except AttributeError:
pass
def test_nonconforming_extender(self):
"""
Verify that accessing a method of a declared extension point interface
raises a normal `AttributeError` if the component does not implement
the method.
"""
class ComponentA(Component):
tests = ExtensionPoint(ITest)
class ComponentB(Component):
implements(ITest)
tests = iter(ComponentA(self.compmgr).tests)
try:
tests.next().test()
self.fail('Expected AttributeError')
except AttributeError:
pass
def test_extension_point_with_no_extension(self):
"""
Verify that accessing an extension point with no extenders returns an
empty list.
"""
class ComponentA(Component):
tests = ExtensionPoint(ITest)
tests = iter(ComponentA(self.compmgr).tests)
self.assertRaises(StopIteration, tests.next)
def test_extension_point_with_one_extension(self):
"""
Verify that a single component extending an extension point can be
accessed through the extension point attribute of the declaring
component.
"""
class ComponentA(Component):
tests = ExtensionPoint(ITest)
class ComponentB(Component):
implements(ITest)
def test(self):
return 'x'
tests = iter(ComponentA(self.compmgr).tests)
self.assertEquals('x', tests.next().test())
self.assertRaises(StopIteration, tests.next)
def test_extension_point_with_two_extensions(self):
"""
Verify that two components extending an extension point can be accessed
through the extension point attribute of the declaring component.
"""
class ComponentA(Component):
tests = ExtensionPoint(ITest)
class ComponentB(Component):
implements(ITest)
def test(self):
return 'x'
class ComponentC(Component):
implements(ITest)
def test(self):
return 'y'
results = [test.test() for test in ComponentA(self.compmgr).tests]
self.assertEquals(['x', 'y'], sorted(results))
def test_inherited_extension_point(self):
"""
Verify that extension points are inherited to sub-classes.
"""
class BaseComponent(Component):
tests = ExtensionPoint(ITest)
class ConcreteComponent(BaseComponent):
pass
class ExtendingComponent(Component):
implements(ITest)
def test(self):
return 'x'
tests = iter(ConcreteComponent(self.compmgr).tests)
self.assertEquals('x', tests.next().test())
self.assertRaises(StopIteration, tests.next)
def test_inherited_implements(self):
"""
Verify that a component with a super-class implementing an extension
point interface is also registered as implementing that interface.
"""
class BaseComponent(Component):
implements(ITest)
abstract = True
class ConcreteComponent(BaseComponent):
pass
from trac.core import ComponentMeta
assert ConcreteComponent in ComponentMeta._registry.get(ITest, [])
def test_inherited_implements_multilevel(self):
"""
Verify that extension point interfaces are inherited for more than
one level of inheritance.
"""
class BaseComponent(Component):
implements(ITest)
abstract = True
class ChildComponent(BaseComponent):
implements(IOtherTest)
abstract = True
class ConcreteComponent(ChildComponent):
pass
from trac.core import ComponentMeta
assert ConcreteComponent in ComponentMeta._registry.get(ITest, [])
assert ConcreteComponent in ComponentMeta._registry.get(IOtherTest, [])
def test_component_manager_component(self):
"""
Verify that a component manager can itself be a component with its own
extension points.
"""
from trac.core import ComponentManager
class ManagerComponent(ComponentManager, Component):
tests = ExtensionPoint(ITest)
def __init__(self, foo, bar):
ComponentManager.__init__(self)
self.foo, self.bar = foo, bar
class Extender(Component):
implements(ITest)
def test(self):
return 'x'
mgr = ManagerComponent('Test', 42)
assert id(mgr) == id(mgr[ManagerComponent])
tests = iter(mgr.tests)
self.assertEquals('x', tests.next().test())
self.assertRaises(StopIteration, tests.next)
def test_component_manager_component_isolation(self):
"""
Verify that a component manager that is also a component will only
be listed in extension points for components instantiated in
its scope.
See bh:comment:5:ticket:438 and #11121
"""
from trac.core import ComponentManager
class ManagerComponent(ComponentManager, Component):
tests = ExtensionPoint(ITest)
def __init__(self, foo, bar):
ComponentManager.__init__(self)
self.foo, self.bar = foo, bar
class YetAnotherManagerComponent(ComponentManager, Component):
implements(ITest)
def __init__(self, foo, bar):
ComponentManager.__init__(self)
self.foo, self.bar = foo, bar
# ITest methods
def test(self):
return self.foo + self.bar
class ComponentA(Component):
tests = ExtensionPoint(ITest)
class Extender(Component):
implements(ITest)
def test(self):
return 'x'
mgr = ManagerComponent('Test', 42)
yamc = YetAnotherManagerComponent('y', 'z')
assert yamc[ManagerComponent] is None
assert mgr[YetAnotherManagerComponent] is None
assert yamc[ComponentManager] is None
assert self.compmgr[YetAnotherManagerComponent] is None
assert mgr[ComponentManager] is None
assert self.compmgr[ManagerComponent] is None
self.assertTrue(any(c.__class__ is YetAnotherManagerComponent
for c in ComponentA(yamc).tests))
self.assertFalse(any(c.__class__ is YetAnotherManagerComponent
for c in ComponentA(self.compmgr).tests))
self.assertFalse(any(c.__class__ is YetAnotherManagerComponent
for c in ComponentA(mgr).tests))
self.assertFalse(any(c.__class__ is ManagerComponent
for c in ComponentA(yamc).tests))
self.assertFalse(any(c.__class__ is YetAnotherManagerComponent
for c in mgr.tests))
results = [test.test() for test in ComponentA(yamc).tests]
self.assertEquals(['x', 'yz'], sorted(results))
results = [test.test() for test in ComponentA(self.compmgr).tests]
self.assertEquals(['x'], sorted(results))
results = [test.test() for test in ComponentA(mgr).tests]
self.assertEquals(['x'], sorted(results))
results = [test.test() for test in mgr.tests]
self.assertEquals(['x'], sorted(results))
def test_instantiation_doesnt_enable(self):
"""
Make sure that a component disabled by the ComponentManager is not
implicitly enabled by instantiating it directly.
"""
from trac.core import ComponentManager
class DisablingComponentManager(ComponentManager):
def is_component_enabled(self, cls):
return False
class ComponentA(Component):
pass
mgr = DisablingComponentManager()
instance = ComponentA(mgr)
self.assertEqual(None, mgr[ComponentA])
def suite():
return unittest.makeSuite(ComponentTestCase, 'test')
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
ChinaMassClouds/copenstack-server | openstack/src/horizon-2014.2/openstack_dashboard/dashboards/project/applyhost/workflows/update_instance.py | 11 | 5708 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.utils import filters
INDEX_URL = "horizon:projects:instances:index"
ADD_USER_URL = "horizon:projects:instances:create_user"
INSTANCE_SEC_GROUP_SLUG = "update_security_groups"
class UpdateInstanceSecurityGroupsAction(workflows.MembershipAction):
def __init__(self, request, *args, **kwargs):
super(UpdateInstanceSecurityGroupsAction, self).__init__(request,
*args,
**kwargs)
err_msg = _('Unable to retrieve security group list. '
'Please try again later.')
context = args[0]
instance_id = context.get('instance_id', '')
default_role_name = self.get_default_role_field_name()
self.fields[default_role_name] = forms.CharField(required=False)
self.fields[default_role_name].initial = 'member'
# Get list of available security groups
all_groups = []
try:
all_groups = api.network.security_group_list(request)
except Exception:
exceptions.handle(request, err_msg)
groups_list = [(group.id, group.name) for group in all_groups]
instance_groups = []
try:
instance_groups = api.network.server_security_groups(request,
instance_id)
except Exception:
exceptions.handle(request, err_msg)
field_name = self.get_member_field_name('member')
self.fields[field_name] = forms.MultipleChoiceField(required=False)
self.fields[field_name].choices = groups_list
self.fields[field_name].initial = [group.id
for group in instance_groups]
def handle(self, request, data):
instance_id = data['instance_id']
wanted_groups = map(filters.get_int_or_uuid, data['wanted_groups'])
try:
api.network.server_update_security_groups(request, instance_id,
wanted_groups)
except Exception as e:
exceptions.handle(request, str(e))
return False
return True
class Meta:
name = _("Security Groups")
slug = INSTANCE_SEC_GROUP_SLUG
class UpdateInstanceSecurityGroups(workflows.UpdateMembersStep):
action_class = UpdateInstanceSecurityGroupsAction
help_text = _("Add and remove security groups to this project "
"from the list of available security groups.")
available_list_title = _("All Security Groups")
members_list_title = _("Instance Security Groups")
no_available_text = _("No security groups found.")
no_members_text = _("No security groups enabled.")
show_roles = False
depends_on = ("instance_id",)
contributes = ("wanted_groups",)
def contribute(self, data, context):
request = self.workflow.request
if data:
field_name = self.get_member_field_name('member')
context["wanted_groups"] = request.POST.getlist(field_name)
return context
class UpdateInstanceInfoAction(workflows.Action):
name = forms.CharField(label=_("Name"),
max_length=255)
def handle(self, request, data):
try:
api.nova.server_update(request,
data['instance_id'],
data['name'])
except Exception:
exceptions.handle(request, ignore=True)
return False
return True
class Meta:
name = _("Info")
slug = 'instance_info'
help_text = _("Edit the instance details.")
class UpdateInstanceInfo(workflows.Step):
action_class = UpdateInstanceInfoAction
depends_on = ("instance_id",)
contributes = ("name",)
class UpdateInstance(workflows.Workflow):
slug = "update_instance"
name = _("Edit Instance")
finalize_button_name = _("Save")
success_message = _('Modified instance "%s".')
failure_message = _('Unable to modify instance "%s".')
success_url = "horizon:project:instances:index"
default_steps = (UpdateInstanceInfo,
UpdateInstanceSecurityGroups)
def format_status_message(self, message):
return message % self.context.get('name', 'unknown instance')
# NOTE(kspear): nova doesn't support instance security group management
# by an admin. This isn't really the place for this code,
# but the other ways of special-casing this are even messier.
class AdminUpdateInstance(UpdateInstance):
success_url = "horizon:admin:instances:index"
default_steps = (UpdateInstanceInfo,)
| gpl-2.0 |
kans/birgo | deps/breakpad/src/tools/gyp/test/rules/gyptest-default.py | 137 | 1063 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple rules when using an explicit build target of 'all'.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('actions.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('actions.gyp', chdir='relocate/src')
expect = """\
Hello from program.c
Hello from function1.in
Hello from function2.in
"""
if test.format == 'xcode':
chdir = 'relocate/src/subdir1'
else:
chdir = 'relocate/src'
test.run_built_executable('program', chdir=chdir, stdout=expect)
expect = """\
Hello from program.c
Hello from function3.in
"""
if test.format == 'xcode':
chdir = 'relocate/src/subdir3'
else:
chdir = 'relocate/src'
test.run_built_executable('program2', chdir=chdir, stdout=expect)
test.must_match('relocate/src/subdir2/file1.out', "Hello from file1.in\n")
test.must_match('relocate/src/subdir2/file2.out', "Hello from file2.in\n")
test.pass_test()
| apache-2.0 |
gurneyalex/hr | __unported__/hr_resume/__init__.py | 28 | 1047 | # -*- encoding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from . import hr_resume
from . import report
| agpl-3.0 |
bsmithers/CLIgraphs | histogram.py | 1 | 8515 | #!/usr/bin/env python2
from __future__ import division
import itertools
import math
import sys
import numpy
import scipy.stats
import cligraph
import utils
"""
TODO:
- Auto-detect number of bins
- Fixed width or variable width bins
- Stacked bins, overlapped bins or bins next to each other
- Change which side of bin is open (default: bins are half-open, closed on left, except final bin
which is closed both sides)
"""
class Histogram(cligraph.CLIGraph):
def __init__(self, **kwargs):
super(Histogram, self).__init__(**kwargs)
self.data = []
self.data_params = []
def check_args(self, cli_args, inputs):
super(Histogram, self).check_args(cli_args, inputs)
self.fields = utils.get_columns_from_string(cli_args.field)
self.colours = itertools.cycle(cli_args.colours.split(','))
self.markers = itertools.cycle(cli_args.markers)
self.alphas = utils.map_csv_to_cycle(cli_args.alpha, float)
self.histtypes = itertools.cycle(cli_args.hist_type.split(','))
if cli_args.legends:
self.legends = itertools.cycle(cli_args.legends)
else:
self.legends = itertools.cycle([None])
# Should we store all data and render only after reading everything?
self.store = False
if cli_args.unify_bins:
self.store = True
# Set bin defaults if none given
if not cli_args.bins and not cli_args.bin_size:
cli_args.bins = 10
return bool(self.fields) and bool(self.alphas)
def get_parser(self):
parser = super(Histogram, self).get_parser()
# Inputs
parser.add_argument('-f', '--field', help='Column to read values from. (1-based indexing). \
Unix cut format for multiple columns. Default = 1', default='1')
# Histogram setup
parser.add_argument('--normed', help='Normalise frequency?', action="store_true",
default=False)
parser.add_argument("--cumulative", help="Cumulative Frequency? Default=0",
action="store_true", default=False)
parser.add_argument("--logscale", help="Use a logarithmic y-axs", action="store_true",
default=False)
parser.add_argument("--legends", nargs="+", help="Dataset legends", default=None)
group = parser.add_mutually_exclusive_group()
group.add_argument('-b', '--bins', help='Number of bins. If not given and bin-size not \
given, this will default to 10', type=int)
group.add_argument('-z', '--bin-size', help='Size of each bin', type=float)
parser.add_argument('-u', '--unify-bins', action="store_true", default=False,
help='Unify bin sizes across different input sources')
parser.add_argument('--disable-bin-offset', help="By default, bins are offset by half their\
width to help bins straddle integer values for example",
action="store_true", default=False)
# Visual
parser.add_argument('-c', '--colours', default='r,g,b,c,y,m,k')
parser.add_argument('-m', '--markers', default=' ')
parser.add_argument('-a', '--alpha', default='0.5')
parser.add_argument('-y', '--hist-type', default='bar')
return parser
def input_started_hook(self, axes, cli_args, inp, inp_index):
"""
Setup data structures
"""
if not self.store:
self.data = []
self.data_params = []
for _ in self.fields:
self.data.append([])
self.data_params.append({'min': float('inf'), 'max': float('-inf')})
def input_ended_hook(self, axes, cli_args, inp, inp_index):
"""
Draw histogram at end of input unless we have to store data (e.g. for bin calculation)
"""
if self.store:
return
self.__draw_histogram(axes, cli_args)
def process_input_by_fields(self, axes, cli_args, inp, inp_index, fields):
"""
Store value for each dataset
"""
for index, column in enumerate(self.fields):
value = float(fields[column])
if self.store:
index = inp_index * len(self.fields) + index
# Store min/max values for bin work
self.data_params[index]['min'] = min(value, self.data_params[index]['min'])
self.data_params[index]['max'] = max(value, self.data_params[index]['max'])
self.data[index].append(float(fields[column]))
def process_input(self, axes, cli_args, inputs):
"""
If we are doing bin-size auto detection and require consist bin size
across different inputs, we will have to read all data first before
we can process
"""
super(Histogram, self).process_input(axes, cli_args, inputs)
if self.store:
self.__draw_histogram(axes, cli_args)
def apply_lables_and_titles(self, fig, axes, cli_args):
"""
Add legend if we have them
TODO: This can probably by done more generally, just have to be careful about
plots with multiple axes.
"""
super(Histogram, self).apply_lables_and_titles(fig, axes, cli_args)
if cli_args.legends:
axes.legend()
def __draw_histogram(self, axes, cli_args):
"""
Plot histograms for all datasets in current data
"""
for index, dataset in enumerate(self.data):
bins = self.__get_bins(cli_args, index)
axes.hist(dataset, bins, facecolor=self.colours.next(), alpha=self.alphas.next(),
normed=cli_args.normed, cumulative=cli_args.cumulative,
log=cli_args.logscale, label=self.legends.next(), hatch=self.markers.next(),
histtype=self.histtypes.next())
def __get_bins(self, cli_args, index):
"""
Get the bin histogram parameter for the data at the given index. Use the supplied
number of bins if given. Otherwise, calculate based on the supplied bin width.
"""
# Short-circuit if we are given number of bins and not using equal bins
if cli_args.bins and not self.store:
return cli_args.bins
# Get the minimum and maximum values either for this dataset or for all datasets
# if we are post-processing
min_val = self.data_params[index]['min']
max_val = self.data_params[index]['max']
if self.store:
min_val = min([self.data_params[i]['min'] for i in range(0, len(self.data_params))])
max_val = max([self.data_params[i]['max'] for i in range(0, len(self.data_params))])
# For a fixed number of bins, do a linear fit. Otherwise, use a range with bin size
if cli_args.bins:
# Fit one extra value to include right edge (same as normal histogram behaviour)
return numpy.linspace(min_val, max_val, cli_args.bins + 1)
# Compute bins. Do not use range as values may be floats.
# Lowest bin should be the largest multiple of bin_size that is <= min_val
# Highest bin should be smallest multiple of bin_size that is >= max_val
bins = []
i = math.floor(min_val / cli_args.bin_size) * cli_args.bin_size
# By default, bins are offset by half their width from the lowest value rather
# than by their full width
if not cli_args.disable_bin_offset:
i -= cli_args.bin_size / 2
else:
i -= cli_args.bin_size
while i <= max_val:
bins.append(i)
i += cli_args.bin_size
bins.append(i) # Add final bin
# Combine offscreen bins for faster renders
if cli_args.min_x and cli_args.min_x > min_val:
first_onscreen = max([index for index, b in enumerate(bins) if b <= cli_args.min_x])
# Include the first bin so that this captures everything offscren
if first_onscreen >= 2:
bins = [bins[0]] + bins[first_onscreen:]
if cli_args.max_x and cli_args.max_x < max_val:
last_onscreen = min([index for index, b in enumerate(bins) if b > cli_args.max_x])
if last_onscreen < len(bins) - 1:
bins = bins[:last_onscreen] + [bins[-1]]
return bins
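# Worked example of the bin-size path above: min_val=0.2, max_val=2.3 and
# bin_size=0.5 with the default half-width offset give edges
#   [-0.25, 0.25, 0.75, 1.25, 1.75, 2.25, 2.75]
# so integer-valued data lands mid-bin rather than on an edge.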
if __name__ == '__main__':
hist = Histogram(grid_default_on=True)
hist.graphify()
| agpl-3.0 |
vivekkodu/robotframework-selenium2library | src/Selenium2Library/keywords/_selectelement.py | 9 | 14906 | from selenium.webdriver.support.ui import Select
from keywordgroup import KeywordGroup
class _SelectElementKeywords(KeywordGroup):
# Public
def get_list_items(self, locator):
"""Returns the values in the select list identified by `locator`.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
select, options = self._get_select_list_options(locator)
return self._get_labels_for_options(options)
def get_selected_list_label(self, locator):
"""Returns the visible label of the selected element from the select list identified by `locator`.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
select = self._get_select_list(locator)
return select.first_selected_option.text
def get_selected_list_labels(self, locator):
"""Returns the visible labels of selected elements (as a list) from the select list identified by `locator`.
Fails if there is no selection.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
select, options = self._get_select_list_options_selected(locator)
if len(options) == 0:
raise ValueError("Select list with locator '%s' does not have any selected values")
return self._get_labels_for_options(options)
def get_selected_list_value(self, locator):
"""Returns the value of the selected element from the select list identified by `locator`.
Return value is read from `value` attribute of the selected element.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
select = self._get_select_list(locator)
return select.first_selected_option.get_attribute('value')
def get_selected_list_values(self, locator):
"""Returns the values of selected elements (as a list) from the select list identified by `locator`.
Fails if there is no selection.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
select, options = self._get_select_list_options_selected(locator)
if len(options) == 0:
raise ValueError("Select list with locator '%s' does not have any selected values")
return self._get_values_for_options(options)
def list_selection_should_be(self, locator, *items):
"""Verifies the selection of select list identified by `locator` is exactly `*items`.
If you want to test that no option is selected, simply give no `items`.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
items_str = items and "option(s) [ %s ]" % " | ".join(items) or "no options"
self._info("Verifying list '%s' has %s selected." % (locator, items_str))
items = list(items)
self.page_should_contain_list(locator)
select, options = self._get_select_list_options_selected(locator)
if not items and len(options) == 0:
return
selected_values = self._get_values_for_options(options)
selected_labels = self._get_labels_for_options(options)
err = "List '%s' should have had selection [ %s ] but it was [ %s ]" \
% (locator, ' | '.join(items), ' | '.join(selected_labels))
for item in items:
if item not in selected_values + selected_labels:
raise AssertionError(err)
for selected_value, selected_label in zip(selected_values, selected_labels):
if selected_value not in items and selected_label not in items:
raise AssertionError(err)
def list_should_have_no_selections(self, locator):
"""Verifies select list identified by `locator` has no selections.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
self._info("Verifying list '%s' has no selection." % locator)
select, options = self._get_select_list_options_selected(locator)
if options:
selected_labels = self._get_labels_for_options(options)
items_str = " | ".join(selected_labels)
raise AssertionError("List '%s' should have had no selection "
"(selection was [ %s ])" % (locator, items_str))
def page_should_contain_list(self, locator, message='', loglevel='INFO'):
"""Verifies select list identified by `locator` is found from current page.
See `Page Should Contain Element` for explanation about `message` and
`loglevel` arguments.
Key attributes for lists are `id` and `name`. See `introduction` for
details about locating elements.
"""
self._page_should_contain_element(locator, 'list', message, loglevel)
def page_should_not_contain_list(self, locator, message='', loglevel='INFO'):
"""Verifies select list identified by `locator` is not found from current page.
See `Page Should Contain Element` for explanation about `message` and
`loglevel` arguments.
Key attributes for lists are `id` and `name`. See `introduction` for
details about locating elements.
"""
self._page_should_not_contain_element(locator, 'list', message, loglevel)
def select_all_from_list(self, locator):
"""Selects all values from multi-select list identified by `id`.
Key attributes for lists are `id` and `name`. See `introduction` for
details about locating elements.
"""
self._info("Selecting all options from list '%s'." % locator)
select = self._get_select_list(locator)
if not select.is_multiple:
raise RuntimeError("Keyword 'Select all from list' works only for multiselect lists.")
for i in range(len(select.options)):
select.select_by_index(i)
def select_from_list(self, locator, *items):
"""Selects `*items` from list identified by `locator`
If more than one value is given for a single-selection list, the last
value will be selected. If the target list is a multi-selection list,
and `*items` is an empty list, all values of the list will be selected.
*items try to select by value then by label.
It's faster to use 'by index/value/label' functions.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
items_str = items and "option(s) '%s'" % ", ".join(items) or "all options"
self._info("Selecting %s from list '%s'." % (items_str, locator))
select = self._get_select_list(locator)
if not items:
for i in range(len(select.options)):
select.select_by_index(i)
return
for item in items:
try: select.select_by_value(item)
except:
try: select.select_by_visible_text(item)
except: continue
def select_from_list_by_index(self, locator, *indexes):
"""Selects `*indexes` from list identified by `locator`
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
if not indexes:
raise ValueError("No index given.")
items_str = "index(es) '%s'" % ", ".join(indexes)
self._info("Selecting %s from list '%s'." % (items_str, locator))
select = self._get_select_list(locator)
for index in indexes:
select.select_by_index(int(index))
def select_from_list_by_value(self, locator, *values):
"""Selects `*values` from list identified by `locator`
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
if not values:
raise ValueError("No value given.")
items_str = "value(s) '%s'" % ", ".join(values)
self._info("Selecting %s from list '%s'." % (items_str, locator))
select = self._get_select_list(locator)
for value in values:
select.select_by_value(value)
def select_from_list_by_label(self, locator, *labels):
"""Selects `*labels` from list identified by `locator`
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
if not labels:
raise ValueError("No value given.")
items_str = "label(s) '%s'" % ", ".join(labels)
self._info("Selecting %s from list '%s'." % (items_str, locator))
select = self._get_select_list(locator)
for label in labels:
select.select_by_visible_text(label)
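# Illustrative Robot Framework usage of the selection keywords above (locator
# and option labels are placeholders; multi-select semantics follow the
# docstrings):
#
#   Select From List By Label    id=countries    Finland    Sweden
#   List Selection Should Be     id=countries    Finland    Sweden
#   Unselect From List By Label  id=countries    Sweden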
def unselect_from_list(self, locator, *items):
"""Unselects given values from select list identified by locator.
As a special case, giving empty list as `*items` will remove all
selections.
*items try to unselect by value AND by label.
It's faster to use 'by index/value/label' functions.
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
items_str = items and "option(s) '%s'" % ", ".join(items) or "all options"
self._info("Unselecting %s from list '%s'." % (items_str, locator))
select = self._get_select_list(locator)
if not select.is_multiple:
raise RuntimeError("Keyword 'Unselect from list' works only for multiselect lists.")
if not items:
select.deselect_all()
return
select, options = self._get_select_list_options(select)
for item in items:
select.deselect_by_value(item)
select.deselect_by_visible_text(item)
def unselect_from_list_by_index(self, locator, *indexes):
"""Unselects `*indexes` from list identified by `locator`
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
if not indexes:
raise ValueError("No index given.")
items_str = "index(es) '%s'" % ", ".join(indexes)
self._info("Unselecting %s from list '%s'." % (items_str, locator))
select = self._get_select_list(locator)
if not select.is_multiple:
raise RuntimeError("Keyword 'Unselect from list' works only for multiselect lists.")
for index in indexes:
select.deselect_by_index(int(index))
def unselect_from_list_by_value(self, locator, *values):
"""Unselects `*values` from list identified by `locator`
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
if not values:
raise ValueError("No value given.")
items_str = "value(s) '%s'" % ", ".join(values)
self._info("Unselecting %s from list '%s'." % (items_str, locator))
select = self._get_select_list(locator)
if not select.is_multiple:
raise RuntimeError("Keyword 'Unselect from list' works only for multiselect lists.")
for value in values:
select.deselect_by_value(value)
def unselect_from_list_by_label(self, locator, *labels):
"""Unselects `*labels` from list identified by `locator`
Select list keywords work on both lists and combo boxes. Key attributes for
select lists are `id` and `name`. See `introduction` for details about
locating elements.
"""
if not labels:
raise ValueError("No value given.")
items_str = "label(s) '%s'" % ", ".join(labels)
self._info("Unselecting %s from list '%s'." % (items_str, locator))
select = self._get_select_list(locator)
if not select.is_multiple:
raise RuntimeError("Keyword 'Unselect from list' works only for multiselect lists.")
for label in labels:
select.deselect_by_visible_text(label)
# Private
def _get_labels_for_options(self, options):
labels = []
for option in options:
labels.append(option.text)
return labels
def _get_select_list(self, locator):
el = self._element_find(locator, True, True, 'select')
return Select(el)
def _get_select_list_options(self, select_list_or_locator):
if isinstance(select_list_or_locator, Select):
select = select_list_or_locator
else:
select = self._get_select_list(select_list_or_locator)
return select, select.options
def _get_select_list_options_selected(self, locator):
select = self._get_select_list(locator)
# TODO: Handle possible exception thrown by all_selected_options
return select, select.all_selected_options
def _get_values_for_options(self, options):
values = []
for option in options:
values.append(option.get_attribute('value'))
return values
def _is_multiselect_list(self, select):
multiple_value = select.get_attribute('multiple')
if multiple_value is not None and (multiple_value == 'true' or multiple_value == 'multiple'):
return True
return False
def _unselect_all_options_from_multi_select_list(self, select):
self._current_browser().execute_script("arguments[0].selectedIndex = -1;", select)
def _unselect_option_from_multi_select_list(self, select, options, index):
if options[index].is_selected():
options[index].click()
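# Illustrative Robot Framework usage of the keywords defined above (the
# locator 'interests' and the option values/labels are made-up examples):
#
#     Select From List By Value      interests    music    sports
#     Select From List By Label      interests    Music
#     Unselect From List By Index    interests    0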
| apache-2.0 |
WUJISHANXIA/wujishanxia | bootcamp/articles/tests/test_views.py | 1 | 6318 | from django.contrib.auth import get_user_model
from django.http import HttpResponseBadRequest
from django.core.urlresolvers import reverse
from django.test import Client, TestCase
from bootcamp.articles.models import Article
class TestViews(TestCase):
"""
Includes tests for all the functionality
associated with Views
"""
def setUp(self):
self.client = Client()
self.other_client = Client()
self.user = get_user_model().objects.create_user(
username='test_user',
email='[email protected]',
password='top_secret'
)
self.other_user = get_user_model().objects.create_user(
username='other_test_user',
email='[email protected]',
password='top_secret'
)
self.client.login(username='test_user', password='top_secret')
self.other_client.login(
username='other_test_user', password='top_secret')
self.title = 'A really nice to-be title'
self.content = '''This is a really good content, just if somebody published
it, that would be awesome, but no, nobody wants to publish it, because
        they know this is just a test, and you know that nobody wants to
publish a test, just a test; everybody always wants the real deal.'''
self.article = Article.objects.create(
create_user=self.user, title='A really nice title',
content=self.content, tags='list, lists', status='P')
def test_index_articles(self):
response = self.client.get(reverse('articles'))
self.assertEqual(response.status_code, 200)
response_no_art = self.client.get(reverse(
'article', kwargs={'slug': 'no-slug'}))
self.assertEqual(response_no_art.status_code, 404)
def test_individual_article(self):
response = self.client.post(reverse('write'), {'title': self.title,
'content': self.content,
'tags': 'list, lists',
'status': 'P'})
response_art = self.client.get(
reverse('article', kwargs={'slug': 'a-really-nice-to-be-title'}))
self.assertEqual(response.status_code, 302)
self.assertEqual(response_art.status_code, 200)
self.assertEqual(response_art.context['article'].slug,
'a-really-nice-to-be-title')
def test_drafts_workflow(self):
response = self.client.post(reverse('write'), {'title': self.title,
'content': self.content,
'tags': 'list, lists',
'status': 'D'
})
resp = self.client.get(reverse('drafts'))
self.assertEqual(response.status_code, 302)
self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.context['drafts'][0].slug,
                         'a-really-nice-to-be-title')
def test_filter_by_tag(self):
response = self.client.post(reverse('write'), {'title': self.title,
'content': self.content,
'tags': 'list',
'status': 'P'})
response_tag = self.client.get(
reverse('tag', kwargs={'tag_name': 'list'}))
self.assertEqual(response.status_code, 302)
self.assertEqual(response_tag.status_code, 200)
self.assertTrue(
'list' in list(response_tag.context['popular_tags'])[0])
def test_edits_article(self):
"""
"""
response = self.client.post(reverse('write'), {'title': self.title,
'content': self.content,
'tags': 'list, lists',
'status': 'P'
})
art = Article.objects.latest('create_date')
art_content = art.content
response_two = self.client.post(
reverse('edit_article', kwargs={'pk': art.id}),
{'content': 'some_different_content_here',
'title': self.title,
'tags': 'list, lists',
'status': 'P'})
art.refresh_from_db()
self.assertEqual(response.status_code, 302)
self.assertEqual(response_two.status_code, 302)
self.assertNotEqual(art_content, art.content)
def test_empty_preview(self):
request = self.client.post(reverse('preview'), {'content': ''},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(request.status_code, 200)
self.assertEqual(request.content, b'Nothing to display :(')
def test_preview_with_text(self):
content = '<p>This is a really good content.</p>'
request = self.client.post(reverse('preview'), {'content': content},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(request.status_code, 200)
self.assertEqual(
request.content,
b'<p><p>This is a really good content.</p></p>')
def test_bad_request_preview(self):
request = self.client.get(reverse('preview'))
self.assertEqual(request.status_code, 400)
self.assertTrue(isinstance(request, HttpResponseBadRequest))
def test_comment_view(self):
request = self.client.post(reverse('comment'),
{'article': self.article.id,
'comment': 'This is a good comment'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(request.status_code, 200)
self.assertTrue(b'This is a good comment' in request.content)
def test_bad_request_comment(self):
request = self.client.get(reverse('comment'))
self.assertEqual(request.status_code, 400)
self.assertTrue(isinstance(request, HttpResponseBadRequest))
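# These tests can be run with the standard Django test runner, e.g. (the
# exact app label assumes the project layout implied by the imports above):
#
#     python manage.py test bootcamp.articles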
| mit |
Vizerai/grpc | tools/buildgen/plugins/generate_vsprojects.py | 19 | 3036 | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Buildgen vsprojects plugin.
This parses the lists of libraries and targets, and generates the globals
"vsprojects", "vsproject_dict" and "vspackages_dict", to be used by the
Visual Studio generators.
"""
import hashlib
import re
def mako_plugin(dictionary):
"""The exported plugin code for generate_vsprojeccts
We want to help the work of the visual studio generators.
"""
libs = dictionary.get('libs', [])
targets = dictionary.get('targets', [])
for lib in libs:
lib['is_library'] = True
for target in targets:
target['is_library'] = False
projects = []
projects.extend(libs)
projects.extend(targets)
for target in projects:
if 'build' in target and target['build'] == 'test':
default_test_dir = 'test'
else:
default_test_dir = '.'
if 'vs_config_type' not in target:
if 'build' in target and target['build'] == 'test':
target['vs_config_type'] = 'Application'
else:
target['vs_config_type'] = 'StaticLibrary'
if 'vs_packages' not in target:
target['vs_packages'] = []
if 'vs_props' not in target:
target['vs_props'] = []
target['vs_proj_dir'] = target.get('vs_proj_dir', default_test_dir)
if target.get('vs_project_guid',
None) is None and 'windows' in target.get(
'platforms', ['windows']):
name = target['name']
guid = re.sub('(........)(....)(....)(....)(.*)',
r'{\1-\2-\3-\4-\5}',
hashlib.md5(name).hexdigest())
target['vs_project_guid'] = guid.upper()
# Exclude projects without a visual project guid, such as the tests.
projects = [
project for project in projects if project.get('vs_project_guid', None)
]
projects = [
project for project in projects
if project['language'] != 'c++' or project['build'] == 'all' or
project['build'] == 'protoc' or (project['language'] == 'c++' and (
project['build'] == 'test' or project['build'] == 'private'))
]
project_dict = dict([(p['name'], p) for p in projects])
packages = dictionary.get('vspackages', [])
packages_dict = dict([(p['name'], p) for p in packages])
dictionary['vsprojects'] = projects
dictionary['vsproject_dict'] = project_dict
dictionary['vspackages_dict'] = packages_dict
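if __name__ == '__main__':
    # Minimal sketch (not part of the buildgen pipeline): feed the plugin a
    # hand-written dictionary containing one hypothetical test target and
    # inspect the defaults it fills in.
    demo = {
        'libs': [],
        'targets': [{'name': 'demo_test', 'build': 'test', 'language': 'c',
                     'platforms': ['windows']}],
    }
    mako_plugin(demo)
    print(demo['vsprojects'][0]['vs_config_type'])  # 'Application'
    print(demo['vsprojects'][0]['vs_proj_dir'])     # 'test'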
| apache-2.0 |
overtherain/scriptfile | software/googleAppEngine/lib/PyAMF/pyamf/tests/test_xml.py | 26 | 2009 | # -*- coding: utf-8 -*-
#
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Tests for XML library integration
@since: 0.4
"""
import unittest
import pyamf.xml
from pyamf import util
class ElementTreeTestCase(unittest.TestCase):
"""
Tests the type mappings.
"""
xml = '<foo bar="baz" />'
def check_amf0(self, bytes, xml):
b = util.BufferedByteStream(bytes)
self.assertEqual(b.read_char(), 15)
l = b.read_ulong()
self.assertEqual(l, b.remaining())
self.assertEqual(b.read(), xml)
def check_amf3(self, bytes, xml):
b = util.BufferedByteStream(bytes)
self.assertEqual(b.read_char(), 11)
l = b.read_uchar()
self.assertEqual(l >> 1, b.remaining())
self.assertEqual(b.read(), xml)
for mod in pyamf.xml.ETREE_MODULES:
name = 'test_' + mod.replace('.', '_')
def check_etree(self):
# holy hack batman
import inspect
mod = inspect.stack()[1][0].f_locals['testMethod'].__name__[5:]
mod = mod.replace('_', '.')
try:
etree = util.get_module(mod)
except ImportError:
self.skipTest('%r is not available' % (mod,))
element = etree.fromstring(self.xml)
xml = etree.tostring(element)
old = pyamf.set_default_etree(etree)
if old:
self.addCleanup(lambda x: pyamf.set_default_etree(x), old)
bytes = pyamf.encode(element, encoding=pyamf.AMF0).getvalue()
self.check_amf0(bytes, xml)
new_element = pyamf.decode(bytes, encoding=pyamf.AMF0).next()
self.assertIdentical(type(element), type(new_element))
bytes = pyamf.encode(element, encoding=pyamf.AMF3).getvalue()
self.check_amf3(bytes, xml)
new_element = pyamf.decode(bytes, encoding=pyamf.AMF3).next()
self.assertIdentical(type(element), type(new_element))
check_etree.__name__ = name
setattr(ElementTreeTestCase, name, check_etree)
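# The loop above stamps one generated method per candidate ElementTree
# implementation onto ElementTreeTestCase (e.g. a 'test_lxml_etree' method
# for the 'lxml.etree' module), so a plain unittest run exercises every
# etree library that is actually importable and skips the rest.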
| mit |
albertrdixon/CouchPotatoServer | libs/bs4/builder/__init__.py | 447 | 11151 | from collections import defaultdict
import itertools
import sys
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
whitespace_re
)
__all__ = [
'HTMLTreeBuilder',
'SAXTreeBuilder',
'TreeBuilder',
'TreeBuilderRegistry',
]
# Some useful features for a TreeBuilder to have.
FAST = 'fast'
PERMISSIVE = 'permissive'
STRICT = 'strict'
XML = 'xml'
HTML = 'html'
HTML_5 = 'html5'
class TreeBuilderRegistry(object):
def __init__(self):
self.builders_for_feature = defaultdict(list)
self.builders = []
def register(self, treebuilder_class):
"""Register a treebuilder based on its advertised features."""
for feature in treebuilder_class.features:
self.builders_for_feature[feature].insert(0, treebuilder_class)
self.builders.insert(0, treebuilder_class)
def lookup(self, *features):
if len(self.builders) == 0:
# There are no builders at all.
return None
if len(features) == 0:
# They didn't ask for any features. Give them the most
# recently registered builder.
return self.builders[0]
# Go down the list of features in order, and eliminate any builders
# that don't match every feature.
features = list(features)
features.reverse()
candidates = None
candidate_set = None
while len(features) > 0:
feature = features.pop()
we_have_the_feature = self.builders_for_feature.get(feature, [])
if len(we_have_the_feature) > 0:
if candidates is None:
candidates = we_have_the_feature
candidate_set = set(candidates)
else:
# Eliminate any candidates that don't have this feature.
candidate_set = candidate_set.intersection(
set(we_have_the_feature))
# The only valid candidates are the ones in candidate_set.
# Go through the original list of candidates and pick the first one
# that's in candidate_set.
if candidate_set is None:
return None
for candidate in candidates:
if candidate in candidate_set:
return candidate
return None
# The BeautifulSoup class will take feature lists from developers and use them
# to look up builders in this registry.
builder_registry = TreeBuilderRegistry()
class TreeBuilder(object):
"""Turn a document into a Beautiful Soup object tree."""
features = []
is_xml = False
preserve_whitespace_tags = set()
empty_element_tags = None # A tag will be considered an empty-element
# tag when and only when it has no contents.
# A value for these tag/attribute combinations is a space- or
# comma-separated list of CDATA, rather than a single CDATA.
cdata_list_attributes = {}
def __init__(self):
self.soup = None
def reset(self):
pass
def can_be_empty_element(self, tag_name):
"""Might a tag with this name be an empty-element tag?
The final markup may or may not actually present this tag as
self-closing.
For instance: an HTMLBuilder does not consider a <p> tag to be
an empty-element tag (it's not in
HTMLBuilder.empty_element_tags). This means an empty <p> tag
will be presented as "<p></p>", not "<p />".
The default implementation has no opinion about which tags are
empty-element tags, so a tag will be presented as an
empty-element tag if and only if it has no contents.
"<foo></foo>" will become "<foo />", and "<foo>bar</foo>" will
be left alone.
"""
if self.empty_element_tags is None:
return True
return tag_name in self.empty_element_tags
def feed(self, markup):
raise NotImplementedError()
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
return markup, None, None, False
def test_fragment_to_document(self, fragment):
"""Wrap an HTML fragment to make it look like a document.
Different parsers do this differently. For instance, lxml
introduces an empty <head> tag, and html5lib
doesn't. Abstracting this away lets us write simple tests
which run HTML fragments through the parser and compare the
results against other HTML fragments.
This method should not be used outside of tests.
"""
return fragment
def set_up_substitutions(self, tag):
return False
def _replace_cdata_list_attribute_values(self, tag_name, attrs):
"""Replaces class="foo bar" with class=["foo", "bar"]
Modifies its input in place.
"""
if not attrs:
return attrs
if self.cdata_list_attributes:
universal = self.cdata_list_attributes.get('*', [])
tag_specific = self.cdata_list_attributes.get(
tag_name.lower(), None)
for attr in attrs.keys():
if attr in universal or (tag_specific and attr in tag_specific):
# We have a "class"-type attribute whose string
# value is a whitespace-separated list of
# values. Split it into a list.
value = attrs[attr]
if isinstance(value, basestring):
values = whitespace_re.split(value)
else:
# html5lib sometimes calls setAttributes twice
# for the same tag when rearranging the parse
# tree. On the second call the attribute value
# here is already a list. If this happens,
# leave the value alone rather than trying to
# split it again.
values = value
attrs[attr] = values
return attrs
class SAXTreeBuilder(TreeBuilder):
"""A Beautiful Soup treebuilder that listens for SAX events."""
def feed(self, markup):
raise NotImplementedError()
def close(self):
pass
def startElement(self, name, attrs):
attrs = dict((key[1], value) for key, value in list(attrs.items()))
#print "Start %s, %r" % (name, attrs)
self.soup.handle_starttag(name, attrs)
def endElement(self, name):
#print "End %s" % name
self.soup.handle_endtag(name)
def startElementNS(self, nsTuple, nodeName, attrs):
# Throw away (ns, nodeName) for now.
self.startElement(nodeName, attrs)
def endElementNS(self, nsTuple, nodeName):
# Throw away (ns, nodeName) for now.
self.endElement(nodeName)
#handler.endElementNS((ns, node.nodeName), node.nodeName)
def startPrefixMapping(self, prefix, nodeValue):
# Ignore the prefix for now.
pass
def endPrefixMapping(self, prefix):
# Ignore the prefix for now.
# handler.endPrefixMapping(prefix)
pass
def characters(self, content):
self.soup.handle_data(content)
def startDocument(self):
pass
def endDocument(self):
pass
class HTMLTreeBuilder(TreeBuilder):
"""This TreeBuilder knows facts about HTML.
Such as which tags are empty-element tags.
"""
preserve_whitespace_tags = set(['pre', 'textarea'])
empty_element_tags = set(['br' , 'hr', 'input', 'img', 'meta',
'spacer', 'link', 'frame', 'base'])
# The HTML standard defines these attributes as containing a
# space-separated list of values, not a single value. That is,
# class="foo bar" means that the 'class' attribute has two values,
# 'foo' and 'bar', not the single value 'foo bar'. When we
# encounter one of these attributes, we will parse its value into
# a list of values if possible. Upon output, the list will be
# converted back into a string.
cdata_list_attributes = {
"*" : ['class', 'accesskey', 'dropzone'],
"a" : ['rel', 'rev'],
"link" : ['rel', 'rev'],
"td" : ["headers"],
"th" : ["headers"],
"td" : ["headers"],
"form" : ["accept-charset"],
"object" : ["archive"],
# These are HTML5 specific, as are *.accesskey and *.dropzone above.
"area" : ["rel"],
"icon" : ["sizes"],
"iframe" : ["sandbox"],
"output" : ["for"],
}
def set_up_substitutions(self, tag):
# We are only interested in <meta> tags
if tag.name != 'meta':
return False
http_equiv = tag.get('http-equiv')
content = tag.get('content')
charset = tag.get('charset')
# We are interested in <meta> tags that say what encoding the
# document was originally in. This means HTML 5-style <meta>
# tags that provide the "charset" attribute. It also means
# HTML 4-style <meta> tags that provide the "content"
# attribute and have "http-equiv" set to "content-type".
#
# In both cases we will replace the value of the appropriate
# attribute with a standin object that can take on any
# encoding.
meta_encoding = None
if charset is not None:
# HTML 5 style:
# <meta charset="utf8">
meta_encoding = charset
tag['charset'] = CharsetMetaAttributeValue(charset)
elif (content is not None and http_equiv is not None
and http_equiv.lower() == 'content-type'):
# HTML 4 style:
# <meta http-equiv="content-type" content="text/html; charset=utf8">
tag['content'] = ContentMetaAttributeValue(content)
return (meta_encoding is not None)
def register_treebuilders_from(module):
"""Copy TreeBuilders from the given module into this module."""
# I'm fairly sure this is not the best way to do this.
this_module = sys.modules['bs4.builder']
for name in module.__all__:
obj = getattr(module, name)
if issubclass(obj, TreeBuilder):
setattr(this_module, name, obj)
this_module.__all__.append(name)
# Register the builder while we're at it.
this_module.builder_registry.register(obj)
class ParserRejectedMarkup(Exception):
pass
# Builders are registered in reverse order of priority, so that custom
# builder registrations will take precedence. In general, we want lxml
# to take precedence over html5lib, because it's faster. And we only
# want to use HTMLParser as a last result.
from . import _htmlparser
register_treebuilders_from(_htmlparser)
try:
from . import _html5lib
register_treebuilders_from(_html5lib)
except ImportError:
# They don't have html5lib installed.
pass
try:
from . import _lxml
register_treebuilders_from(_lxml)
except ImportError:
# They don't have lxml installed.
pass
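# Quick illustration of the registry populated above (from a normal
# interpreter session; which classes come back depends on whether lxml
# and/or html5lib were importable):
#
#     >>> from bs4.builder import builder_registry
#     >>> builder_registry.lookup('html')          # best available HTML builder
#     >>> builder_registry.lookup('fast', 'html')  # e.g. the lxml builder, if installed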
| gpl-3.0 |
CReSIS/OPS | conf/tools/createUserExample.py | 1 | 1232 | # After editing the script (e.g. with gedit), run this script by following these steps:
# (1) Switch to root user: sudo -i
# (2) Activate VirtualEnv: source /usr/bin/venv/bin/activate
# (3) Open a Python shell with Django environment: python /var/django/ops/manage.py shell
# (4) Run this script: execfile('createUserExample.py')
# (5) Press ctrl-d or type quit() or exit()
from django.contrib.auth.models import User
# set new user properties
userName='anonymous'
userEmail='[email protected]'
userPassword='anonymous'
# create the new user
newUser = User.objects.create_user(userName, userEmail, userPassword)
# set the user profile options (example for cresis superuser)
newUser.profile.rds_layer_groups = [1,2]
newUser.profile.accum_layer_groups = [1,2]
newUser.profile.kuband_layer_groups = [1,2]
newUser.profile.snow_layer_groups = [1,2]
newUser.profile.rds_season_groups = [1,2]
newUser.profile.accum_season_groups = [1,2]
newUser.profile.kuband_season_groups = [1,2]
newUser.profile.snow_season_groups = [1,2]
newUser.profile.layerGroupRelease = True
newUser.profile.bulkDeleteData = False
newUser.profile.createData = True
newUser.profile.seasonRelease = True
# save the user profile
newUser.profile.save()
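# Optional sanity check (run in the same Django shell session): confirm the
# account was actually stored.
print(User.objects.filter(username=userName).exists())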
| gpl-3.0 |
newemailjdm/pybrain | pybrain/rl/agents/agent.py | 31 | 1153 | __author__ = 'Tom Schaul, [email protected]'
from pybrain.utilities import abstractMethod, Named
class Agent(Named):
""" An agent is an entity capable of producing actions, based on previous observations.
Generally it will also learn from experience. It can interact directly with a Task.
"""
def integrateObservation(self, obs):
""" Integrate the current observation of the environment.
:arg obs: The last observation returned from the environment
:type obs: by default, this is assumed to be a numpy array of doubles
"""
pass
def getAction(self):
""" Return a chosen action.
        :rtype: by default, this is assumed to be a numpy array of doubles.
:note: This method is abstract and needs to be implemented.
"""
abstractMethod()
def giveReward(self, r):
""" Reward or punish the agent.
:key r: reward, if C{r} is positive, punishment if C{r} is negative
:type r: double
"""
pass
def newEpisode(self):
""" Inform the agent that a new episode has started. """
pass
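# A minimal concrete agent, shown here only to illustrate the interface
# above; it is not part of PyBrain.
class StaticAgent(Agent):
    """Illustrative subclass: ignores all observations and rewards and
    always proposes the same fixed action."""
    def __init__(self, action):
        self.action = action
    def getAction(self):
        return self.action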
| bsd-3-clause |
barykaed/Pelican-Test | fsp_env/Lib/re.py | 206 | 15262 | #
# Secret Labs' Regular Expression Engine
#
# re-compatible interface for the sre matching engine
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# This version of the SRE library can be redistributed under CNRI's
# Python 1.6 license. For any other use, please contact Secret Labs
# AB ([email protected]).
#
# Portions of this engine have been developed in cooperation with
# CNRI. Hewlett-Packard provided funding for 1.6 integration and
# other compatibility work.
#
r"""Support for regular expressions (RE).
This module provides regular expression matching operations similar to
those found in Perl. It supports both 8-bit and Unicode strings; both
the pattern and the strings being processed can contain null bytes and
characters outside the US ASCII range.
Regular expressions can contain both special and ordinary characters.
Most ordinary characters, like "A", "a", or "0", are the simplest
regular expressions; they simply match themselves. You can
concatenate ordinary characters, so last matches the string 'last'.
The special characters are:
"." Matches any character except a newline.
"^" Matches the start of the string.
"$" Matches the end of the string or just before the newline at
the end of the string.
"*" Matches 0 or more (greedy) repetitions of the preceding RE.
Greedy means that it will match as many repetitions as possible.
"+" Matches 1 or more (greedy) repetitions of the preceding RE.
"?" Matches 0 or 1 (greedy) of the preceding RE.
*?,+?,?? Non-greedy versions of the previous three special characters.
{m,n} Matches from m to n repetitions of the preceding RE.
{m,n}? Non-greedy version of the above.
"\\" Either escapes special characters or signals a special sequence.
[] Indicates a set of characters.
A "^" as the first character indicates a complementing set.
"|" A|B, creates an RE that will match either A or B.
(...) Matches the RE inside the parentheses.
The contents can be retrieved or matched later in the string.
(?aiLmsux) Set the A, I, L, M, S, U, or X flag for the RE (see below).
(?:...) Non-grouping version of regular parentheses.
(?P<name>...) The substring matched by the group is accessible by name.
(?P=name) Matches the text matched earlier by the group named name.
(?#...) A comment; ignored.
(?=...) Matches if ... matches next, but doesn't consume the string.
(?!...) Matches if ... doesn't match next.
(?<=...) Matches if preceded by ... (must be fixed length).
(?<!...) Matches if not preceded by ... (must be fixed length).
(?(id/name)yes|no) Matches yes pattern if the group with id/name matched,
the (optional) no pattern otherwise.
The special sequences consist of "\\" and a character from the list
below. If the ordinary character is not on the list, then the
resulting RE will match the second character.
\number Matches the contents of the group of the same number.
\A Matches only at the start of the string.
\Z Matches only at the end of the string.
\b Matches the empty string, but only at the start or end of a word.
\B Matches the empty string, but not at the start or end of a word.
\d Matches any decimal digit; equivalent to the set [0-9] in
bytes patterns or string patterns with the ASCII flag.
In string patterns without the ASCII flag, it will match the whole
range of Unicode digits.
\D Matches any non-digit character; equivalent to [^\d].
\s Matches any whitespace character; equivalent to [ \t\n\r\f\v] in
bytes patterns or string patterns with the ASCII flag.
In string patterns without the ASCII flag, it will match the whole
range of Unicode whitespace characters.
\S Matches any non-whitespace character; equivalent to [^\s].
\w Matches any alphanumeric character; equivalent to [a-zA-Z0-9_]
in bytes patterns or string patterns with the ASCII flag.
In string patterns without the ASCII flag, it will match the
range of Unicode alphanumeric characters (letters plus digits
plus underscore).
With LOCALE, it will match the set [0-9_] plus characters defined
as letters for the current locale.
\W Matches the complement of \w.
\\ Matches a literal backslash.
This module exports the following functions:
match Match a regular expression pattern to the beginning of a string.
fullmatch Match a regular expression pattern to all of a string.
search Search a string for the presence of a pattern.
sub Substitute occurrences of a pattern found in a string.
subn Same as sub, but also return the number of substitutions made.
split Split a string by the occurrences of a pattern.
findall Find all occurrences of a pattern in a string.
finditer Return an iterator yielding a match object for each match.
compile Compile a pattern into a RegexObject.
purge Clear the regular expression cache.
escape Backslash all non-alphanumerics in a string.
Some of the functions in this module takes flags as optional parameters:
A ASCII For string patterns, make \w, \W, \b, \B, \d, \D
match the corresponding ASCII character categories
(rather than the whole Unicode categories, which is the
default).
For bytes patterns, this flag is the only available
behaviour and needn't be specified.
I IGNORECASE Perform case-insensitive matching.
L LOCALE Make \w, \W, \b, \B, dependent on the current locale.
M MULTILINE "^" matches the beginning of lines (after a newline)
as well as the string.
"$" matches the end of lines (before a newline) as well
as the end of the string.
S DOTALL "." matches any character at all, including the newline.
X VERBOSE Ignore whitespace and comments for nicer looking RE's.
U UNICODE For compatibility only. Ignored for string patterns (it
is the default), and forbidden for bytes patterns.
This module also defines an exception 'error'.
"""
import sys
import sre_compile
import sre_parse
# public symbols
__all__ = [ "match", "fullmatch", "search", "sub", "subn", "split", "findall",
"compile", "purge", "template", "escape", "A", "I", "L", "M", "S", "X",
"U", "ASCII", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE",
"UNICODE", "error" ]
__version__ = "2.2.1"
# flags
A = ASCII = sre_compile.SRE_FLAG_ASCII # assume ascii "locale"
I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE # ignore case
L = LOCALE = sre_compile.SRE_FLAG_LOCALE # assume current 8-bit locale
U = UNICODE = sre_compile.SRE_FLAG_UNICODE # assume unicode "locale"
M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE # make anchors look for newline
S = DOTALL = sre_compile.SRE_FLAG_DOTALL # make dot match newline
X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE # ignore whitespace and comments
# sre extensions (experimental, don't rely on these)
T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE # disable backtracking
DEBUG = sre_compile.SRE_FLAG_DEBUG # dump pattern after compilation
# sre exception
error = sre_compile.error
# --------------------------------------------------------------------
# public interface
def match(pattern, string, flags=0):
"""Try to apply the pattern at the start of the string, returning
a match object, or None if no match was found."""
return _compile(pattern, flags).match(string)
def fullmatch(pattern, string, flags=0):
"""Try to apply the pattern to all of the string, returning
a match object, or None if no match was found."""
return _compile(pattern, flags).fullmatch(string)
def search(pattern, string, flags=0):
"""Scan through string looking for a match to the pattern, returning
a match object, or None if no match was found."""
return _compile(pattern, flags).search(string)
def sub(pattern, repl, string, count=0, flags=0):
"""Return the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in string by the
replacement repl. repl can be either a string or a callable;
if a string, backslash escapes in it are processed. If it is
a callable, it's passed the match object and must return
a replacement string to be used."""
return _compile(pattern, flags).sub(repl, string, count)
def subn(pattern, repl, string, count=0, flags=0):
"""Return a 2-tuple containing (new_string, number).
new_string is the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in the source
string by the replacement repl. number is the number of
substitutions that were made. repl can be either a string or a
callable; if a string, backslash escapes in it are processed.
If it is a callable, it's passed the match object and must
return a replacement string to be used."""
return _compile(pattern, flags).subn(repl, string, count)
def split(pattern, string, maxsplit=0, flags=0):
"""Split the source string by the occurrences of the pattern,
returning a list containing the resulting substrings. If
capturing parentheses are used in pattern, then the text of all
groups in the pattern are also returned as part of the resulting
list. If maxsplit is nonzero, at most maxsplit splits occur,
and the remainder of the string is returned as the final element
of the list."""
return _compile(pattern, flags).split(string, maxsplit)
def findall(pattern, string, flags=0):
"""Return a list of all non-overlapping matches in the string.
If one or more capturing groups are present in the pattern, return
a list of groups; this will be a list of tuples if the pattern
has more than one group.
Empty matches are included in the result."""
return _compile(pattern, flags).findall(string)
if sys.hexversion >= 0x02020000:
__all__.append("finditer")
def finditer(pattern, string, flags=0):
"""Return an iterator over all non-overlapping matches in the
string. For each match, the iterator returns a match object.
Empty matches are included in the result."""
return _compile(pattern, flags).finditer(string)
def compile(pattern, flags=0):
"Compile a regular expression pattern, returning a pattern object."
return _compile(pattern, flags)
def purge():
"Clear the regular expression caches"
_cache.clear()
_cache_repl.clear()
def template(pattern, flags=0):
"Compile a template pattern, returning a pattern object"
return _compile(pattern, flags|T)
_alphanum_str = frozenset(
"_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890")
_alphanum_bytes = frozenset(
b"_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890")
def escape(pattern):
"""
Escape all the characters in pattern except ASCII letters, numbers and '_'.
"""
if isinstance(pattern, str):
alphanum = _alphanum_str
s = list(pattern)
for i, c in enumerate(pattern):
if c not in alphanum:
if c == "\000":
s[i] = "\\000"
else:
s[i] = "\\" + c
return "".join(s)
else:
alphanum = _alphanum_bytes
s = []
esc = ord(b"\\")
for c in pattern:
if c in alphanum:
s.append(c)
else:
if c == 0:
s.extend(b"\\000")
else:
s.append(esc)
s.append(c)
return bytes(s)
# --------------------------------------------------------------------
# internals
_cache = {}
_cache_repl = {}
_pattern_type = type(sre_compile.compile("", 0))
_MAXCACHE = 512
def _compile(pattern, flags):
# internal: compile pattern
bypass_cache = flags & DEBUG
if not bypass_cache:
try:
return _cache[type(pattern), pattern, flags]
except KeyError:
pass
if isinstance(pattern, _pattern_type):
if flags:
raise ValueError(
"Cannot process flags argument with a compiled pattern")
return pattern
if not sre_compile.isstring(pattern):
raise TypeError("first argument must be string or compiled pattern")
p = sre_compile.compile(pattern, flags)
if not bypass_cache:
if len(_cache) >= _MAXCACHE:
_cache.clear()
_cache[type(pattern), pattern, flags] = p
return p
def _compile_repl(repl, pattern):
# internal: compile replacement pattern
try:
return _cache_repl[repl, pattern]
except KeyError:
pass
p = sre_parse.parse_template(repl, pattern)
if len(_cache_repl) >= _MAXCACHE:
_cache_repl.clear()
_cache_repl[repl, pattern] = p
return p
def _expand(pattern, match, template):
# internal: match.expand implementation hook
template = sre_parse.parse_template(template, pattern)
return sre_parse.expand_template(template, match)
def _subx(pattern, template):
# internal: pattern.sub/subn implementation helper
template = _compile_repl(template, pattern)
if not template[0] and len(template[1]) == 1:
# literal replacement
return template[1][0]
def filter(match, template=template):
return sre_parse.expand_template(template, match)
return filter
# register myself for pickling
import copyreg
def _pickle(p):
return _compile, (p.pattern, p.flags)
copyreg.pickle(_pattern_type, _pickle, _compile)
# --------------------------------------------------------------------
# experimental stuff (see python-dev discussions for details)
class Scanner:
def __init__(self, lexicon, flags=0):
from sre_constants import BRANCH, SUBPATTERN
self.lexicon = lexicon
# combine phrases into a compound pattern
p = []
s = sre_parse.Pattern()
s.flags = flags
for phrase, action in lexicon:
p.append(sre_parse.SubPattern(s, [
(SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))),
]))
s.groups = len(p)+1
p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
self.scanner = sre_compile.compile(p)
def scan(self, string):
result = []
append = result.append
match = self.scanner.scanner(string).match
i = 0
while 1:
m = match()
if not m:
break
j = m.end()
if i == j:
break
action = self.lexicon[m.lastindex-1][1]
if callable(action):
self.match = m
action = action(self, m.group())
if action is not None:
append(action)
i = j
return result, string[i:]
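# Illustrative use of the experimental Scanner class above (the lexicon and
# input are made up): tokenize integers and operators, skipping whitespace.
#
# >>> s = Scanner([(r'\d+', lambda sc, tok: ('NUM', int(tok))),
# ...              (r'[-+*/]', lambda sc, tok: ('OP', tok)),
# ...              (r'\s+', None)])
# >>> s.scan('3 + 4 * 2')
# ([('NUM', 3), ('OP', '+'), ('NUM', 4), ('OP', '*'), ('NUM', 2)], '')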
| mit |
riklaunim/django-custom-multisite | django/core/files/utils.py | 901 | 1230 | class FileProxyMixin(object):
"""
    A mixin class used to forward file methods to an underlying file
object. The internal file object has to be called "file"::
class FileProxy(FileProxyMixin):
def __init__(self, file):
self.file = file
"""
encoding = property(lambda self: self.file.encoding)
fileno = property(lambda self: self.file.fileno)
flush = property(lambda self: self.file.flush)
isatty = property(lambda self: self.file.isatty)
newlines = property(lambda self: self.file.newlines)
read = property(lambda self: self.file.read)
readinto = property(lambda self: self.file.readinto)
readline = property(lambda self: self.file.readline)
readlines = property(lambda self: self.file.readlines)
seek = property(lambda self: self.file.seek)
softspace = property(lambda self: self.file.softspace)
tell = property(lambda self: self.file.tell)
truncate = property(lambda self: self.file.truncate)
write = property(lambda self: self.file.write)
writelines = property(lambda self: self.file.writelines)
xreadlines = property(lambda self: self.file.xreadlines)
def __iter__(self):
return iter(self.file)
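if __name__ == '__main__':
    # Tiny demonstration of the pattern from the docstring above: proxy a
    # StringIO object and read through the forwarded methods.
    from io import StringIO
    class FileProxy(FileProxyMixin):
        def __init__(self, file):
            self.file = file
    proxy = FileProxy(StringIO(u'hello'))
    print(proxy.read())  # -> hello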
| bsd-3-clause |
motion2015/a3 | common/djangoapps/embargo/migrations/0005_add_courseaccessrulehistory.py | 102 | 7906 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CourseAccessRuleHistory'
db.create_table('embargo_courseaccessrulehistory', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('course_key', self.gf('xmodule_django.models.CourseKeyField')(max_length=255, db_index=True)),
('snapshot', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('embargo', ['CourseAccessRuleHistory'])
def backwards(self, orm):
# Deleting model 'CourseAccessRuleHistory'
db.delete_table('embargo_courseaccessrulehistory')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'embargo.country': {
'Meta': {'ordering': "['country']", 'object_name': 'Country'},
'country': ('django_countries.fields.CountryField', [], {'unique': 'True', 'max_length': '2', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.countryaccessrule': {
'Meta': {'unique_together': "(('restricted_course', 'country'),)", 'object_name': 'CountryAccessRule'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['embargo.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'restricted_course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['embargo.RestrictedCourse']"}),
'rule_type': ('django.db.models.fields.CharField', [], {'default': "'blacklist'", 'max_length': '255'})
},
'embargo.courseaccessrulehistory': {
'Meta': {'object_name': 'CourseAccessRuleHistory'},
'course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'snapshot': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'})
},
'embargo.embargoedcourse': {
'Meta': {'object_name': 'EmbargoedCourse'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'embargoed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.embargoedstate': {
'Meta': {'object_name': 'EmbargoedState'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'embargoed_countries': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.ipfilter': {
'Meta': {'object_name': 'IPFilter'},
'blacklist': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'whitelist': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'embargo.restrictedcourse': {
'Meta': {'object_name': 'RestrictedCourse'},
'access_msg_key': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '255'}),
'course_key': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'enroll_msg_key': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['embargo'] | agpl-3.0 |
sanyaade-teachings/oppia | core/domain/exp_domain.py | 6 | 68060 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for an exploration, its states, and their constituents.
Domain objects capture domain-specific logic and are agnostic of how the
objects they represent are stored. All methods and properties in this file
should therefore be independent of the specific storage models used."""
__author__ = 'Sean Lip'
import copy
import logging
import re
import string
from core.domain import fs_domain
from core.domain import html_cleaner
from core.domain import gadget_registry
from core.domain import interaction_registry
from core.domain import param_domain
from core.domain import rule_domain
from core.domain import skins_services
import feconf
import jinja_utils
import utils
# Do not modify the values of these constants. This is to preserve backwards
# compatibility with previous change dicts.
STATE_PROPERTY_PARAM_CHANGES = 'param_changes'
STATE_PROPERTY_CONTENT = 'content'
STATE_PROPERTY_INTERACTION_ID = 'widget_id'
STATE_PROPERTY_INTERACTION_CUST_ARGS = 'widget_customization_args'
STATE_PROPERTY_INTERACTION_HANDLERS = 'widget_handlers'
# Kept for legacy purposes; not used anymore.
STATE_PROPERTY_INTERACTION_STICKY = 'widget_sticky'
def _is_interaction_terminal(interaction_id):
"""Returns whether the given interaction id marks the end of an
exploration.
"""
return interaction_registry.Registry.get_interaction_by_id(
interaction_id).is_terminal
class ExplorationChange(object):
"""Domain object class for an exploration change.
IMPORTANT: Ensure that all changes to this class (and how these cmds are
interpreted in general) preserve backward-compatibility with the
exploration snapshots in the datastore. Do not modify the definitions of
cmd keys that already exist.
"""
STATE_PROPERTIES = (
STATE_PROPERTY_PARAM_CHANGES,
STATE_PROPERTY_CONTENT,
STATE_PROPERTY_INTERACTION_ID,
STATE_PROPERTY_INTERACTION_CUST_ARGS,
STATE_PROPERTY_INTERACTION_STICKY,
STATE_PROPERTY_INTERACTION_HANDLERS)
EXPLORATION_PROPERTIES = (
'title', 'category', 'objective', 'language_code', 'tags',
'blurb', 'author_notes', 'param_specs', 'param_changes',
'default_skin_id', 'init_state_name')
def __init__(self, change_dict):
"""Initializes an ExplorationChange object from a dict.
change_dict represents a command. It should have a 'cmd' key, and one
or more other keys. The keys depend on what the value for 'cmd' is.
The possible values for 'cmd' are listed below, together with the other
keys in the dict:
- 'add_state' (with state_name)
- 'rename_state' (with old_state_name and new_state_name)
- 'delete_state' (with state_name)
- 'edit_state_property' (with state_name, property_name, new_value and,
optionally, old_value)
- 'edit_exploration_property' (with property_name, new_value and,
optionally, old_value)
For a state, property_name must be one of STATE_PROPERTIES. For an
exploration, property_name must be one of EXPLORATION_PROPERTIES.
"""
if 'cmd' not in change_dict:
raise Exception('Invalid change_dict: %s' % change_dict)
self.cmd = change_dict['cmd']
if self.cmd == 'add_state':
self.state_name = change_dict['state_name']
elif self.cmd == 'rename_state':
self.old_state_name = change_dict['old_state_name']
self.new_state_name = change_dict['new_state_name']
elif self.cmd == 'delete_state':
self.state_name = change_dict['state_name']
elif self.cmd == 'edit_state_property':
if change_dict['property_name'] not in self.STATE_PROPERTIES:
raise Exception('Invalid change_dict: %s' % change_dict)
self.state_name = change_dict['state_name']
self.property_name = change_dict['property_name']
self.new_value = change_dict['new_value']
self.old_value = change_dict.get('old_value')
elif self.cmd == 'edit_exploration_property':
if (change_dict['property_name'] not in
self.EXPLORATION_PROPERTIES):
raise Exception('Invalid change_dict: %s' % change_dict)
self.property_name = change_dict['property_name']
self.new_value = change_dict['new_value']
self.old_value = change_dict.get('old_value')
else:
raise Exception('Invalid change_dict: %s' % change_dict)
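# An illustrative change_dict accepted by the constructor above (the state
# name and new value are invented for the example):
#
# {
#     'cmd': 'edit_state_property',
#     'state_name': 'Introduction',
#     'property_name': 'content',
#     'new_value': {'type': 'text', 'value': 'Hello!'},
# }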
class ExplorationCommitLogEntry(object):
"""Value object representing a commit to an exploration."""
def __init__(
self, created_on, last_updated, user_id, username, exploration_id,
commit_type, commit_message, commit_cmds, version,
post_commit_status, post_commit_community_owned,
post_commit_is_private):
self.created_on = created_on
self.last_updated = last_updated
self.user_id = user_id
self.username = username
self.exploration_id = exploration_id
self.commit_type = commit_type
self.commit_message = commit_message
self.commit_cmds = commit_cmds
self.version = version
self.post_commit_status = post_commit_status
self.post_commit_community_owned = post_commit_community_owned
self.post_commit_is_private = post_commit_is_private
def to_dict(self):
"""This omits created_on, user_id and (for now) commit_cmds."""
return {
'last_updated': utils.get_time_in_millisecs(self.last_updated),
'username': self.username,
'exploration_id': self.exploration_id,
'commit_type': self.commit_type,
'commit_message': self.commit_message,
'version': self.version,
'post_commit_status': self.post_commit_status,
'post_commit_community_owned': self.post_commit_community_owned,
'post_commit_is_private': self.post_commit_is_private,
}
class Content(object):
"""Value object representing non-interactive content."""
def to_dict(self):
return {'type': self.type, 'value': self.value}
@classmethod
def from_dict(cls, content_dict):
return cls(content_dict['type'], content_dict['value'])
def __init__(self, content_type, value=''):
self.type = content_type
self.value = html_cleaner.clean(value)
self.validate()
def validate(self):
# TODO(sll): Add HTML sanitization checking.
# TODO(sll): Validate customization args for rich-text components.
if not self.type == 'text':
raise utils.ValidationError('Invalid content type: %s' % self.type)
if not isinstance(self.value, basestring):
raise utils.ValidationError(
'Invalid content value: %s' % self.value)
def to_html(self, params):
"""Exports this content object to an HTML string.
The content object is parameterized using the parameters in `params`.
"""
if not isinstance(params, dict):
raise Exception(
'Expected context params for parsing content to be a dict, '
'received %s' % params)
return html_cleaner.clean(jinja_utils.parse_string(self.value, params))
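# Illustrative usage (parameter name and value invented): the Jinja template
# in the content is expanded with the supplied params and then sanitized,
# e.g. Content('text', 'Hi {{name}}!').to_html({'name': 'Ada'}) should yield
# the cleaned string 'Hi Ada!'.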
class RuleSpec(object):
"""Value object representing a rule specification."""
def to_dict(self):
return {
'definition': self.definition,
'dest': self.dest,
'feedback': self.feedback,
'param_changes': [param_change.to_dict()
for param_change in self.param_changes],
}
def to_dict_with_obj_type(self):
dict_with_obj_type = self.to_dict()
dict_with_obj_type['obj_type'] = self.obj_type
return dict_with_obj_type
@classmethod
def from_dict_and_obj_type(cls, rulespec_dict, obj_type):
return cls(
rulespec_dict['definition'],
rulespec_dict['dest'],
rulespec_dict['feedback'],
[param_domain.ParamChange(
param_change['name'], param_change['generator_id'],
param_change['customization_args'])
for param_change in rulespec_dict['param_changes']],
obj_type,
)
def __init__(self, definition, dest, feedback, param_changes, obj_type):
# A dict specifying the rule definition. E.g.
#
# {'rule_type': 'default'}
#
# or
#
# {
# 'rule_type': 'atomic',
# 'name': 'LessThan',
# 'subject': 'answer',
# 'inputs': {'x': 5}}
# }
#
self.definition = definition
# Id of the destination state.
# TODO(sll): Check that this state is END_DEST or actually exists.
self.dest = dest
# Feedback to give the reader if this rule is triggered.
self.feedback = feedback or []
self.feedback = [
html_cleaner.clean(feedback_item)
for feedback_item in self.feedback]
# Exploration-level parameter changes to make if this rule is
# triggered.
self.param_changes = param_changes or []
self.obj_type = obj_type
@property
def is_default(self):
"""Returns True if this spec corresponds to the default rule."""
return self.definition['rule_type'] == 'default'
@property
def is_generic(self):
"""Returns whether this rule is generic."""
if self.is_default:
return True
return rule_domain.is_generic(self.obj_type, self.definition['name'])
def get_feedback_string(self):
"""Returns a (possibly empty) string with feedback for this rule."""
return utils.get_random_choice(self.feedback) if self.feedback else ''
def __str__(self):
"""Returns a string representation of a rule (for the stats log)."""
if self.definition['rule_type'] == rule_domain.DEFAULT_RULE_TYPE:
return 'Default'
else:
# TODO(sll): Treat non-atomic rules too.
param_list = [utils.to_ascii(val) for
(key, val) in self.definition['inputs'].iteritems()]
return '%s(%s)' % (self.definition['name'], ','.join(param_list))
@classmethod
def get_default_rule_spec(cls, state_name, obj_type):
return RuleSpec({'rule_type': 'default'}, state_name, [], [], obj_type)
def validate(self):
if not isinstance(self.definition, dict):
raise utils.ValidationError(
'Expected rulespec definition to be a dict, received %s'
% self.definition)
if not isinstance(self.dest, basestring):
raise utils.ValidationError(
'Expected rulespec dest to be a string, received %s'
% self.dest)
if not self.dest:
raise utils.ValidationError(
'Every rulespec should have a destination.')
if not isinstance(self.feedback, list):
raise utils.ValidationError(
'Expected rulespec feedback to be a list, received %s'
% self.feedback)
for feedback_item in self.feedback:
if not isinstance(feedback_item, basestring):
raise utils.ValidationError(
'Expected rulespec feedback item to be a string, received '
'%s' % feedback_item)
if not isinstance(self.param_changes, list):
raise utils.ValidationError(
'Expected rulespec param_changes to be a list, received %s'
% self.param_changes)
for param_change in self.param_changes:
param_change.validate()
@classmethod
def validate_rule_definition(cls, rule_definition, exp_param_specs):
ATOMIC_RULE_DEFINITION_SCHEMA = [
('inputs', dict), ('name', basestring), ('rule_type', basestring),
('subject', basestring)]
COMPOSITE_RULE_DEFINITION_SCHEMA = [
('children', list), ('rule_type', basestring)]
DEFAULT_RULE_DEFINITION_SCHEMA = [('rule_type', basestring)]
ALLOWED_COMPOSITE_RULE_TYPES = [
rule_domain.AND_RULE_TYPE, rule_domain.OR_RULE_TYPE,
rule_domain.NOT_RULE_TYPE]
if 'rule_type' not in rule_definition:
raise utils.ValidationError(
'Rule definition %s contains no rule type.' % rule_definition)
rule_type = rule_definition['rule_type']
if rule_type == rule_domain.DEFAULT_RULE_TYPE:
utils.verify_dict_keys_and_types(
rule_definition, DEFAULT_RULE_DEFINITION_SCHEMA)
elif rule_type == rule_domain.ATOMIC_RULE_TYPE:
utils.verify_dict_keys_and_types(
rule_definition, ATOMIC_RULE_DEFINITION_SCHEMA)
if (rule_definition['subject'] not in exp_param_specs
and rule_definition['subject'] != 'answer'):
raise utils.ValidationError(
'Unrecognized rule subject: %s' %
rule_definition['subject'])
else:
if rule_type not in ALLOWED_COMPOSITE_RULE_TYPES:
raise utils.ValidationError(
'Unsupported rule type %s.' % rule_type)
utils.verify_dict_keys_and_types(
rule_definition, COMPOSITE_RULE_DEFINITION_SCHEMA)
for child_rule in rule_definition['children']:
cls.validate_rule_definition(child_rule, exp_param_specs)
DEFAULT_RULESPEC_STR = 'Default'
class AnswerHandlerInstance(object):
"""Value object for an answer event stream (submit, click ,drag, etc.)."""
def to_dict(self):
return {
'name': self.name,
'rule_specs': [rule_spec.to_dict()
for rule_spec in self.rule_specs]
}
@classmethod
def from_dict_and_obj_type(cls, handler_dict, obj_type):
return cls(
handler_dict['name'],
[RuleSpec.from_dict_and_obj_type(rs, obj_type)
for rs in handler_dict['rule_specs']],
)
def __init__(self, name, rule_specs=None):
if rule_specs is None:
rule_specs = []
self.name = name
self.rule_specs = [RuleSpec(
rule_spec.definition, rule_spec.dest, rule_spec.feedback,
rule_spec.param_changes, rule_spec.obj_type
) for rule_spec in rule_specs]
@property
def default_rule_spec(self):
"""The default rule spec."""
assert self.rule_specs[-1].is_default
return self.rule_specs[-1]
@classmethod
def get_default_handler(cls, state_name, obj_type):
return cls('submit', [
RuleSpec.get_default_rule_spec(state_name, obj_type)])
def validate(self):
if self.name != 'submit':
raise utils.ValidationError(
'Unexpected answer handler name: %s' % self.name)
if not isinstance(self.rule_specs, list):
raise utils.ValidationError(
'Expected answer handler rule specs to be a list, received %s'
% self.rule_specs)
        if len(self.rule_specs) < 1:
            raise utils.ValidationError(
                'There must be at least one rule spec for each answer '
                'handler; received %s.' % self.rule_specs)
for rule_spec in self.rule_specs:
rule_spec.validate()
class InteractionInstance(object):
"""Value object for an instance of an interaction."""
# The default interaction used for a new state.
_DEFAULT_INTERACTION_ID = None
def _get_full_customization_args(self):
"""Populates the customization_args dict of the interaction with
default values, if any of the expected customization_args are missing.
"""
full_customization_args_dict = copy.deepcopy(self.customization_args)
interaction = interaction_registry.Registry.get_interaction_by_id(
self.id)
for ca_spec in interaction.customization_arg_specs:
if ca_spec.name not in full_customization_args_dict:
full_customization_args_dict[ca_spec.name] = {
'value': ca_spec.default_value
}
return full_customization_args_dict
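    # For example (arg name and default are hypothetical): if the
    # interaction declares a customization arg 'placeholder' with
    # default_value 'Type here' and self.customization_args is {}, this
    # returns {'placeholder': {'value': 'Type here'}}.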
def to_dict(self):
return {
'id': self.id,
'customization_args': (
{} if self.id is None
else self._get_full_customization_args()),
'handlers': [handler.to_dict() for handler in self.handlers],
}
@classmethod
def _get_obj_type(cls, interaction_id):
if interaction_id is None:
return None
else:
return interaction_registry.Registry.get_interaction_by_id(
interaction_id)._handlers[0]['obj_type']
@classmethod
def from_dict(cls, interaction_dict):
obj_type = cls._get_obj_type(interaction_dict['id'])
return cls(
interaction_dict['id'],
interaction_dict['customization_args'],
[AnswerHandlerInstance.from_dict_and_obj_type(h, obj_type)
for h in interaction_dict['handlers']])
def __init__(
self, interaction_id, customization_args, handlers):
self.id = interaction_id
# Customization args for the interaction's view. Parts of these
# args may be Jinja templates that refer to state parameters.
# This is a dict: the keys are names of customization_args and the
# values are dicts with a single key, 'value', whose corresponding
# value is the value of the customization arg.
self.customization_args = customization_args
# Answer handlers and rule specs.
self.handlers = [AnswerHandlerInstance(h.name, h.rule_specs)
for h in handlers]
@property
def is_terminal(self):
return interaction_registry.Registry.get_interaction_by_id(
self.id).is_terminal
def validate(self):
if not isinstance(self.id, basestring):
raise utils.ValidationError(
'Expected interaction id to be a string, received %s' %
self.id)
try:
interaction = interaction_registry.Registry.get_interaction_by_id(
self.id)
except KeyError:
raise utils.ValidationError('Invalid interaction id: %s' % self.id)
customization_arg_names = [
ca_spec.name for ca_spec in interaction.customization_arg_specs]
if not isinstance(self.customization_args, dict):
raise utils.ValidationError(
'Expected customization args to be a dict, received %s'
% self.customization_args)
# Validate and clean up the customization args.
extra_args = []
for (arg_name, arg_value) in self.customization_args.iteritems():
if not isinstance(arg_name, basestring):
raise utils.ValidationError(
'Invalid customization arg name: %s' % arg_name)
if arg_name not in customization_arg_names:
extra_args.append(arg_name)
logging.warning(
'Interaction %s does not support customization arg %s.'
% (self.id, arg_name))
for extra_arg in extra_args:
del self.customization_args[extra_arg]
try:
interaction.validate_customization_arg_values(
self.customization_args)
except Exception:
# TODO(sll): Raise an exception here if parameters are not
# involved. (If they are, can we get sample values for the state
# context parameters?)
pass
if not isinstance(self.handlers, list):
raise utils.ValidationError(
'Expected answer handlers to be a list, received %s'
% self.handlers)
if len(self.handlers) < 1:
raise utils.ValidationError(
'At least one answer handler must be specified for each '
'interaction instance.')
for handler in self.handlers:
handler.validate()
@classmethod
def create_default_interaction(cls, default_dest_state_name):
default_obj_type = InteractionInstance._get_obj_type(
cls._DEFAULT_INTERACTION_ID)
return cls(
cls._DEFAULT_INTERACTION_ID,
{},
[AnswerHandlerInstance.get_default_handler(
default_dest_state_name, default_obj_type)]
)
class GadgetInstance(object):
"""Value object for an instance of a gadget."""
def __init__(self, gadget_id, visible_in_states, customization_args):
self.id = gadget_id
# List of State name strings where this Gadget is visible.
self.visible_in_states = visible_in_states
# Customization args for the gadget's view.
self.customization_args = customization_args
@property
def gadget(self):
"""Gadget spec for validation and derived properties below."""
return gadget_registry.Registry.get_gadget_by_id(self.id)
@property
def width(self):
"""Width in pixels."""
return self.gadget.get_width(self.customization_args)
@property
def height(self):
"""Height in pixels."""
return self.gadget.get_height(self.customization_args)
def validate(self):
"""Validate attributes of this GadgetInstance."""
try:
self.gadget
except KeyError:
raise utils.ValidationError(
'Unknown gadget with ID %s is not in the registry.' % self.id)
unknown_customization_arguments = set(
self.customization_args.iterkeys()) - set(
[customization_arg.name for customization_arg
in self.gadget.customization_arg_specs])
if unknown_customization_arguments:
for arg_name in unknown_customization_arguments:
logging.warning(
'Gadget %s does not support customization arg %s.'
% (self.id, arg_name))
del self.customization_args[arg_name]
self.gadget.validate(self.customization_args)
if self.visible_in_states == []:
raise utils.ValidationError(
'%s gadget not visible in any states.' % (
self.gadget.name))
def to_dict(self):
"""Returns GadgetInstance data represented in dict form."""
return {
'gadget_id': self.id,
'visible_in_states': self.visible_in_states,
'customization_args': self._get_full_customization_args(),
}
@classmethod
def from_dict(cls, gadget_dict):
"""Returns GadgetInstance constructed from dict data."""
return GadgetInstance(
gadget_dict['gadget_id'],
gadget_dict['visible_in_states'],
gadget_dict['customization_args'])
def _get_full_customization_args(self):
"""Populates the customization_args dict of the gadget with
default values, if any of the expected customization_args are missing.
"""
full_customization_args_dict = copy.deepcopy(self.customization_args)
for ca_spec in self.gadget.customization_arg_specs:
if ca_spec.name not in full_customization_args_dict:
full_customization_args_dict[ca_spec.name] = {
'value': ca_spec.default_value
}
return full_customization_args_dict
class SkinInstance(object):
"""Domain object for a skin instance."""
def __init__(self, skin_id, skin_customizations):
self.skin_id = skin_id
# panel_contents_dict has gadget_panel_name strings as keys and
# lists of GadgetInstance instances as values.
self.panel_contents_dict = {}
for panel_name, gdict_list in skin_customizations[
'panels_contents'].iteritems():
self.panel_contents_dict[panel_name] = [GadgetInstance(
gdict['gadget_id'], gdict['visible_in_states'],
gdict['customization_args']) for gdict in gdict_list]
@property
def skin(self):
"""Skin spec for validation and derived properties."""
return skins_services.Registry.get_skin_by_id(self.skin_id)
def validate(self):
"""Validates that gadgets fit the skin panel dimensions, and that the
gadgets themselves are valid."""
for panel_name, gadget_instances_list in (
self.panel_contents_dict.iteritems()):
# Validate existence of panels in the skin.
            if panel_name not in self.skin.panels_properties:
raise utils.ValidationError(
'%s panel not found in skin %s' % (
panel_name, self.skin_id)
)
# Validate gadgets fit each skin panel.
self.skin.validate_panel(panel_name, gadget_instances_list)
# Validate gadget internal attributes.
for gadget_instance in gadget_instances_list:
gadget_instance.validate()
def to_dict(self):
"""Returns SkinInstance data represented in dict form.
"""
return {
'skin_id': self.skin_id,
'skin_customizations': {
'panels_contents': {
panel_name: [
gadget_instance.to_dict() for gadget_instance
in instances_list]
for panel_name, instances_list in
self.panel_contents_dict.iteritems()
},
}
}
@classmethod
def from_dict(cls, skin_dict):
"""Returns SkinInstance instance given dict form."""
return SkinInstance(
skin_dict['skin_id'],
skin_dict['skin_customizations'])
def get_state_names_required_by_gadgets(self):
"""Returns a list of strings representing State names required by
GadgetInstances in this skin."""
state_names = set()
for gadget_instances_list in self.panel_contents_dict.values():
for gadget_instance in gadget_instances_list:
for state_name in gadget_instance.visible_in_states:
state_names.add(state_name)
# We convert to a sorted list for clean deterministic testing.
return sorted(state_names)
class State(object):
"""Domain object for a state."""
NULL_INTERACTION_DICT = {
'id': None,
'customization_args': {},
'handlers': [{
'name': 'submit',
'rule_specs': [{
'dest': feconf.DEFAULT_INIT_STATE_NAME,
'definition': {
'rule_type': 'default',
},
'feedback': [],
'param_changes': [],
}],
}],
}
def __init__(self, content, param_changes, interaction):
# The content displayed to the reader in this state.
self.content = [Content(item.type, item.value) for item in content]
# Parameter changes associated with this state.
self.param_changes = [param_domain.ParamChange(
param_change.name, param_change.generator.id,
param_change.customization_args)
for param_change in param_changes]
# The interaction instance associated with this state.
self.interaction = InteractionInstance(
interaction.id, interaction.customization_args,
interaction.handlers)
def validate(self, allow_null_interaction):
if not isinstance(self.content, list):
raise utils.ValidationError(
'Expected state content to be a list, received %s'
% self.content)
if len(self.content) != 1:
raise utils.ValidationError(
'The state content list must have exactly one element. '
'Received %s' % self.content)
self.content[0].validate()
if not isinstance(self.param_changes, list):
raise utils.ValidationError(
'Expected state param_changes to be a list, received %s'
% self.param_changes)
for param_change in self.param_changes:
param_change.validate()
if not allow_null_interaction:
if self.interaction.id is None:
raise utils.ValidationError(
'This state does not have any interaction specified.')
else:
self.interaction.validate()
def update_content(self, content_list):
# TODO(sll): Must sanitize all content in RTE component attrs.
self.content = [Content.from_dict(content_list[0])]
def update_param_changes(self, param_change_dicts):
self.param_changes = [
param_domain.ParamChange.from_dict(param_change_dict)
for param_change_dict in param_change_dicts]
def update_interaction_id(self, interaction_id):
self.interaction.id = interaction_id
# TODO(sll): This should also clear interaction.handlers (except for
# the default rule). This is somewhat mitigated because the client
# updates interaction_handlers directly after this, but we should fix
# it.
def update_interaction_customization_args(self, customization_args):
self.interaction.customization_args = customization_args
def update_interaction_handlers(self, handlers_dict):
if not isinstance(handlers_dict, dict):
raise Exception(
'Expected interaction_handlers to be a dictionary, received %s'
% handlers_dict)
ruleset = handlers_dict[feconf.SUBMIT_HANDLER_NAME]
if not isinstance(ruleset, list):
raise Exception(
'Expected interaction_handlers.submit to be a list, '
'received %s' % ruleset)
interaction_handlers = [AnswerHandlerInstance('submit', [])]
# TODO(yanamal): Do additional calculations here to get the
# parameter changes, if necessary.
for rule_ind in range(len(ruleset)):
rule_dict = ruleset[rule_ind]
rule_dict['feedback'] = [html_cleaner.clean(feedback)
for feedback in rule_dict['feedback']]
if 'param_changes' not in rule_dict:
rule_dict['param_changes'] = []
obj_type = InteractionInstance._get_obj_type(self.interaction.id)
rule_spec = RuleSpec.from_dict_and_obj_type(rule_dict, obj_type)
rule_type = rule_spec.definition['rule_type']
if rule_ind == len(ruleset) - 1:
if rule_type != rule_domain.DEFAULT_RULE_TYPE:
raise ValueError(
'Invalid ruleset %s: the last rule should be a '
'default rule' % rule_dict)
else:
if rule_type == rule_domain.DEFAULT_RULE_TYPE:
raise ValueError(
'Invalid ruleset %s: rules other than the '
'last one should not be default rules.' % rule_dict)
# TODO(sll): Generalize this to Boolean combinations of rules.
matched_rule = (
interaction_registry.Registry.get_interaction_by_id(
self.interaction.id
).get_rule_by_name('submit', rule_spec.definition['name']))
# Normalize and store the rule params.
# TODO(sll): Generalize this to Boolean combinations of rules.
rule_inputs = rule_spec.definition['inputs']
if not isinstance(rule_inputs, dict):
raise Exception(
'Expected rule_inputs to be a dict, received %s'
% rule_inputs)
for param_name, value in rule_inputs.iteritems():
param_type = rule_domain.get_obj_type_for_param_name(
matched_rule, param_name)
if (isinstance(value, basestring) and
'{{' in value and '}}' in value):
# TODO(jacobdavis11): Create checks that all parameters
# referred to exist and have the correct types
normalized_param = value
else:
try:
normalized_param = param_type.normalize(value)
except TypeError:
raise Exception(
'%s has the wrong type. It should be a %s.' %
(value, param_type.__name__))
rule_inputs[param_name] = normalized_param
interaction_handlers[0].rule_specs.append(rule_spec)
self.interaction.handlers = interaction_handlers
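    # A minimal handlers_dict accepted by update_interaction_handlers
    # (state names, rule name and inputs are hypothetical):
    #     {feconf.SUBMIT_HANDLER_NAME: [{
    #         'definition': {'rule_type': 'atomic', 'name': 'Equals',
    #                        'subject': 'answer', 'inputs': {'x': 3}},
    #         'dest': 'NextState', 'feedback': ['Well done!'],
    #     }, {
    #         'definition': {'rule_type': 'default'},
    #         'dest': 'END', 'feedback': [], 'param_changes': [],
    #     }]}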
def to_dict(self):
return {
'content': [item.to_dict() for item in self.content],
'param_changes': [param_change.to_dict()
for param_change in self.param_changes],
'interaction': self.interaction.to_dict()
}
@classmethod
def _get_current_state_dict(cls, state_dict):
"""If the state dict still uses 'widget', change it to 'interaction'.
This corresponds to the v3 --> v4 migration in the YAML representation
of an exploration.
"""
if 'widget' in state_dict:
# This is an old version of the state dict which still uses
# 'widget'.
state_dict['interaction'] = copy.deepcopy(state_dict['widget'])
state_dict['interaction']['id'] = copy.deepcopy(
state_dict['interaction']['widget_id'])
del state_dict['interaction']['widget_id']
del state_dict['widget']
return copy.deepcopy(state_dict)
@classmethod
def from_dict(cls, state_dict):
current_state_dict = cls._get_current_state_dict(state_dict)
return cls(
[Content.from_dict(item)
for item in current_state_dict['content']],
[param_domain.ParamChange.from_dict(param)
for param in current_state_dict['param_changes']],
InteractionInstance.from_dict(current_state_dict['interaction']))
@classmethod
def create_default_state(
cls, default_dest_state_name, is_initial_state=False):
text_str = (
feconf.DEFAULT_INIT_STATE_CONTENT_STR if is_initial_state else '')
return cls(
[Content('text', text_str)], [],
InteractionInstance.create_default_interaction(
default_dest_state_name))
class Exploration(object):
"""Domain object for an Oppia exploration."""
def __init__(self, exploration_id, title, category, objective,
language_code, tags, blurb, author_notes, default_skin,
skin_customizations, init_state_name, states_dict,
param_specs_dict, param_changes_list, version,
created_on=None, last_updated=None):
self.id = exploration_id
self.title = title
self.category = category
self.objective = objective
self.language_code = language_code
self.tags = tags
self.blurb = blurb
self.author_notes = author_notes
self.default_skin = default_skin
self.init_state_name = init_state_name
self.skin_instance = SkinInstance(default_skin, skin_customizations)
self.states = {}
for (state_name, state_dict) in states_dict.iteritems():
self.states[state_name] = State.from_dict(state_dict)
self.param_specs = {
ps_name: param_domain.ParamSpec.from_dict(ps_val)
for (ps_name, ps_val) in param_specs_dict.iteritems()
}
self.param_changes = [
param_domain.ParamChange.from_dict(param_change_dict)
for param_change_dict in param_changes_list]
self.version = version
self.created_on = created_on
self.last_updated = last_updated
def is_equal_to(self, other):
simple_props = [
'id', 'title', 'category', 'objective', 'language_code',
'tags', 'blurb', 'author_notes', 'default_skin',
'init_state_name', 'version']
for prop in simple_props:
if getattr(self, prop) != getattr(other, prop):
return False
for (state_name, state_obj) in self.states.iteritems():
if state_name not in other.states:
return False
if state_obj.to_dict() != other.states[state_name].to_dict():
return False
for (ps_name, ps_obj) in self.param_specs.iteritems():
if ps_name not in other.param_specs:
return False
if ps_obj.to_dict() != other.param_specs[ps_name].to_dict():
return False
for i in xrange(len(self.param_changes)):
if (self.param_changes[i].to_dict() !=
other.param_changes[i].to_dict()):
return False
return True
@classmethod
def create_default_exploration(
cls, exploration_id, title, category, objective='',
language_code=feconf.DEFAULT_LANGUAGE_CODE):
init_state_dict = State.create_default_state(
feconf.DEFAULT_INIT_STATE_NAME, is_initial_state=True).to_dict()
states_dict = {
feconf.DEFAULT_INIT_STATE_NAME: init_state_dict
}
return cls(
exploration_id, title, category, objective, language_code, [], '',
'', 'conversation_v1', feconf.DEFAULT_SKIN_CUSTOMIZATIONS,
feconf.DEFAULT_INIT_STATE_NAME, states_dict, {}, [], 0)
@classmethod
def _require_valid_name(cls, name, name_type):
"""Generic name validation.
Args:
name: the name to validate.
name_type: a human-readable string, like 'the exploration title' or
'a state name'. This will be shown in error messages.
"""
# This check is needed because state names are used in URLs and as ids
# for statistics, so the name length should be bounded above.
if len(name) > 50 or len(name) < 1:
raise utils.ValidationError(
'The length of %s should be between 1 and 50 '
'characters; received %s' % (name_type, name))
if name[0] in string.whitespace or name[-1] in string.whitespace:
raise utils.ValidationError(
'Names should not start or end with whitespace.')
        if re.search(r'\s\s+', name):
raise utils.ValidationError(
'Adjacent whitespace in %s should be collapsed.' % name_type)
for c in feconf.INVALID_NAME_CHARS:
if c in name:
raise utils.ValidationError(
'Invalid character %s in %s: %s' % (c, name_type, name))
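    # For instance, ' Intro' (leading whitespace), 'A  B' (adjacent
    # whitespace) and any name of 51 or more characters would all be
    # rejected by the checks above.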
@classmethod
def _require_valid_state_name(cls, name):
cls._require_valid_name(name, 'a state name')
if name.lower() == feconf.END_DEST.lower():
raise utils.ValidationError(
'Invalid state name: %s' % feconf.END_DEST)
def validate(self, strict=False, allow_null_interaction=False):
"""Validates the exploration before it is committed to storage.
If strict is True, performs advanced validation.
"""
if not isinstance(self.title, basestring):
raise utils.ValidationError(
'Expected title to be a string, received %s' % self.title)
self._require_valid_name(self.title, 'the exploration title')
if not isinstance(self.category, basestring):
raise utils.ValidationError(
'Expected category to be a string, received %s'
% self.category)
self._require_valid_name(self.category, 'the exploration category')
if not isinstance(self.objective, basestring):
raise utils.ValidationError(
'Expected objective to be a string, received %s' %
self.objective)
if not isinstance(self.language_code, basestring):
raise utils.ValidationError(
'Expected language_code to be a string, received %s' %
self.language_code)
if not any([self.language_code == lc['code']
for lc in feconf.ALL_LANGUAGE_CODES]):
raise utils.ValidationError(
'Invalid language_code: %s' % self.language_code)
if not isinstance(self.tags, list):
raise utils.ValidationError(
'Expected \'tags\' to be a list, received %s' % self.tags)
for tag in self.tags:
if not isinstance(tag, basestring):
raise utils.ValidationError(
'Expected each tag in \'tags\' to be a string, received '
'\'%s\'' % tag)
if not tag:
raise utils.ValidationError('Tags should be non-empty.')
if not re.match(feconf.TAG_REGEX, tag):
raise utils.ValidationError(
'Tags should only contain lowercase letters and spaces, '
'received \'%s\'' % tag)
if (tag[0] not in string.ascii_lowercase or
tag[-1] not in string.ascii_lowercase):
raise utils.ValidationError(
'Tags should not start or end with whitespace, received '
' \'%s\'' % tag)
            if re.search(r'\s\s+', tag):
raise utils.ValidationError(
'Adjacent whitespace in tags should be collapsed, '
'received \'%s\'' % tag)
if len(set(self.tags)) != len(self.tags):
            raise utils.ValidationError('Some tags duplicate each other.')
if not isinstance(self.blurb, basestring):
raise utils.ValidationError(
'Expected blurb to be a string, received %s' % self.blurb)
if not isinstance(self.author_notes, basestring):
raise utils.ValidationError(
'Expected author_notes to be a string, received %s' %
self.author_notes)
if not self.default_skin:
raise utils.ValidationError(
'Expected a default_skin to be specified.')
if not isinstance(self.default_skin, basestring):
raise utils.ValidationError(
'Expected default_skin to be a string, received %s (%s).'
                % (self.default_skin, type(self.default_skin)))
        if self.default_skin not in skins_services.Registry.get_all_skin_ids():
raise utils.ValidationError(
'Unrecognized skin id: %s' % self.default_skin)
if not isinstance(self.states, dict):
raise utils.ValidationError(
'Expected states to be a dict, received %s' % self.states)
if not self.states:
raise utils.ValidationError('This exploration has no states.')
for state_name in self.states:
self._require_valid_state_name(state_name)
self.states[state_name].validate(
allow_null_interaction=allow_null_interaction)
if not self.init_state_name:
raise utils.ValidationError(
'This exploration has no initial state name specified.')
if self.init_state_name not in self.states:
raise utils.ValidationError(
'There is no state in %s corresponding to the exploration\'s '
'initial state name %s.' %
(self.states.keys(), self.init_state_name))
if not isinstance(self.param_specs, dict):
raise utils.ValidationError(
'Expected param_specs to be a dict, received %s'
% self.param_specs)
for param_name in self.param_specs:
if not isinstance(param_name, basestring):
raise utils.ValidationError(
'Expected parameter name to be a string, received %s (%s).'
                    % (param_name, type(param_name)))
if not re.match(feconf.ALPHANUMERIC_REGEX, param_name):
raise utils.ValidationError(
'Only parameter names with characters in [a-zA-Z0-9] are '
'accepted.')
self.param_specs[param_name].validate()
if not isinstance(self.param_changes, list):
raise utils.ValidationError(
'Expected param_changes to be a list, received %s'
% self.param_changes)
for param_change in self.param_changes:
param_change.validate()
if param_change.name not in self.param_specs:
raise utils.ValidationError(
'No parameter named \'%s\' exists in this exploration'
% param_change.name)
if param_change.name in feconf.INVALID_PARAMETER_NAMES:
raise utils.ValidationError(
'The exploration-level parameter with name \'%s\' is '
'reserved. Please choose a different name.'
% param_change.name)
# TODO(sll): Find a way to verify the param change customization args
# when they depend on exploration/state parameters (e.g. the generated
# values must have the correct obj_type). Can we get sample values for
# the reader's answer and these parameters by looking at states that
# link to this one?
# Check that all state param changes are valid.
for state_name, state in self.states.iteritems():
for param_change in state.param_changes:
param_change.validate()
if param_change.name not in self.param_specs:
raise utils.ValidationError(
'The parameter with name \'%s\' was set in state '
'\'%s\', but it does not exist in the list of '
'parameter specifications for this exploration.'
% (param_change.name, state_name))
if param_change.name in feconf.INVALID_PARAMETER_NAMES:
raise utils.ValidationError(
'The parameter name \'%s\' is reserved. Please choose '
'a different name for the parameter being set in '
'state \'%s\'.' % (param_change.name, state_name))
# Check that all rule definitions, destinations and param changes are
# valid.
all_state_names = self.states.keys() + [feconf.END_DEST]
for state in self.states.values():
for handler in state.interaction.handlers:
for rule_spec in handler.rule_specs:
RuleSpec.validate_rule_definition(
rule_spec.definition, self.param_specs)
if rule_spec.dest not in all_state_names:
raise utils.ValidationError(
'The destination %s is not a valid state.'
% rule_spec.dest)
for param_change in rule_spec.param_changes:
if param_change.name not in self.param_specs:
raise utils.ValidationError(
'The parameter %s was used in a rule, but it '
'does not exist in this exploration'
% param_change.name)
# Check that state names required by gadgets exist.
state_names_required_by_gadgets = set(
self.skin_instance.get_state_names_required_by_gadgets())
missing_state_names = state_names_required_by_gadgets - set(
self.states.keys())
if missing_state_names:
raise utils.ValidationError(
'Exploration missing required state%s: %s' % (
's' if len(missing_state_names) > 1 else '',
', '.join(sorted(missing_state_names)))
)
# Check that GadgetInstances fit the skin and that gadgets are valid.
self.skin_instance.validate()
if strict:
warnings_list = []
try:
self._verify_all_states_reachable()
except utils.ValidationError as e:
warnings_list.append(unicode(e))
try:
self._verify_no_dead_ends()
except utils.ValidationError as e:
warnings_list.append(unicode(e))
if not self.objective:
warnings_list.append(
'An objective must be specified (in the \'Settings\' tab).'
)
if not self.language_code:
warnings_list.append(
'A language must be specified (in the \'Settings\' tab).')
if len(warnings_list) > 0:
warning_str = ''
for ind, warning in enumerate(warnings_list):
warning_str += '%s. %s ' % (ind + 1, warning)
raise utils.ValidationError(
'Please fix the following issues before saving this '
'exploration: %s' % warning_str)
def _verify_all_states_reachable(self):
"""Verifies that all states are reachable from the initial state."""
# This queue stores state names.
processed_queue = []
curr_queue = [self.init_state_name]
while curr_queue:
curr_state_name = curr_queue[0]
curr_queue = curr_queue[1:]
if curr_state_name in processed_queue:
continue
processed_queue.append(curr_state_name)
curr_state = self.states[curr_state_name]
if not _is_interaction_terminal(curr_state.interaction.id):
for handler in curr_state.interaction.handlers:
for rule in handler.rule_specs:
dest_state = rule.dest
if (dest_state not in curr_queue and
dest_state not in processed_queue and
dest_state != feconf.END_DEST):
curr_queue.append(dest_state)
if len(self.states) != len(processed_queue):
unseen_states = list(
set(self.states.keys()) - set(processed_queue))
raise utils.ValidationError(
'The following states are not reachable from the initial '
'state: %s' % ', '.join(unseen_states))
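    # The loop above is a breadth-first traversal over rule destinations,
    # starting at the initial state. E.g. (hypothetical states) with rules
    # Intro -> Middle -> END and an unconnected state Orphan, only
    # 'Orphan' would be reported as unreachable.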
def _verify_no_dead_ends(self):
"""Verifies that all states can reach a terminal state."""
# This queue stores state names.
processed_queue = []
curr_queue = [feconf.END_DEST]
for (state_name, state) in self.states.iteritems():
if _is_interaction_terminal(state.interaction.id):
curr_queue.append(state_name)
while curr_queue:
curr_state_name = curr_queue[0]
curr_queue = curr_queue[1:]
if curr_state_name in processed_queue:
continue
if curr_state_name != feconf.END_DEST:
processed_queue.append(curr_state_name)
for (state_name, state) in self.states.iteritems():
if (state_name not in curr_queue
and state_name not in processed_queue):
for handler in state.interaction.handlers:
for rule_spec in handler.rule_specs:
if rule_spec.dest == curr_state_name:
curr_queue.append(state_name)
break
if len(self.states) != len(processed_queue):
dead_end_states = list(
set(self.states.keys()) - set(processed_queue))
raise utils.ValidationError(
'It is impossible to complete the exploration from the '
'following states: %s' % ', '.join(dead_end_states))
    # Derived attributes of an exploration.
@property
def init_state(self):
"""The state which forms the start of this exploration."""
return self.states[self.init_state_name]
@property
def param_specs_dict(self):
"""A dict of param specs, each represented as Python dicts."""
return {ps_name: ps_val.to_dict()
for (ps_name, ps_val) in self.param_specs.iteritems()}
@property
def param_change_dicts(self):
"""A list of param changes, represented as JSONifiable Python dicts."""
return [param_change.to_dict() for param_change in self.param_changes]
@classmethod
def is_demo_exploration_id(cls, exploration_id):
"""Whether the exploration id is that of a demo exploration."""
return exploration_id.isdigit() and (
0 <= int(exploration_id) < len(feconf.DEMO_EXPLORATIONS))
@property
def is_demo(self):
"""Whether the exploration is one of the demo explorations."""
return self.is_demo_exploration_id(self.id)
def update_title(self, title):
self.title = title
def update_category(self, category):
self.category = category
def update_objective(self, objective):
self.objective = objective
def update_language_code(self, language_code):
self.language_code = language_code
def update_tags(self, tags):
self.tags = tags
def update_blurb(self, blurb):
self.blurb = blurb
def update_author_notes(self, author_notes):
self.author_notes = author_notes
def update_param_specs(self, param_specs_dict):
self.param_specs = {
ps_name: param_domain.ParamSpec.from_dict(ps_val)
for (ps_name, ps_val) in param_specs_dict.iteritems()
}
def update_param_changes(self, param_changes_list):
self.param_changes = [
param_domain.ParamChange.from_dict(param_change)
for param_change in param_changes_list
]
def update_default_skin_id(self, default_skin_id):
self.default_skin = default_skin_id
def update_init_state_name(self, init_state_name):
if init_state_name not in self.states:
raise Exception(
'Invalid new initial state name: %s; '
'it is not in the list of states %s for this '
'exploration.' % (init_state_name, self.states.keys()))
self.init_state_name = init_state_name
# Methods relating to states.
def add_states(self, state_names):
"""Adds multiple states to the exploration."""
for state_name in state_names:
if state_name in self.states:
raise ValueError('Duplicate state name %s' % state_name)
for state_name in state_names:
self.states[state_name] = State.create_default_state(state_name)
def rename_state(self, old_state_name, new_state_name):
"""Renames the given state."""
if old_state_name not in self.states:
raise ValueError('State %s does not exist' % old_state_name)
if (old_state_name != new_state_name and
new_state_name in self.states):
raise ValueError('Duplicate state name: %s' % new_state_name)
if old_state_name == new_state_name:
return
self._require_valid_state_name(new_state_name)
self.states[new_state_name] = copy.deepcopy(
self.states[old_state_name])
del self.states[old_state_name]
if self.init_state_name == old_state_name:
self.update_init_state_name(new_state_name)
# Find all destinations in the exploration which equal the renamed
# state, and change the name appropriately.
for other_state_name in self.states:
other_state = self.states[other_state_name]
for handler in other_state.interaction.handlers:
for rule in handler.rule_specs:
if rule.dest == old_state_name:
rule.dest = new_state_name
def delete_state(self, state_name):
"""Deletes the given state."""
if state_name not in self.states:
raise ValueError('State %s does not exist' % state_name)
# Do not allow deletion of initial states.
if self.init_state_name == state_name:
raise ValueError('Cannot delete initial state of an exploration.')
# Find all destinations in the exploration which equal the deleted
# state, and change them to loop back to their containing state.
for other_state_name in self.states:
other_state = self.states[other_state_name]
for handler in other_state.interaction.handlers:
for rule in handler.rule_specs:
if rule.dest == state_name:
rule.dest = other_state_name
del self.states[state_name]
# The current version of the exploration schema. If any backward-
# incompatible changes are made to the exploration schema in the YAML
# definitions, this version number must be changed and a migration process
# put in place.
CURRENT_EXPLORATION_SCHEMA_VERSION = 5
@classmethod
def _convert_v1_dict_to_v2_dict(cls, exploration_dict):
"""Converts a v1 exploration dict into a v2 exploration dict."""
exploration_dict['schema_version'] = 2
exploration_dict['init_state_name'] = (
exploration_dict['states'][0]['name'])
states_dict = {}
for state in exploration_dict['states']:
states_dict[state['name']] = state
del states_dict[state['name']]['name']
exploration_dict['states'] = states_dict
return exploration_dict
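    # Sketch of the v1 -> v2 conversion above (state contents elided):
    #     v1: {'states': [{'name': 'Intro', ...}, {'name': 'End', ...}]}
    # becomes
    #     v2: {'init_state_name': 'Intro',
    #          'states': {'Intro': {...}, 'End': {...}}}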
@classmethod
def _convert_v2_dict_to_v3_dict(cls, exploration_dict):
"""Converts a v2 exploration dict into a v3 exploration dict."""
exploration_dict['schema_version'] = 3
exploration_dict['objective'] = ''
exploration_dict['language_code'] = feconf.DEFAULT_LANGUAGE_CODE
exploration_dict['skill_tags'] = []
exploration_dict['blurb'] = ''
exploration_dict['author_notes'] = ''
return exploration_dict
@classmethod
def _convert_v3_dict_to_v4_dict(cls, exploration_dict):
"""Converts a v3 exploration dict into a v4 exploration dict."""
exploration_dict['schema_version'] = 4
for _, state_defn in exploration_dict['states'].iteritems():
state_defn['interaction'] = copy.deepcopy(state_defn['widget'])
state_defn['interaction']['id'] = copy.deepcopy(
state_defn['interaction']['widget_id'])
del state_defn['interaction']['widget_id']
del state_defn['interaction']['sticky']
del state_defn['widget']
return exploration_dict
@classmethod
def _convert_v4_dict_to_v5_dict(cls, exploration_dict):
"""Converts a v4 exploration dict into a v5 exploration dict."""
exploration_dict['schema_version'] = 5
# Rename the 'skill_tags' field to 'tags'.
exploration_dict['tags'] = exploration_dict['skill_tags']
del exploration_dict['skill_tags']
exploration_dict['skin_customizations'] = (
feconf.DEFAULT_SKIN_CUSTOMIZATIONS)
return exploration_dict
@classmethod
def from_yaml(cls, exploration_id, title, category, yaml_content):
"""Creates and returns exploration from a YAML text string."""
try:
exploration_dict = utils.dict_from_yaml(yaml_content)
except Exception as e:
raise Exception(
'Please ensure that you are uploading a YAML text file, not '
'a zip file. The YAML parser returned the following error: %s'
% e)
exploration_schema_version = exploration_dict.get('schema_version')
if exploration_schema_version is None:
raise Exception('Invalid YAML file: no schema version specified.')
if not (1 <= exploration_schema_version
<= cls.CURRENT_EXPLORATION_SCHEMA_VERSION):
            raise Exception(
                'Sorry, we can only process v1 to v%s YAML files at '
                'present.' % cls.CURRENT_EXPLORATION_SCHEMA_VERSION)
if exploration_schema_version == 1:
exploration_dict = cls._convert_v1_dict_to_v2_dict(
exploration_dict)
exploration_schema_version = 2
if exploration_schema_version == 2:
exploration_dict = cls._convert_v2_dict_to_v3_dict(
exploration_dict)
exploration_schema_version = 3
if exploration_schema_version == 3:
exploration_dict = cls._convert_v3_dict_to_v4_dict(
exploration_dict)
exploration_schema_version = 4
if exploration_schema_version == 4:
exploration_dict = cls._convert_v4_dict_to_v5_dict(
exploration_dict)
exploration_schema_version = 5
exploration = cls.create_default_exploration(
exploration_id, title, category,
objective=exploration_dict['objective'],
language_code=exploration_dict['language_code'])
exploration.tags = exploration_dict['tags']
exploration.blurb = exploration_dict['blurb']
exploration.author_notes = exploration_dict['author_notes']
exploration.param_specs = {
ps_name: param_domain.ParamSpec.from_dict(ps_val) for
(ps_name, ps_val) in exploration_dict['param_specs'].iteritems()
}
init_state_name = exploration_dict['init_state_name']
exploration.rename_state(exploration.init_state_name, init_state_name)
exploration.add_states([
state_name for state_name in exploration_dict['states']
if state_name != init_state_name])
for (state_name, sdict) in exploration_dict['states'].iteritems():
state = exploration.states[state_name]
state.content = [
Content(item['type'], html_cleaner.clean(item['value']))
for item in sdict['content']
]
state.param_changes = [param_domain.ParamChange(
pc['name'], pc['generator_id'], pc['customization_args']
) for pc in sdict['param_changes']]
for pc in state.param_changes:
if pc.name not in exploration.param_specs:
raise Exception('Parameter %s was used in a state but not '
'declared in the exploration param_specs.'
% pc.name)
idict = sdict['interaction']
interaction_handlers = [
AnswerHandlerInstance.from_dict_and_obj_type({
'name': handler['name'],
'rule_specs': [{
'definition': rule_spec['definition'],
'dest': rule_spec['dest'],
'feedback': [html_cleaner.clean(feedback)
for feedback in rule_spec['feedback']],
'param_changes': rule_spec.get('param_changes', []),
} for rule_spec in handler['rule_specs']],
}, InteractionInstance._get_obj_type(idict['id']))
for handler in idict['handlers']]
state.interaction = InteractionInstance(
idict['id'], idict['customization_args'],
interaction_handlers)
exploration.states[state_name] = state
exploration.default_skin = exploration_dict['default_skin']
exploration.param_changes = [
param_domain.ParamChange.from_dict(pc)
for pc in exploration_dict['param_changes']]
exploration.skin_instance = SkinInstance(
exploration_dict['default_skin'],
exploration_dict['skin_customizations'])
return exploration
def to_yaml(self):
return utils.yaml_from_dict({
'author_notes': self.author_notes,
'blurb': self.blurb,
'default_skin': self.default_skin,
'init_state_name': self.init_state_name,
'language_code': self.language_code,
'objective': self.objective,
'param_changes': self.param_change_dicts,
'param_specs': self.param_specs_dict,
'tags': self.tags,
'skin_customizations': self.skin_instance.to_dict()[
'skin_customizations'],
'states': {state_name: state.to_dict()
for (state_name, state) in self.states.iteritems()},
'schema_version': self.CURRENT_EXPLORATION_SCHEMA_VERSION
})
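    # Abridged sketch of the YAML emitted above (keys are the real dict
    # keys; values are hypothetical):
    #     author_notes: ''
    #     init_state_name: Intro
    #     schema_version: 5
    #     states:
    #       Intro: {...}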
def to_player_dict(self):
"""Returns a copy of the exploration suitable for inclusion in the
learner view."""
return {
'init_state_name': self.init_state_name,
'param_changes': self.param_change_dicts,
'param_specs': self.param_specs_dict,
'states': {
state_name: state.to_dict()
for (state_name, state) in self.states.iteritems()
},
'title': self.title,
}
def get_interaction_ids(self):
"""Get all interaction ids used in this exploration."""
return list(set([
state.interaction.id for state in self.states.values()]))
class ExplorationSummary(object):
"""Domain object for an Oppia exploration summary."""
def __init__(self, exploration_id, title, category, objective,
language_code, tags, ratings, status,
community_owned, owner_ids, editor_ids,
viewer_ids, version, exploration_model_created_on,
exploration_model_last_updated):
"""'ratings' is a dict whose keys are '1', '2', '3', '4', '5' and whose
values are nonnegative integers representing frequency counts. Note
that the keys need to be strings in order for this dict to be
JSON-serializable.
"""
self.id = exploration_id
self.title = title
self.category = category
self.objective = objective
self.language_code = language_code
self.tags = tags
self.ratings = ratings
self.status = status
self.community_owned = community_owned
self.owner_ids = owner_ids
self.editor_ids = editor_ids
self.viewer_ids = viewer_ids
self.version = version
self.exploration_model_created_on = exploration_model_created_on
self.exploration_model_last_updated = exploration_model_last_updated
| apache-2.0 |
rrampage/rethinkdb | test/rql_test/connections/http_support/jinja2/parser.py | 637 | 35186 | # -*- coding: utf-8 -*-
"""
jinja2.parser
~~~~~~~~~~~~~
Implements the template parser.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
from jinja2.lexer import describe_token, describe_token_expr
from jinja2._compat import next, imap
#: statements that call into a dedicated parse_<name> method
_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
'macro', 'include', 'from', 'import',
'set'])
_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
class Parser(object):
"""This is the central parsing class Jinja2 uses. It's passed to
extensions and can be used to parse expressions or statements.
"""
def __init__(self, environment, source, name=None, filename=None,
state=None):
self.environment = environment
self.stream = environment._tokenize(source, name, filename, state)
self.name = name
self.filename = filename
self.closed = False
self.extensions = {}
for extension in environment.iter_extensions():
for tag in extension.tags:
self.extensions[tag] = extension.parse
self._last_identifier = 0
self._tag_stack = []
self._end_token_stack = []
def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
"""Convenience method that raises `exc` with the message, passed
line number or last line number as well as the current name and
filename.
"""
if lineno is None:
lineno = self.stream.current.lineno
raise exc(msg, lineno, self.name, self.filename)
def _fail_ut_eof(self, name, end_token_stack, lineno):
expected = []
for exprs in end_token_stack:
expected.extend(imap(describe_token_expr, exprs))
if end_token_stack:
currently_looking = ' or '.join(
"'%s'" % describe_token_expr(expr)
for expr in end_token_stack[-1])
else:
currently_looking = None
if name is None:
message = ['Unexpected end of template.']
else:
message = ['Encountered unknown tag \'%s\'.' % name]
if currently_looking:
if name is not None and name in expected:
message.append('You probably made a nesting mistake. Jinja '
'is expecting this tag, but currently looking '
'for %s.' % currently_looking)
else:
message.append('Jinja was looking for the following tags: '
'%s.' % currently_looking)
if self._tag_stack:
message.append('The innermost block that needs to be '
'closed is \'%s\'.' % self._tag_stack[-1])
self.fail(' '.join(message), lineno)
def fail_unknown_tag(self, name, lineno=None):
"""Called if the parser encounters an unknown tag. Tries to fail
with a human readable error message that could help to identify
the problem.
"""
return self._fail_ut_eof(name, self._end_token_stack, lineno)
def fail_eof(self, end_tokens=None, lineno=None):
"""Like fail_unknown_tag but for end of template situations."""
stack = list(self._end_token_stack)
if end_tokens is not None:
stack.append(end_tokens)
return self._fail_ut_eof(None, stack, lineno)
def is_tuple_end(self, extra_end_rules=None):
"""Are we at the end of a tuple?"""
if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
return True
elif extra_end_rules is not None:
return self.stream.current.test_any(extra_end_rules)
return False
def free_identifier(self, lineno=None):
"""Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
self._last_identifier += 1
rv = object.__new__(nodes.InternalName)
nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
return rv
def parse_statement(self):
"""Parse a single statement."""
token = self.stream.current
if token.type != 'name':
self.fail('tag name expected', token.lineno)
self._tag_stack.append(token.value)
pop_tag = True
try:
if token.value in _statement_keywords:
return getattr(self, 'parse_' + self.stream.current.value)()
if token.value == 'call':
return self.parse_call_block()
if token.value == 'filter':
return self.parse_filter_block()
ext = self.extensions.get(token.value)
if ext is not None:
return ext(self)
# did not work out, remove the token we pushed by accident
# from the stack so that the unknown tag fail function can
# produce a proper error message.
self._tag_stack.pop()
pop_tag = False
self.fail_unknown_tag(token.value, token.lineno)
finally:
if pop_tag:
self._tag_stack.pop()
def parse_statements(self, end_tokens, drop_needle=False):
"""Parse multiple statements into a list until one of the end tokens
is reached. This is used to parse the body of statements as it also
parses template data if appropriate. The parser checks first if the
current token is a colon and skips it if there is one. Then it checks
for the block end and parses until if one of the `end_tokens` is
reached. Per default the active token in the stream at the end of
the call is the matched end token. If this is not wanted `drop_needle`
can be set to `True` and the end token is removed.
"""
# the first token may be a colon for python compatibility
self.stream.skip_if('colon')
# in the future it would be possible to add whole code sections
# by adding some sort of end of statement token and parsing those here.
self.stream.expect('block_end')
result = self.subparse(end_tokens)
# we reached the end of the template too early, the subparser
# does not check for this, so we do that now
if self.stream.current.type == 'eof':
self.fail_eof(end_tokens)
if drop_needle:
next(self.stream)
return result
def parse_set(self):
"""Parse an assign statement."""
lineno = next(self.stream).lineno
target = self.parse_assign_target()
self.stream.expect('assign')
expr = self.parse_tuple()
return nodes.Assign(target, expr, lineno=lineno)
def parse_for(self):
"""Parse a for loop."""
lineno = self.stream.expect('name:for').lineno
target = self.parse_assign_target(extra_end_rules=('name:in',))
self.stream.expect('name:in')
iter = self.parse_tuple(with_condexpr=False,
extra_end_rules=('name:recursive',))
test = None
if self.stream.skip_if('name:if'):
test = self.parse_expression()
recursive = self.stream.skip_if('name:recursive')
body = self.parse_statements(('name:endfor', 'name:else'))
if next(self.stream).value == 'endfor':
else_ = []
else:
else_ = self.parse_statements(('name:endfor',), drop_needle=True)
return nodes.For(target, iter, body, else_, test,
recursive, lineno=lineno)
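    # parse_for handles loops such as (template syntax, illustrative):
    #     {% for key, value in mapping if value recursive %}
    #         ...
    #     {% else %}
    #         ...
    #     {% endfor %}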
def parse_if(self):
"""Parse an if construct."""
node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
while 1:
node.test = self.parse_tuple(with_condexpr=False)
node.body = self.parse_statements(('name:elif', 'name:else',
'name:endif'))
token = next(self.stream)
if token.test('name:elif'):
new_node = nodes.If(lineno=self.stream.current.lineno)
node.else_ = [new_node]
node = new_node
continue
elif token.test('name:else'):
node.else_ = self.parse_statements(('name:endif',),
drop_needle=True)
else:
node.else_ = []
break
return result
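    # parse_if handles chains such as {% if a %}...{% elif b %}...
    # {% else %}...{% endif %} (illustrative); each 'elif' becomes a
    # nested nodes.If stored in the parent's else_ list.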
def parse_block(self):
node = nodes.Block(lineno=next(self.stream).lineno)
node.name = self.stream.expect('name').value
node.scoped = self.stream.skip_if('name:scoped')
# common problem people encounter when switching from django
# to jinja. we do not support hyphens in block names, so let's
# raise a nicer error message in that case.
if self.stream.current.type == 'sub':
self.fail('Block names in Jinja have to be valid Python '
'identifiers and may not contain hyphens, use an '
'underscore instead.')
node.body = self.parse_statements(('name:endblock',), drop_needle=True)
self.stream.skip_if('name:' + node.name)
return node
def parse_extends(self):
node = nodes.Extends(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
return node
def parse_import_context(self, node, default):
if self.stream.current.test_any('name:with', 'name:without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
else:
node.with_context = default
return node
def parse_include(self):
node = nodes.Include(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
if self.stream.current.test('name:ignore') and \
self.stream.look().test('name:missing'):
node.ignore_missing = True
self.stream.skip(2)
else:
node.ignore_missing = False
return self.parse_import_context(node, True)
def parse_import(self):
node = nodes.Import(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:as')
node.target = self.parse_assign_target(name_only=True).name
return self.parse_import_context(node, False)
def parse_from(self):
node = nodes.FromImport(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:import')
node.names = []
def parse_context():
if self.stream.current.value in ('with', 'without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
return True
return False
while 1:
if node.names:
self.stream.expect('comma')
if self.stream.current.type == 'name':
if parse_context():
break
target = self.parse_assign_target(name_only=True)
if target.name.startswith('_'):
self.fail('names starting with an underline can not '
'be imported', target.lineno,
exc=TemplateAssertionError)
if self.stream.skip_if('name:as'):
alias = self.parse_assign_target(name_only=True)
node.names.append((target.name, alias.name))
else:
node.names.append(target.name)
if parse_context() or self.stream.current.type != 'comma':
break
else:
break
if not hasattr(node, 'with_context'):
node.with_context = False
self.stream.skip_if('comma')
return node
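    # parse_from handles imports such as (template names illustrative):
    #     {% from 'forms.html' import input as input_field, textarea %}
    #     {% from 'forms.html' import field with context %}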
def parse_signature(self, node):
node.args = args = []
node.defaults = defaults = []
self.stream.expect('lparen')
while self.stream.current.type != 'rparen':
if args:
self.stream.expect('comma')
arg = self.parse_assign_target(name_only=True)
arg.set_ctx('param')
if self.stream.skip_if('assign'):
defaults.append(self.parse_expression())
args.append(arg)
self.stream.expect('rparen')
def parse_call_block(self):
node = nodes.CallBlock(lineno=next(self.stream).lineno)
if self.stream.current.type == 'lparen':
self.parse_signature(node)
else:
node.args = []
node.defaults = []
node.call = self.parse_expression()
if not isinstance(node.call, nodes.Call):
self.fail('expected call', node.lineno)
node.body = self.parse_statements(('name:endcall',), drop_needle=True)
return node
def parse_filter_block(self):
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
node.filter = self.parse_filter(None, start_inline=True)
node.body = self.parse_statements(('name:endfilter',),
drop_needle=True)
return node
def parse_macro(self):
node = nodes.Macro(lineno=next(self.stream).lineno)
node.name = self.parse_assign_target(name_only=True).name
self.parse_signature(node)
node.body = self.parse_statements(('name:endmacro',),
drop_needle=True)
return node
def parse_print(self):
node = nodes.Output(lineno=next(self.stream).lineno)
node.nodes = []
while self.stream.current.type != 'block_end':
if node.nodes:
self.stream.expect('comma')
node.nodes.append(self.parse_expression())
return node
def parse_assign_target(self, with_tuple=True, name_only=False,
extra_end_rules=None):
"""Parse an assignment target. As Jinja2 allows assignments to
tuples, this function can parse all allowed assignment targets. Per
default assignments to tuples are parsed, that can be disable however
by setting `with_tuple` to `False`. If only assignments to names are
wanted `name_only` can be set to `True`. The `extra_end_rules`
parameter is forwarded to the tuple parsing function.
"""
if name_only:
token = self.stream.expect('name')
target = nodes.Name(token.value, 'store', lineno=token.lineno)
else:
if with_tuple:
target = self.parse_tuple(simplified=True,
extra_end_rules=extra_end_rules)
else:
target = self.parse_primary()
target.set_ctx('store')
if not target.can_assign():
self.fail('can\'t assign to %r' % target.__class__.
__name__.lower(), target.lineno)
return target
def parse_expression(self, with_condexpr=True):
"""Parse an expression. Per default all expressions are parsed, if
the optional `with_condexpr` parameter is set to `False` conditional
expressions are not parsed.
"""
if with_condexpr:
return self.parse_condexpr()
return self.parse_or()
def parse_condexpr(self):
lineno = self.stream.current.lineno
expr1 = self.parse_or()
while self.stream.skip_if('name:if'):
expr2 = self.parse_or()
if self.stream.skip_if('name:else'):
expr3 = self.parse_condexpr()
else:
expr3 = None
expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
lineno = self.stream.current.lineno
return expr1
def parse_or(self):
lineno = self.stream.current.lineno
left = self.parse_and()
while self.stream.skip_if('name:or'):
right = self.parse_and()
left = nodes.Or(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_and(self):
lineno = self.stream.current.lineno
left = self.parse_not()
while self.stream.skip_if('name:and'):
right = self.parse_not()
left = nodes.And(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_not(self):
if self.stream.current.test('name:not'):
lineno = next(self.stream).lineno
return nodes.Not(self.parse_not(), lineno=lineno)
return self.parse_compare()
def parse_compare(self):
lineno = self.stream.current.lineno
expr = self.parse_add()
ops = []
while 1:
token_type = self.stream.current.type
if token_type in _compare_operators:
next(self.stream)
ops.append(nodes.Operand(token_type, self.parse_add()))
elif self.stream.skip_if('name:in'):
ops.append(nodes.Operand('in', self.parse_add()))
elif self.stream.current.test('name:not') and \
self.stream.look().test('name:in'):
self.stream.skip(2)
ops.append(nodes.Operand('notin', self.parse_add()))
else:
break
lineno = self.stream.current.lineno
if not ops:
return expr
return nodes.Compare(expr, ops, lineno=lineno)
def parse_add(self):
lineno = self.stream.current.lineno
left = self.parse_sub()
while self.stream.current.type == 'add':
next(self.stream)
right = self.parse_sub()
left = nodes.Add(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_sub(self):
lineno = self.stream.current.lineno
left = self.parse_concat()
while self.stream.current.type == 'sub':
next(self.stream)
right = self.parse_concat()
left = nodes.Sub(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_concat(self):
lineno = self.stream.current.lineno
args = [self.parse_mul()]
while self.stream.current.type == 'tilde':
next(self.stream)
args.append(self.parse_mul())
if len(args) == 1:
return args[0]
return nodes.Concat(args, lineno=lineno)
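    # The tilde operator concatenates operands after string conversion,
    # e.g. {{ "Hello " ~ name ~ "!" }} (illustrative).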
def parse_mul(self):
lineno = self.stream.current.lineno
left = self.parse_div()
while self.stream.current.type == 'mul':
next(self.stream)
right = self.parse_div()
left = nodes.Mul(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_div(self):
lineno = self.stream.current.lineno
left = self.parse_floordiv()
while self.stream.current.type == 'div':
next(self.stream)
right = self.parse_floordiv()
left = nodes.Div(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_floordiv(self):
lineno = self.stream.current.lineno
left = self.parse_mod()
while self.stream.current.type == 'floordiv':
next(self.stream)
right = self.parse_mod()
left = nodes.FloorDiv(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_mod(self):
lineno = self.stream.current.lineno
left = self.parse_pow()
while self.stream.current.type == 'mod':
next(self.stream)
right = self.parse_pow()
left = nodes.Mod(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_pow(self):
lineno = self.stream.current.lineno
left = self.parse_unary()
while self.stream.current.type == 'pow':
next(self.stream)
right = self.parse_unary()
left = nodes.Pow(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_unary(self, with_filter=True):
token_type = self.stream.current.type
lineno = self.stream.current.lineno
if token_type == 'sub':
next(self.stream)
node = nodes.Neg(self.parse_unary(False), lineno=lineno)
elif token_type == 'add':
next(self.stream)
node = nodes.Pos(self.parse_unary(False), lineno=lineno)
else:
node = self.parse_primary()
node = self.parse_postfix(node)
if with_filter:
node = self.parse_filter_expr(node)
return node
def parse_primary(self):
token = self.stream.current
if token.type == 'name':
if token.value in ('true', 'false', 'True', 'False'):
node = nodes.Const(token.value in ('true', 'True'),
lineno=token.lineno)
elif token.value in ('none', 'None'):
node = nodes.Const(None, lineno=token.lineno)
else:
node = nodes.Name(token.value, 'load', lineno=token.lineno)
next(self.stream)
elif token.type == 'string':
next(self.stream)
buf = [token.value]
lineno = token.lineno
while self.stream.current.type == 'string':
buf.append(self.stream.current.value)
next(self.stream)
node = nodes.Const(''.join(buf), lineno=lineno)
elif token.type in ('integer', 'float'):
next(self.stream)
node = nodes.Const(token.value, lineno=token.lineno)
elif token.type == 'lparen':
next(self.stream)
node = self.parse_tuple(explicit_parentheses=True)
self.stream.expect('rparen')
elif token.type == 'lbracket':
node = self.parse_list()
elif token.type == 'lbrace':
node = self.parse_dict()
else:
self.fail("unexpected '%s'" % describe_token(token), token.lineno)
return node
def parse_tuple(self, simplified=False, with_condexpr=True,
extra_end_rules=None, explicit_parentheses=False):
"""Works like `parse_expression` but if multiple expressions are
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
This method could also return a regular expression instead of a tuple
if no commas where found.
The default parsing mode is a full tuple. If `simplified` is `True`
only names and literals are parsed. The `no_condexpr` parameter is
forwarded to :meth:`parse_expression`.
Because tuples do not require delimiters and may end in a bogus comma
an extra hint is needed that marks the end of a tuple. For example
for loops support tuples between `for` and `in`. In that case the
`extra_end_rules` is set to ``['name:in']``.
`explicit_parentheses` is true if the parsing was triggered by an
expression in parentheses. This is used to figure out if an empty
tuple is a valid expression or not.
"""
lineno = self.stream.current.lineno
if simplified:
parse = self.parse_primary
elif with_condexpr:
parse = self.parse_expression
else:
parse = lambda: self.parse_expression(with_condexpr=False)
args = []
is_tuple = False
while 1:
if args:
self.stream.expect('comma')
if self.is_tuple_end(extra_end_rules):
break
args.append(parse())
if self.stream.current.type == 'comma':
is_tuple = True
else:
break
lineno = self.stream.current.lineno
if not is_tuple:
if args:
return args[0]
# if we don't have explicit parentheses, an empty tuple is
# not a valid expression. This would mean nothing (literally
# nothing) in the spot of an expression would be an empty
# tuple.
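# (e.g. an empty ``{{ }}`` would otherwise parse as an empty tuple
# and silently render as nothing instead of raising a syntax error)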
if not explicit_parentheses:
self.fail('Expected an expression, got \'%s\'' %
describe_token(self.stream.current))
return nodes.Tuple(args, 'load', lineno=lineno)
def parse_list(self):
token = self.stream.expect('lbracket')
items = []
while self.stream.current.type != 'rbracket':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbracket':
break
items.append(self.parse_expression())
self.stream.expect('rbracket')
return nodes.List(items, lineno=token.lineno)
def parse_dict(self):
token = self.stream.expect('lbrace')
items = []
while self.stream.current.type != 'rbrace':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbrace':
break
key = self.parse_expression()
self.stream.expect('colon')
value = self.parse_expression()
items.append(nodes.Pair(key, value, lineno=key.lineno))
self.stream.expect('rbrace')
return nodes.Dict(items, lineno=token.lineno)
def parse_postfix(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'dot' or token_type == 'lbracket':
node = self.parse_subscript(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
def parse_filter_expr(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'pipe':
node = self.parse_filter(node)
elif token_type == 'name' and self.stream.current.value == 'is':
node = self.parse_test(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
def parse_subscript(self, node):
token = next(self.stream)
if token.type == 'dot':
attr_token = self.stream.current
next(self.stream)
if attr_token.type == 'name':
return nodes.Getattr(node, attr_token.value, 'load',
lineno=token.lineno)
elif attr_token.type != 'integer':
self.fail('expected name or number', attr_token.lineno)
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
if token.type == 'lbracket':
args = []
while self.stream.current.type != 'rbracket':
if args:
self.stream.expect('comma')
args.append(self.parse_subscribed())
self.stream.expect('rbracket')
if len(args) == 1:
arg = args[0]
else:
arg = nodes.Tuple(args, 'load', lineno=token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
self.fail('expected subscript expression', token.lineno)
def parse_subscribed(self):
lineno = self.stream.current.lineno
if self.stream.current.type == 'colon':
next(self.stream)
args = [None]
else:
node = self.parse_expression()
if self.stream.current.type != 'colon':
return node
next(self.stream)
args = [node]
if self.stream.current.type == 'colon':
args.append(None)
elif self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
if self.stream.current.type == 'colon':
next(self.stream)
if self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
else:
args.append(None)
return nodes.Slice(lineno=lineno, *args)
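# Illustrative mappings for a subscript argument (sketch):
#   x[1]     -> plain expression, Const(1) is returned as-is (no Slice)
#   x[1:]    -> Slice(Const(1), None, None)
#   x[:2]    -> Slice(None, Const(2), None)
#   x[1:2:3] -> Slice(Const(1), Const(2), Const(3))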
def parse_call(self, node):
token = self.stream.expect('lparen')
args = []
kwargs = []
dyn_args = dyn_kwargs = None
require_comma = False
def ensure(expr):
if not expr:
self.fail('invalid syntax for function call expression',
token.lineno)
while self.stream.current.type != 'rparen':
if require_comma:
self.stream.expect('comma')
# support for trailing comma
if self.stream.current.type == 'rparen':
break
if self.stream.current.type == 'mul':
ensure(dyn_args is None and dyn_kwargs is None)
next(self.stream)
dyn_args = self.parse_expression()
elif self.stream.current.type == 'pow':
ensure(dyn_kwargs is None)
next(self.stream)
dyn_kwargs = self.parse_expression()
else:
ensure(dyn_args is None and dyn_kwargs is None)
if self.stream.current.type == 'name' and \
self.stream.look().type == 'assign':
key = self.stream.current.value
self.stream.skip(2)
value = self.parse_expression()
kwargs.append(nodes.Keyword(key, value,
lineno=value.lineno))
else:
ensure(not kwargs)
args.append(self.parse_expression())
require_comma = True
self.stream.expect('rparen')
if node is None:
return args, kwargs, dyn_args, dyn_kwargs
return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
lineno=token.lineno)
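# Sketch: ``foo(1, x=2, *rest, **extra)`` parses roughly to
#   Call(Name('foo', 'load'),
#        args=[Const(1)],
#        kwargs=[Keyword('x', Const(2))],
#        dyn_args=Name('rest', 'load'),
#        dyn_kwargs=Name('extra', 'load'))
# With node=None (as used by parse_filter/parse_test), the raw
# (args, kwargs, dyn_args, dyn_kwargs) tuple is returned instead.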
def parse_filter(self, node, start_inline=False):
while self.stream.current.type == 'pipe' or start_inline:
if not start_inline:
next(self.stream)
token = self.stream.expect('name')
name = token.value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
else:
args = []
kwargs = []
dyn_args = dyn_kwargs = None
node = nodes.Filter(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
start_inline = False
return node
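# Sketch: ``name|default('n/a')|upper`` becomes nested Filter nodes,
#   Filter(Filter(Name('name'), 'default', [Const('n/a')], ...), 'upper', [], ...)
# start_inline=True is presumably how the {% filter %} statement parser
# reuses this method without requiring a leading pipe token.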
def parse_test(self, node):
token = next(self.stream)
if self.stream.current.test('name:not'):
next(self.stream)
negated = True
else:
negated = False
name = self.stream.expect('name').value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
dyn_args = dyn_kwargs = None
kwargs = []
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
elif self.stream.current.type in ('name', 'string', 'integer',
'float', 'lparen', 'lbracket',
'lbrace') and not \
self.stream.current.test_any('name:else', 'name:or',
'name:and'):
if self.stream.current.test('name:is'):
self.fail('You cannot chain multiple tests with is')
args = [self.parse_expression()]
else:
args = []
node = nodes.Test(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
if negated:
node = nodes.Not(node, lineno=token.lineno)
return node
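# Sketch: ``foo is divisibleby 3`` -> Test(Name('foo', 'load'),
# 'divisibleby', [Const(3)], ...), while ``foo is not none`` wraps the
# resulting Test node in a Not node.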
def subparse(self, end_tokens=None):
body = []
data_buffer = []
add_data = data_buffer.append
if end_tokens is not None:
self._end_token_stack.append(end_tokens)
def flush_data():
if data_buffer:
lineno = data_buffer[0].lineno
body.append(nodes.Output(data_buffer[:], lineno=lineno))
del data_buffer[:]
try:
while self.stream:
token = self.stream.current
if token.type == 'data':
if token.value:
add_data(nodes.TemplateData(token.value,
lineno=token.lineno))
next(self.stream)
elif token.type == 'variable_begin':
next(self.stream)
add_data(self.parse_tuple(with_condexpr=True))
self.stream.expect('variable_end')
elif token.type == 'block_begin':
flush_data()
next(self.stream)
if end_tokens is not None and \
self.stream.current.test_any(*end_tokens):
return body
rv = self.parse_statement()
if isinstance(rv, list):
body.extend(rv)
else:
body.append(rv)
self.stream.expect('block_end')
else:
raise AssertionError('internal parsing error')
flush_data()
finally:
if end_tokens is not None:
self._end_token_stack.pop()
return body
def parse(self):
"""Parse the whole template into a `Template` node."""
result = nodes.Template(self.subparse(), lineno=1)
result.set_environment(self.environment)
return result
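# Usage sketch (assuming the enclosing class is the usual jinja2 Parser
# with the conventional (environment, source) constructor):
#   ast = Parser(environment, source).parse()
#   assert isinstance(ast, nodes.Template)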
| agpl-3.0 |
boxlab/UltraEnc-X | 0.3.1/model/main_functions.py | 1 | 1133 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Implement the main functions required by enc.py.
'''
from random import randint
from model.requirements import lmap, repeat
def generate_otp(sheets, strength, length):
for sheet in range(sheets):
filename = "uexor_%s.uep" % (str(sheet),)
with open(filename, 'w') as output:
for i in range(length):
print(randint(1, strength), file=output)
def load_sheet(filename):
with open(filename, 'r') as sheet:
contents = sheet.read().splitlines()
return lmap(int, contents)
def plaintext_to_str(plaintext):
return ''.join(lmap(chr, plaintext))
def get_plaintext():
plaintext = input('Please type your message: ')
return lmap(ord, plaintext)
def load_file(filename):
with open(filename, 'r') as file:
contents = file.read().splitlines()
return lmap(int, contents)
def save_file(filename, data):
with open(filename, 'w') as file:
file.write('\n'.join(lmap(str, data)))
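# Usage sketch (the actual combination of plaintext and pad is assumed to
# live in enc.py, which drives these helpers):
#   generate_otp(sheets=1, strength=255, length=1024)  # writes uexor_0.uep
#   pad = load_sheet('uexor_0.uep')      # -> list of ints, one per line
#   message = get_plaintext()            # -> list of character code points
#   save_file('cipher.uec', message)     # persists ints, one per line
#   print(plaintext_to_str(load_file('cipher.uec')))  # round-trips the text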
| gpl-3.0 |
fluxw42/youtube-dl | youtube_dl/extractor/shahid.py | 38 | 5255 | # coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
ExtractorError,
int_or_none,
parse_iso8601,
str_or_none,
urlencode_postdata,
clean_html,
)
class ShahidIE(InfoExtractor):
_NETRC_MACHINE = 'shahid'
_VALID_URL = r'https?://shahid\.mbc\.net/ar/(?P<type>episode|movie)/(?P<id>\d+)'
_TESTS = [{
'url': 'https://shahid.mbc.net/ar/episode/90574/%D8%A7%D9%84%D9%85%D9%84%D9%83-%D8%B9%D8%A8%D8%AF%D8%A7%D9%84%D9%84%D9%87-%D8%A7%D9%84%D8%A5%D9%86%D8%B3%D8%A7%D9%86-%D8%A7%D9%84%D9%85%D9%88%D8%B3%D9%85-1-%D9%83%D9%84%D9%8A%D8%A8-3.html',
'info_dict': {
'id': '90574',
'ext': 'mp4',
'title': 'الملك عبدالله الإنسان الموسم 1 كليب 3',
'description': 'الفيلم الوثائقي - الملك عبد الله الإنسان',
'duration': 2972,
'timestamp': 1422057420,
'upload_date': '20150123',
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
'url': 'https://shahid.mbc.net/ar/movie/151746/%D8%A7%D9%84%D9%82%D9%86%D8%A7%D8%B5%D8%A9.html',
'only_matching': True
}, {
# shahid plus subscriber only
'url': 'https://shahid.mbc.net/ar/episode/90511/%D9%85%D8%B1%D8%A7%D9%8A%D8%A7-2011-%D8%A7%D9%84%D9%85%D9%88%D8%B3%D9%85-1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1.html',
'only_matching': True
}]
def _real_initialize(self):
email, password = self._get_login_info()
if email is None:
return
try:
user_data = self._download_json(
'https://shahid.mbc.net/wd/service/users/login',
None, 'Logging in', data=json.dumps({
'email': email,
'password': password,
'basic': 'false',
}).encode('utf-8'), headers={
'Content-Type': 'application/json; charset=UTF-8',
})['user']
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError):
fail_data = self._parse_json(
e.cause.read().decode('utf-8'), None, fatal=False)
if fail_data:
faults = fail_data.get('faults', [])
faults_message = ', '.join([clean_html(fault['userMessage']) for fault in faults if fault.get('userMessage')])
if faults_message:
raise ExtractorError(faults_message, expected=True)
raise
self._download_webpage(
'https://shahid.mbc.net/populateContext',
None, 'Populate Context', data=urlencode_postdata({
'firstName': user_data['firstName'],
'lastName': user_data['lastName'],
'userName': user_data['email'],
'csg_user_name': user_data['email'],
'subscriberId': user_data['id'],
'sessionId': user_data['sessionId'],
}))
def _get_api_data(self, response):
data = response.get('data', {})
error = data.get('error')
if error:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, '\n'.join(error.values())),
expected=True)
return data
def _real_extract(self, url):
page_type, video_id = re.match(self._VALID_URL, url).groups()
player = self._get_api_data(self._download_json(
'https://shahid.mbc.net/arContent/getPlayerContent-param-.id-%s.type-player.html' % video_id,
video_id, 'Downloading player JSON'))
if player.get('drm'):
raise ExtractorError('This video is DRM protected.', expected=True)
formats = self._extract_m3u8_formats(player['url'], video_id, 'mp4')
self._sort_formats(formats)
video = self._get_api_data(self._download_json(
'http://api.shahid.net/api/v1_1/%s/%s' % (page_type, video_id),
video_id, 'Downloading video JSON', query={
'apiKey': 'sh@hid0nlin3',
'hash': 'b2wMCTHpSmyxGqQjJFOycRmLSex+BpTK/ooxy6vHaqs=',
}))[page_type]
title = video['title']
categories = [
category['name']
for category in video.get('genres', []) if 'name' in category]
return {
'id': video_id,
'title': title,
'description': video.get('description'),
'thumbnail': video.get('thumbnailUrl'),
'duration': int_or_none(video.get('duration')),
'timestamp': parse_iso8601(video.get('referenceDate')),
'categories': categories,
'series': video.get('showTitle') or video.get('showName'),
'season': video.get('seasonTitle'),
'season_number': int_or_none(video.get('seasonNumber')),
'season_id': str_or_none(video.get('seasonId')),
'episode_number': int_or_none(video.get('number')),
'episode_id': video_id,
'formats': formats,
}
| unlicense |
tima/ansible | lib/ansible/modules/network/asa/asa_command.py | 20 | 5713 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: asa_command
version_added: "2.2"
author: "Peter Sprygada (@privateip), Patrick Ogenstad (@ogenstad)"
short_description: Run arbitrary commands on Cisco ASA devices
description:
- Sends arbitrary commands to an ASA node and returns the results
read from the device. The C(asa_command) module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
extends_documentation_fragment: asa
options:
commands:
description:
- List of commands to send to the remote device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module does not return until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of retries, the task fails.
See examples.
required: false
default: null
aliases: ['waitfor']
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
required: false
default: all
choices: ['any', 'all']
retries:
description:
- Specifies the number of times a command should be retried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
---
vars:
cli:
host: "{{ inventory_hostname }}"
username: cisco
password: cisco
authorize: yes
auth_pass: cisco
transport: cli
---
- asa_command:
commands:
- show version
provider: "{{ cli }}"
- asa_command:
commands:
- show asp drop
- show memory
provider: "{{ cli }}"
- asa_command:
commands:
- show version
provider: "{{ cli }}"
context: system
"""
RETURN = """
stdout:
description: the set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: the conditionals that failed
returned: failed
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.asa.asa import asa_argument_spec, check_args
from ansible.module_utils.network.asa.asa import run_commands
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.six import string_types
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = str(item).split('\n')
yield item
def main():
spec = dict(
# { command: <str>, prompt: <str>, response: <str> }
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
spec.update(asa_argument_spec)
module = AnsibleModule(argument_spec=spec, supports_check_mode=True)
check_args(module)
result = {'changed': False}
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
commands = module.params['commands']
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result.update({
'changed': False,
'stdout': responses,
'stdout_lines': list(to_lines(responses))
})
module.exit_json(**result)
if __name__ == '__main__':
main()
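# Example play using wait_for (values hypothetical; the conditional syntax
# follows the standard Ansible ``result[N] contains ...`` form):
#
# - asa_command:
#     commands:
#       - show version
#     wait_for:
#       - result[0] contains 'Cisco Adaptive Security Appliance'
#     retries: 3
#     interval: 5
#     provider: "{{ cli }}"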
| gpl-3.0 |
fkorotkov/pants | contrib/android/tests/python/pants_test/contrib/android/test_android_manifest_parser.py | 14 | 3340 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants_test.util.xml_test_base import XmlTestBase
from pants.contrib.android.android_manifest_parser import AndroidManifest, AndroidManifestParser
class TestAndroidManifestParser(XmlTestBase):
"""Test the AndroidManifestParser and AndroidManifest classes."""
# Test AndroidManifestParser.parse_manifest().
def test_parse_manifest(self):
with self.xml_file() as xml:
manifest = AndroidManifestParser.parse_manifest(xml)
self.assertEqual(manifest.path, xml)
def test_bad_parse_manifest(self):
xml = '/no/file/here'
with self.assertRaises(AndroidManifestParser.BadManifestError):
AndroidManifestParser.parse_manifest(xml)
# Test AndroidManifest.package_name.
def test_package_name(self):
with self.xml_file() as xml:
manifest = AndroidManifestParser.parse_manifest(xml)
self.assertEqual(manifest.package_name, 'org.pantsbuild.example.hello')
def test_missing_manifest_element(self):
with self.xml_file(manifest_element='some_other_element') as xml:
with self.assertRaises(AndroidManifestParser.BadManifestError):
AndroidManifestParser.parse_manifest(xml)
def test_missing_package_attribute(self):
with self.xml_file(package_attribute='bad_value') as xml:
with self.assertRaises(AndroidManifestParser.BadManifestError):
AndroidManifestParser.parse_manifest(xml)
def test_weird_package_name(self):
# Should accept unexpected package names; the info gets verified in classes that consume it.
with self.xml_file(package_value='cola') as xml:
manifest = AndroidManifestParser.parse_manifest(xml)
self.assertEqual(manifest.package_name, 'cola')
# Test AndroidManifest.target_sdk.
def test_target_sdk(self):
with self.xml_file() as xml:
manifest = AndroidManifestParser.parse_manifest(xml)
self.assertEqual(manifest.target_sdk, '19')
# These next tests show AndroidManifest.target_sdk fails silently and returns None.
def test_no_uses_sdk_element(self):
with self.xml_file(uses_sdk_element='something-random') as xml:
manifest = AndroidManifestParser.parse_manifest(xml)
self.assertIsNone(manifest.target_sdk)
def test_no_target_sdk_value(self):
with self.xml_file(android_attribute='android:bad_value') as xml:
parsed = AndroidManifestParser.parse_manifest(xml)
self.assertIsNone(parsed.target_sdk)
def test_no_android_part(self):
with self.xml_file(android_attribute='unrelated:targetSdkVersion') as xml:
manifest = AndroidManifestParser.parse_manifest(xml)
self.assertEqual(manifest.package_name, 'org.pantsbuild.example.hello')
def test_missing_whole_targetsdk(self):
with self.xml_file(android_attribute='unrelated:cola') as xml:
manifest = AndroidManifestParser.parse_manifest(xml)
self.assertIsNone(manifest.target_sdk)
# Test AndroidManifest().
def test_android_manifest(self):
with self.xml_file() as xml:
test = AndroidManifest(xml, '19', 'com.foo.bar')
self.assertEqual(test.path, xml)
| apache-2.0 |
eurosata1/e2 | lib/python/Screens/LocationBox.py | 2 | 16935 | #
# Generic Screen to select a path/filename combination
#
# GUI (Screens)
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.InputBox import InputBox
from Screens.HelpMenu import HelpableScreen
from Screens.ChoiceBox import ChoiceBox
# Generic
from Tools.BoundFunction import boundFunction
from Tools.Directories import *
from Components.config import config
import os
# Quickselect
from Tools.NumericalTextInput import NumericalTextInput
# GUI (Components)
from Components.ActionMap import NumberActionMap, HelpableActionMap
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.Button import Button
from Components.FileList import FileList
from Components.MenuList import MenuList
# Timer
from enigma import eTimer
defaultInhibitDirs = ["/bin", "/boot", "/dev", "/etc", "/lib", "/proc", "/sbin", "/sys", "/usr", "/var"]
class LocationBox(Screen, NumericalTextInput, HelpableScreen):
"""Simple Class similar to MessageBox / ChoiceBox but used to choose a folder/pathname combination"""
skin = """<screen name="LocationBox" position="100,75" size="540,460" >
<widget name="text" position="0,2" size="540,22" font="Regular;22" />
<widget name="target" position="0,23" size="540,22" valign="center" font="Regular;22" />
<widget name="filelist" position="0,55" zPosition="1" size="540,210" scrollbarMode="showOnDemand" selectionDisabled="1" />
<widget name="textbook" position="0,272" size="540,22" font="Regular;22" />
<widget name="booklist" position="5,302" zPosition="2" size="535,100" scrollbarMode="showOnDemand" />
<widget name="red" position="0,415" zPosition="1" size="135,40" pixmap="skin_default/buttons/red.png" transparent="1" alphatest="on" />
<widget name="key_red" position="0,415" zPosition="2" size="135,40" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="green" position="135,415" zPosition="1" size="135,40" pixmap="skin_default/buttons/green.png" transparent="1" alphatest="on" />
<widget name="key_green" position="135,415" zPosition="2" size="135,40" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="yellow" position="270,415" zPosition="1" size="135,40" pixmap="skin_default/buttons/yellow.png" transparent="1" alphatest="on" />
<widget name="key_yellow" position="270,415" zPosition="2" size="135,40" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="blue" position="405,415" zPosition="1" size="135,40" pixmap="skin_default/buttons/blue.png" transparent="1" alphatest="on" />
<widget name="key_blue" position="405,415" zPosition="2" size="135,40" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session, text = "", filename = "", currDir = None, bookmarks = None, userMode = False, windowTitle = "Select location", minFree = None, autoAdd = False, editDir = False, inhibitDirs = [], inhibitMounts = []):
# Init parents
Screen.__init__(self, session)
NumericalTextInput.__init__(self, handleTimeout = False)
HelpableScreen.__init__(self)
# Set useable chars
self.setUseableChars(u'1234567890abcdefghijklmnopqrstuvwxyz')
# Quickselect Timer
self.qs_timer = eTimer()
self.qs_timer.callback.append(self.timeout)
self.qs_timer_type = 0
# Initialize Quickselect
self.curr_pos = -1
self.quickselect = ""
# Set Text
self["text"] = Label(text)
self["textbook"] = Label(_("Bookmarks"))
# Save parameters locally
self.text = text
self.filename = filename
self.minFree = minFree
self.realBookmarks = bookmarks
self.bookmarks = bookmarks and bookmarks.value[:] or []
self.userMode = userMode
self.autoAdd = autoAdd
self.editDir = editDir
self.inhibitDirs = inhibitDirs
# Initialize FileList
self["filelist"] = FileList(currDir, showDirectories = True, showFiles = False, inhibitMounts = inhibitMounts, inhibitDirs = inhibitDirs)
# Initialize BookList
self["booklist"] = MenuList(self.bookmarks)
# Buttons
self["key_green"] = Button(_("OK"))
self["key_yellow"] = Button(_("Rename"))
self["key_blue"] = Button(_("Remove bookmark"))
self["key_red"] = Button(_("Cancel"))
# Background for Buttons
self["green"] = Pixmap()
self["yellow"] = Pixmap()
self["blue"] = Pixmap()
self["red"] = Pixmap()
# Initialize Target
self["target"] = Label()
if self.userMode:
self.usermodeOn()
# Custom Action Handler
class LocationBoxActionMap(HelpableActionMap):
def __init__(self, parent, context, actions = { }, prio=0):
HelpableActionMap.__init__(self, parent, context, actions, prio)
self.box = parent
def action(self, contexts, action):
# Reset Quickselect
self.box.timeout(force = True)
return HelpableActionMap.action(self, contexts, action)
# Actions that will reset quickselect
self["WizardActions"] = LocationBoxActionMap(self, "WizardActions",
{
"left": self.left,
"right": self.right,
"up": self.up,
"down": self.down,
"ok": (self.ok, _("select")),
"back": (self.cancel, _("Cancel")),
}, -2)
self["ColorActions"] = LocationBoxActionMap(self, "ColorActions",
{
"red": self.cancel,
"green": self.select,
"yellow": self.changeName,
"blue": self.addRemoveBookmark,
}, -2)
self["EPGSelectActions"] = LocationBoxActionMap(self, "EPGSelectActions",
{
"prevBouquet": (self.switchToBookList, _("switch to bookmarks")),
"nextBouquet": (self.switchToFileList, _("switch to filelist")),
}, -2)
self["MenuActions"] = LocationBoxActionMap(self, "MenuActions",
{
"menu": (self.showMenu, _("menu")),
}, -2)
# Actions used by quickselect
self["NumberActions"] = NumberActionMap(["NumberActions"],
{
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
})
# Run some functions when shown
self.onShown.extend((
boundFunction(self.setTitle, _(windowTitle)),
self.updateTarget,
self.showHideRename,
))
self.onLayoutFinish.append(self.switchToFileListOnStart)
# Make sure we remove our callback
self.onClose.append(self.disableTimer)
def switchToFileListOnStart(self):
if self.realBookmarks and self.realBookmarks.value:
self.currList = "booklist"
currDir = self["filelist"].current_directory
if currDir in self.bookmarks:
self["booklist"].moveToIndex(self.bookmarks.index(currDir))
else:
self.switchToFileList()
def disableTimer(self):
self.qs_timer.callback.remove(self.timeout)
def showHideRename(self):
# Don't allow renaming when filename is empty
if self.filename == "":
self["key_yellow"].hide()
def switchToFileList(self):
if not self.userMode:
self.currList = "filelist"
self["filelist"].selectionEnabled(1)
self["booklist"].selectionEnabled(0)
self["key_blue"].text = _("Add bookmark")
self.updateTarget()
def switchToBookList(self):
self.currList = "booklist"
self["filelist"].selectionEnabled(0)
self["booklist"].selectionEnabled(1)
self["key_blue"].text = _("Remove bookmark")
self.updateTarget()
def addRemoveBookmark(self):
if self.currList == "filelist":
# add bookmark
folder = self["filelist"].getSelection()[0]
if folder is not None and not folder in self.bookmarks:
self.bookmarks.append(folder)
self.bookmarks.sort()
self["booklist"].setList(self.bookmarks)
else:
# remove bookmark
if not self.userMode:
name = self["booklist"].getCurrent()
self.session.openWithCallback(
boundFunction(self.removeBookmark, name),
MessageBox,
_("Do you really want to remove your bookmark of %s?") % (name),
)
def removeBookmark(self, name, ret):
if not ret:
return
if name in self.bookmarks:
self.bookmarks.remove(name)
self["booklist"].setList(self.bookmarks)
def createDir(self):
if self["filelist"].current_directory != None:
self.session.openWithCallback(
self.createDirCallback,
InputBox,
title = _("Please enter name of the new directory"),
text = self.filename
)
def createDirCallback(self, res):
if res:
path = os.path.join(self["filelist"].current_directory, res)
if not pathExists(path):
if not createDir(path):
self.session.open(
MessageBox,
_("Creating directory %s failed.") % (path),
type = MessageBox.TYPE_ERROR,
timeout = 5
)
self["filelist"].refresh()
else:
self.session.open(
MessageBox,
_("The path %s already exists.") % (path),
type = MessageBox.TYPE_ERROR,
timeout = 5
)
def removeDir(self):
sel = self["filelist"].getSelection()
if sel and pathExists(sel[0]):
self.session.openWithCallback(
boundFunction(self.removeDirCallback, sel[0]),
MessageBox,
_("Do you really want to remove directory %s from the disk?") % (sel[0]),
type = MessageBox.TYPE_YESNO
)
else:
self.session.open(
MessageBox,
_("Invalid directory selected: %s") % (sel[0]),
type = MessageBox.TYPE_ERROR,
timeout = 5
)
def removeDirCallback(self, name, res):
if res:
if not removeDir(name):
self.session.open(
MessageBox,
_("Removing directory %s failed. (Maybe not empty.)") % (name),
type = MessageBox.TYPE_ERROR,
timeout = 5
)
else:
self["filelist"].refresh()
self.removeBookmark(name, True)
val = self.realBookmarks and self.realBookmarks.value
if val and name in val:
val.remove(name)
self.realBookmarks.value = val
self.realBookmarks.save()
def up(self):
self[self.currList].up()
self.updateTarget()
def down(self):
self[self.currList].down()
self.updateTarget()
def left(self):
self[self.currList].pageUp()
self.updateTarget()
def right(self):
self[self.currList].pageDown()
self.updateTarget()
def ok(self):
if self.currList == "filelist":
if self["filelist"].canDescent():
self["filelist"].descent()
self.updateTarget()
else:
self.select()
def cancel(self):
self.close(None)
def getPreferredFolder(self):
if self.currList == "filelist":
# XXX: We might want to change this for parent folder...
return self["filelist"].getSelection()[0]
else:
return self["booklist"].getCurrent()
def selectConfirmed(self, ret):
if ret:
ret = ''.join((self.getPreferredFolder(), self.filename))
if self.realBookmarks:
if self.autoAdd and not ret in self.bookmarks:
if self.getPreferredFolder() not in self.bookmarks:
self.bookmarks.append(self.getPreferredFolder())
self.bookmarks.sort()
if self.bookmarks != self.realBookmarks.value:
self.realBookmarks.value = self.bookmarks
self.realBookmarks.save()
if self.filename and not pathExists(ret):
menu = [(_("Create new folder and exit"), "folder"), (_("Save and exit"), "exit")]
text = _("Select action")
def dirAction(choice):
if choice:
if choice[1] == "folder":
if not createDir(ret):
self.session.open(MessageBox, _("Creating directory %s failed.") % (ret), type = MessageBox.TYPE_ERROR)
return
self.close(ret)
else:
self.cancel()
self.session.openWithCallback(dirAction, ChoiceBox, title=text, list=menu)
return
self.close(ret)
def select(self):
currentFolder = self.getPreferredFolder()
# Do nothing unless current Directory is valid
if currentFolder is not None:
# Check if we need a minimum amount of free space available
if self.minFree is not None:
# Try to read fs stats
try:
s = os.statvfs(currentFolder)
if (s.f_bavail * s.f_bsize) / 1000000 > self.minFree:
# Automatically confirm if we have enough free disk Space available
return self.selectConfirmed(True)
except OSError:
pass
# Ask the user to confirm selecting this folder
self.session.openWithCallback(
self.selectConfirmed,
MessageBox,
_("There might not be enough Space on the selected Partition.\nDo you really want to continue?"),
type = MessageBox.TYPE_YESNO
)
# No minimum free Space means we can safely close
else:
self.selectConfirmed(True)
def changeName(self):
if self.filename != "":
# TODO: Add Information that changing extension is bad? disallow?
self.session.openWithCallback(
self.nameChanged,
InputBox,
title = _("Please enter a new filename"),
text = self.filename
)
def nameChanged(self, res):
if res is not None:
if len(res):
self.filename = res
self.updateTarget()
else:
self.session.open(
MessageBox,
_("An empty filename is illegal."),
type = MessageBox.TYPE_ERROR,
timeout = 5
)
def updateTarget(self):
# Write the combination of folder & filename when the folder is valid
currFolder = self.getPreferredFolder()
if currFolder is not None:
self["target"].setText(''.join((currFolder, self.filename)))
# Display a Warning otherwise
else:
self["target"].setText(_("Invalid location"))
def showMenu(self):
if not self.userMode and self.realBookmarks:
if self.currList == "filelist":
menu = [
(_("switch to bookmarks"), self.switchToBookList),
(_("add bookmark"), self.addRemoveBookmark)
]
if self.editDir:
menu.extend((
(_("create directory"), self.createDir),
(_("remove directory"), self.removeDir)
))
else:
menu = (
(_("switch to filelist"), self.switchToFileList),
(_("remove bookmark"), self.addRemoveBookmark)
)
self.session.openWithCallback(
self.menuCallback,
ChoiceBox,
title = "",
list = menu
)
def menuCallback(self, choice):
if choice:
choice[1]()
def usermodeOn(self):
self.switchToBookList()
self["filelist"].hide()
self["key_blue"].hide()
def keyNumberGlobal(self, number):
# Cancel Timeout
self.qs_timer.stop()
# See if another key was pressed before
if number != self.lastKey:
# Reset lastKey again so NumericalTextInput triggers its keychange
self.nextKey()
# Try to select what was typed
self.selectByStart()
# Increment position
self.curr_pos += 1
# Get char and append to text
char = self.getKey(number)
self.quickselect = self.quickselect[:self.curr_pos] + unicode(char)
# Start Timeout
self.qs_timer_type = 0
self.qs_timer.start(1000, 1)
def selectByStart(self):
# Don't do anything on initial call
if not self.quickselect:
return
# Don't select if no dir
if self["filelist"].getCurrentDirectory():
# TODO: implement proper method in Components.FileList
files = self["filelist"].getFileList()
# Initialize index
idx = 0
# We select by filename which is absolute
lookfor = self["filelist"].getCurrentDirectory() + self.quickselect
# Select file starting with generated text
for file in files:
if file[0][0] and file[0][0].lower().startswith(lookfor):
self["filelist"].instance.moveSelectionTo(idx)
break
idx += 1
def timeout(self, force = False):
# Timeout Key
if not force and self.qs_timer_type == 0:
# Try to select what was typed
self.selectByStart()
# Reset Key
self.lastKey = -1
# Change type
self.qs_timer_type = 1
# Start timeout again
self.qs_timer.start(1000, 1)
# Timeout Quickselect
else:
# Eventually stop Timer
self.qs_timer.stop()
# Invalidate
self.lastKey = -1
self.curr_pos = -1
self.quickselect = ""
def __repr__(self):
return str(type(self)) + "(" + self.text + ")"
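# Usage sketch (the callback receives the chosen path, or None on cancel,
# mirroring close(ret)/close(None) above; names are hypothetical):
#   def pathChosen(ret):
#       if ret is not None:
#           print "selected:", ret
#   session.openWithCallback(pathChosen, LocationBox,
#       text=_("Choose target folder"), currDir="/media/hdd/")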
def MovieLocationBox(session, text, dir, filename = "", minFree = None):
return LocationBox(session, text = text, filename = filename, currDir = dir, bookmarks = config.movielist.videodirs, autoAdd = True, editDir = True, inhibitDirs = defaultInhibitDirs, minFree = minFree)
class TimeshiftLocationBox(LocationBox):
def __init__(self, session):
LocationBox.__init__(
self,
session,
text = _("Where to save temporary timeshift recordings?"),
currDir = config.usage.timeshift_path.value,
bookmarks = config.usage.allowed_timeshift_paths,
autoAdd = True,
editDir = True,
inhibitDirs = defaultInhibitDirs,
minFree = 1024 # the same requirement is hardcoded in servicedvb.cpp
)
self.skinName = "LocationBox"
def cancel(self):
config.usage.timeshift_path.cancel()
LocationBox.cancel(self)
def selectConfirmed(self, ret):
if ret:
config.usage.timeshift_path.value = self.getPreferredFolder()
config.usage.timeshift_path.save()
LocationBox.selectConfirmed(self, ret)
| gpl-2.0 |
groovecoder/kuma | vendor/packages/nose/config.py | 48 | 25238 | import logging
import optparse
import os
import re
import sys
import ConfigParser
from optparse import OptionParser
from nose.util import absdir, tolist
from nose.plugins.manager import NoPlugins
from warnings import warn, filterwarnings
log = logging.getLogger(__name__)
# not allowed in config files
option_blacklist = ['help', 'verbose']
config_files = [
# Linux users will prefer this
"~/.noserc",
# Windows users will prefer this
"~/nose.cfg"
]
# platforms on which the exe check defaults to off
# Windows and IronPython
exe_allowed_platforms = ('win32', 'cli')
filterwarnings("always", category=DeprecationWarning,
module=r'(.*\.)?nose\.config')
class NoSuchOptionError(Exception):
def __init__(self, name):
Exception.__init__(self, name)
self.name = name
class ConfigError(Exception):
pass
class ConfiguredDefaultsOptionParser(object):
"""
Handler for options from commandline and config files.
"""
def __init__(self, parser, config_section, error=None, file_error=None):
self._parser = parser
self._config_section = config_section
if error is None:
error = self._parser.error
self._error = error
if file_error is None:
file_error = lambda msg, **kw: error(msg)
self._file_error = file_error
def _configTuples(self, cfg, filename):
config = []
if self._config_section in cfg.sections():
for name, value in cfg.items(self._config_section):
config.append((name, value, filename))
return config
def _readFromFilenames(self, filenames):
config = []
for filename in filenames:
cfg = ConfigParser.RawConfigParser()
try:
cfg.read(filename)
except ConfigParser.Error, exc:
raise ConfigError("Error reading config file %r: %s" %
(filename, str(exc)))
config.extend(self._configTuples(cfg, filename))
return config
def _readFromFileObject(self, fh):
cfg = ConfigParser.RawConfigParser()
try:
filename = fh.name
except AttributeError:
filename = '<???>'
try:
cfg.readfp(fh)
except ConfigParser.Error, exc:
raise ConfigError("Error reading config file %r: %s" %
(filename, str(exc)))
return self._configTuples(cfg, filename)
def _readConfiguration(self, config_files):
try:
config_files.readline
except AttributeError:
filename_or_filenames = config_files
if isinstance(filename_or_filenames, basestring):
filenames = [filename_or_filenames]
else:
filenames = filename_or_filenames
config = self._readFromFilenames(filenames)
else:
fh = config_files
config = self._readFromFileObject(fh)
return config
def _processConfigValue(self, name, value, values, parser):
opt_str = '--' + name
option = parser.get_option(opt_str)
if option is None:
raise NoSuchOptionError(name)
else:
option.process(opt_str, value, values, parser)
def _applyConfigurationToValues(self, parser, config, values):
for name, value, filename in config:
if name in option_blacklist:
continue
try:
self._processConfigValue(name, value, values, parser)
except NoSuchOptionError, exc:
self._file_error(
"Error reading config file %r: "
"no such option %r" % (filename, exc.name),
name=name, filename=filename)
except optparse.OptionValueError, exc:
msg = str(exc).replace('--' + name, repr(name), 1)
self._file_error("Error reading config file %r: "
"%s" % (filename, msg),
name=name, filename=filename)
def parseArgsAndConfigFiles(self, args, config_files):
values = self._parser.get_default_values()
try:
config = self._readConfiguration(config_files)
except ConfigError, exc:
self._error(str(exc))
else:
try:
self._applyConfigurationToValues(self._parser, config, values)
except ConfigError, exc:
self._error(str(exc))
return self._parser.parse_args(args, values)
class Config(object):
"""nose configuration.
Instances of Config are used throughout nose to configure
behavior, including plugin lists. Here are the default values for
all config keys::
self.env = env = kw.pop('env', {})
self.args = ()
self.testMatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
self.addPaths = not env.get('NOSE_NOPATH', False)
self.configSection = 'nosetests'
self.debug = env.get('NOSE_DEBUG')
self.debugLog = env.get('NOSE_DEBUG_LOG')
self.exclude = None
self.getTestCaseNamesCompat = False
self.includeExe = env.get('NOSE_INCLUDE_EXE',
sys.platform in exe_allowed_platforms)
self.ignoreFiles = (re.compile(r'^\.'),
re.compile(r'^_'),
re.compile(r'^setup\.py$')
)
self.include = None
self.loggingConfig = None
self.logStream = sys.stderr
self.options = NoOptions()
self.parser = None
self.plugins = NoPlugins()
self.srcDirs = ('lib', 'src')
self.runOnInit = True
self.stopOnError = env.get('NOSE_STOP', False)
self.stream = sys.stderr
self.testNames = ()
self.verbosity = int(env.get('NOSE_VERBOSE', 1))
self.where = ()
self.py3where = ()
self.workingDir = None
"""
def __init__(self, **kw):
self.env = env = kw.pop('env', {})
self.args = ()
self.testMatchPat = env.get('NOSE_TESTMATCH',
r'(?:^|[\b_\.%s-])[Tt]est' % os.sep)
self.testMatch = re.compile(self.testMatchPat)
self.addPaths = not env.get('NOSE_NOPATH', False)
self.configSection = 'nosetests'
self.debug = env.get('NOSE_DEBUG')
self.debugLog = env.get('NOSE_DEBUG_LOG')
self.exclude = None
self.getTestCaseNamesCompat = False
self.includeExe = env.get('NOSE_INCLUDE_EXE',
sys.platform in exe_allowed_platforms)
self.ignoreFilesDefaultStrings = [r'^\.',
r'^_',
r'^setup\.py$',
]
self.ignoreFiles = map(re.compile, self.ignoreFilesDefaultStrings)
self.include = None
self.loggingConfig = None
self.logStream = sys.stderr
self.options = NoOptions()
self.parser = None
self.plugins = NoPlugins()
self.srcDirs = ('lib', 'src')
self.runOnInit = True
self.stopOnError = env.get('NOSE_STOP', False)
self.stream = sys.stderr
self.testNames = []
self.verbosity = int(env.get('NOSE_VERBOSE', 1))
self.where = ()
self.py3where = ()
self.workingDir = os.getcwd()
self.traverseNamespace = False
self.firstPackageWins = False
self.parserClass = OptionParser
self.worker = False
self._default = self.__dict__.copy()
self.update(kw)
self._orig = self.__dict__.copy()
def __getstate__(self):
state = self.__dict__.copy()
del state['stream']
del state['_orig']
del state['_default']
del state['env']
del state['logStream']
# FIXME remove plugins, have only plugin manager class
state['plugins'] = self.plugins.__class__
return state
def __setstate__(self, state):
plugincls = state.pop('plugins')
self.update(state)
self.worker = True
# FIXME won't work for static plugin lists
self.plugins = plugincls()
self.plugins.loadPlugins()
# needed so .can_configure gets set appropriately
dummy_parser = self.parserClass()
self.plugins.addOptions(dummy_parser, {})
self.plugins.configure(self.options, self)
def __repr__(self):
d = self.__dict__.copy()
# don't expose env, could include sensitive info
d['env'] = {}
keys = [ k for k in d.keys()
if not k.startswith('_') ]
keys.sort()
return "Config(%s)" % ', '.join([ '%s=%r' % (k, d[k])
for k in keys ])
__str__ = __repr__
def _parseArgs(self, argv, cfg_files):
def warn_sometimes(msg, name=None, filename=None):
if (hasattr(self.plugins, 'excludedOption') and
self.plugins.excludedOption(name)):
msg = ("Option %r in config file %r ignored: "
"excluded by runtime environment" %
(name, filename))
warn(msg, RuntimeWarning)
else:
raise ConfigError(msg)
parser = ConfiguredDefaultsOptionParser(
self.getParser(), self.configSection, file_error=warn_sometimes)
return parser.parseArgsAndConfigFiles(argv[1:], cfg_files)
def configure(self, argv=None, doc=None):
"""Configure the nose running environment. Execute configure before
collecting tests with nose.TestCollector to enable output capture and
other features.
"""
env = self.env
if argv is None:
argv = sys.argv
cfg_files = getattr(self, 'files', [])
options, args = self._parseArgs(argv, cfg_files)
# If -c --config has been specified on command line,
# load those config files and reparse
if getattr(options, 'files', []):
options, args = self._parseArgs(argv, options.files)
self.options = options
if args:
self.testNames = args
if options.testNames is not None:
self.testNames.extend(tolist(options.testNames))
if options.py3where is not None:
if sys.version_info >= (3,):
options.where = options.py3where
# `where` is an append action, so it can't have a default value
# in the parser, or that default will always be in the list
if not options.where:
options.where = env.get('NOSE_WHERE', None)
# include and exclude also
if not options.ignoreFiles:
options.ignoreFiles = env.get('NOSE_IGNORE_FILES', [])
if not options.include:
options.include = env.get('NOSE_INCLUDE', [])
if not options.exclude:
options.exclude = env.get('NOSE_EXCLUDE', [])
self.addPaths = options.addPaths
self.stopOnError = options.stopOnError
self.verbosity = options.verbosity
self.includeExe = options.includeExe
self.traverseNamespace = options.traverseNamespace
self.debug = options.debug
self.debugLog = options.debugLog
self.loggingConfig = options.loggingConfig
self.firstPackageWins = options.firstPackageWins
self.configureLogging()
if not options.byteCompile:
sys.dont_write_bytecode = True
if options.where is not None:
self.configureWhere(options.where)
if options.testMatch:
self.testMatch = re.compile(options.testMatch)
if options.ignoreFiles:
self.ignoreFiles = map(re.compile, tolist(options.ignoreFiles))
log.info("Ignoring files matching %s", options.ignoreFiles)
else:
log.info("Ignoring files matching %s", self.ignoreFilesDefaultStrings)
if options.include:
self.include = map(re.compile, tolist(options.include))
log.info("Including tests matching %s", options.include)
if options.exclude:
self.exclude = map(re.compile, tolist(options.exclude))
log.info("Excluding tests matching %s", options.exclude)
# When listing plugins we don't want to run them
if not options.showPlugins:
self.plugins.configure(options, self)
self.plugins.begin()
def configureLogging(self):
"""Configure logging for nose, or optionally other packages. Any logger
name may be set with the debug option, and that logger will be set to
debug level and be assigned the same handler as the nose loggers, unless
it already has a handler.
"""
if self.loggingConfig:
from logging.config import fileConfig
fileConfig(self.loggingConfig)
return
format = logging.Formatter('%(name)s: %(levelname)s: %(message)s')
if self.debugLog:
handler = logging.FileHandler(self.debugLog)
else:
handler = logging.StreamHandler(self.logStream)
handler.setFormatter(format)
logger = logging.getLogger('nose')
logger.propagate = 0
# only add our default handler if there isn't already one there
# this avoids annoying duplicate log messages.
found = False
if self.debugLog:
debugLogAbsPath = os.path.abspath(self.debugLog)
for h in logger.handlers:
if type(h) == logging.FileHandler and \
h.baseFilename == debugLogAbsPath:
found = True
else:
for h in logger.handlers:
if type(h) == logging.StreamHandler and \
h.stream == self.logStream:
found = True
if not found:
logger.addHandler(handler)
# default level
lvl = logging.WARNING
if self.verbosity >= 5:
lvl = 0
elif self.verbosity >= 4:
lvl = logging.DEBUG
elif self.verbosity >= 3:
lvl = logging.INFO
logger.setLevel(lvl)
# individual overrides
if self.debug:
# no blanks
debug_loggers = [ name for name in self.debug.split(',')
if name ]
for logger_name in debug_loggers:
l = logging.getLogger(logger_name)
l.setLevel(logging.DEBUG)
if not l.handlers and not logger_name.startswith('nose'):
l.addHandler(handler)
def configureWhere(self, where):
"""Configure the working directory or directories for the test run.
"""
from nose.importer import add_path
self.workingDir = None
where = tolist(where)
warned = False
for path in where:
if not self.workingDir:
abs_path = absdir(path)
if abs_path is None:
raise ValueError("Working directory '%s' not found, or "
"not a directory" % path)
log.info("Set working dir to %s", abs_path)
self.workingDir = abs_path
if self.addPaths and \
os.path.exists(os.path.join(abs_path, '__init__.py')):
log.info("Working directory %s is a package; "
"adding to sys.path" % abs_path)
add_path(abs_path)
continue
if not warned:
warn("Use of multiple -w arguments is deprecated and "
"support may be removed in a future release. You can "
"get the same behavior by passing directories without "
"the -w argument on the command line, or by using the "
"--tests argument in a configuration file.",
DeprecationWarning)
warned = True
self.testNames.append(path)
def default(self):
"""Reset all config values to defaults.
"""
self.__dict__.update(self._default)
def getParser(self, doc=None):
"""Get the command line option parser.
"""
if self.parser:
return self.parser
env = self.env
parser = self.parserClass(doc)
parser.add_option(
"-V","--version", action="store_true",
dest="version", default=False,
help="Output nose version and exit")
parser.add_option(
"-p", "--plugins", action="store_true",
dest="showPlugins", default=False,
help="Output list of available plugins and exit. Combine with "
"higher verbosity for greater detail")
parser.add_option(
"-v", "--verbose",
action="count", dest="verbosity",
default=self.verbosity,
help="Be more verbose. [NOSE_VERBOSE]")
parser.add_option(
"--verbosity", action="store", dest="verbosity",
metavar='VERBOSITY',
type="int", help="Set verbosity; --verbosity=2 is "
"the same as -v")
parser.add_option(
"-q", "--quiet", action="store_const", const=0, dest="verbosity",
help="Be less verbose")
parser.add_option(
"-c", "--config", action="append", dest="files",
metavar="FILES",
help="Load configuration from config file(s). May be specified "
"multiple times; in that case, all config files will be "
"loaded and combined")
parser.add_option(
"-w", "--where", action="append", dest="where",
metavar="WHERE",
help="Look for tests in this directory. "
"May be specified multiple times. The first directory passed "
"will be used as the working directory, in place of the current "
"working directory, which is the default. Others will be added "
"to the list of tests to execute. [NOSE_WHERE]"
)
parser.add_option(
"--py3where", action="append", dest="py3where",
metavar="PY3WHERE",
help="Look for tests in this directory under Python 3.x. "
"Functions the same as 'where', but only applies if running under "
"Python 3.x or above. Note that, if present under 3.x, this "
"option completely replaces any directories specified with "
"'where', so the 'where' option becomes ineffective. "
"[NOSE_PY3WHERE]"
)
parser.add_option(
"-m", "--match", "--testmatch", action="store",
dest="testMatch", metavar="REGEX",
help="Files, directories, function names, and class names "
"that match this regular expression are considered tests. "
"Default: %s [NOSE_TESTMATCH]" % self.testMatchPat,
default=self.testMatchPat)
parser.add_option(
"--tests", action="store", dest="testNames", default=None,
metavar='NAMES',
help="Run these tests (comma-separated list). This argument is "
"useful mainly from configuration files; on the command line, "
"just pass the tests to run as additional arguments with no "
"switch.")
parser.add_option(
"-l", "--debug", action="store",
dest="debug", default=self.debug,
help="Activate debug logging for one or more systems. "
"Available debug loggers: nose, nose.importer, "
"nose.inspector, nose.plugins, nose.result and "
"nose.selector. Separate multiple names with a comma.")
parser.add_option(
"--debug-log", dest="debugLog", action="store",
default=self.debugLog, metavar="FILE",
help="Log debug messages to this file "
"(default: sys.stderr)")
parser.add_option(
"--logging-config", "--log-config",
dest="loggingConfig", action="store",
default=self.loggingConfig, metavar="FILE",
help="Load logging config from this file -- bypasses all other"
" logging config settings.")
parser.add_option(
"-I", "--ignore-files", action="append", dest="ignoreFiles",
metavar="REGEX",
help="Completely ignore any file that matches this regular "
"expression. Takes precedence over any other settings or "
"plugins. "
"Specifying this option will replace the default setting. "
"Specify this option multiple times "
"to add more regular expressions [NOSE_IGNORE_FILES]")
parser.add_option(
"-e", "--exclude", action="append", dest="exclude",
metavar="REGEX",
help="Don't run tests that match regular "
"expression [NOSE_EXCLUDE]")
parser.add_option(
"-i", "--include", action="append", dest="include",
metavar="REGEX",
help="This regular expression will be applied to files, "
"directories, function names, and class names for a chance "
"to include additional tests that do not match TESTMATCH. "
"Specify this option multiple times "
"to add more regular expressions [NOSE_INCLUDE]")
parser.add_option(
"-x", "--stop", action="store_true", dest="stopOnError",
default=self.stopOnError,
help="Stop running tests after the first error or failure")
parser.add_option(
"-P", "--no-path-adjustment", action="store_false",
dest="addPaths",
default=self.addPaths,
help="Don't make any changes to sys.path when "
"loading tests [NOSE_NOPATH]")
parser.add_option(
"--exe", action="store_true", dest="includeExe",
default=self.includeExe,
help="Look for tests in python modules that are "
"executable. Normal behavior is to exclude executable "
"modules, since they may not be import-safe "
"[NOSE_INCLUDE_EXE]")
parser.add_option(
"--noexe", action="store_false", dest="includeExe",
help="DO NOT look for tests in python modules that are "
"executable. (The default on the windows platform is to "
"do so.)")
parser.add_option(
"--traverse-namespace", action="store_true",
default=self.traverseNamespace, dest="traverseNamespace",
help="Traverse through all path entries of a namespace package")
parser.add_option(
"--first-package-wins", "--first-pkg-wins", "--1st-pkg-wins",
action="store_true", default=False, dest="firstPackageWins",
help="nose's importer will normally evict a package from sys."
"modules if it sees a package with the same name in a different "
"location. Set this option to disable that behavior.")
parser.add_option(
"--no-byte-compile",
action="store_false", default=True, dest="byteCompile",
help="Prevent nose from byte-compiling the source into .pyc files "
"while nose is scanning for and running tests.")
self.plugins.loadPlugins()
self.pluginOpts(parser)
self.parser = parser
return parser
def help(self, doc=None):
"""Return the generated help message
"""
return self.getParser(doc).format_help()
def pluginOpts(self, parser):
self.plugins.addOptions(parser, self.env)
def reset(self):
self.__dict__.update(self._orig)
def todict(self):
return self.__dict__.copy()
def update(self, d):
self.__dict__.update(d)
class NoOptions(object):
"""Options container that returns None for all options.
"""
def __getstate__(self):
return {}
def __setstate__(self, state):
pass
def __getnewargs__(self):
return ()
def __nonzero__(self):
return False
def user_config_files():
"""Return path to any existing user config files
"""
return filter(os.path.exists,
map(os.path.expanduser, config_files))
def all_config_files():
"""Return path to any existing user config files, plus any setup.cfg
in the current working directory.
"""
user = user_config_files()
if os.path.exists('setup.cfg'):
return user + ['setup.cfg']
return user
# used when parsing config files
def flag(val):
"""Does the value look like an on/off flag?"""
if val == 1:
return True
elif val == 0:
return False
val = str(val)
if len(val) > 5:
return False
return val.upper() in ('1', '0', 'F', 'T', 'TRUE', 'FALSE', 'ON', 'OFF')
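# Observed behavior (sketch): flag('on') -> True, flag('FALSE') -> True
# (it *looks like* a flag even though it is falsy), flag('maybe') -> False.
# Use _bool() below to get the actual truth value once a flag is detected.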
def _bool(val):
return str(val).upper() in ('1', 'T', 'TRUE', 'ON')
| mpl-2.0 |
ngoix/OCRF | sklearn/neighbors/setup.py | 308 | 1219 | import os
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('neighbors', parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension('ball_tree',
sources=['ball_tree.c'],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('kd_tree',
sources=['kd_tree.c'],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('dist_metrics',
sources=['dist_metrics.c'],
include_dirs=[numpy.get_include(),
os.path.join(numpy.get_include(),
'numpy')],
libraries=libraries)
config.add_extension('typedefs',
sources=['typedefs.c'],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
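if __name__ == '__main__':
    # Conventional numpy.distutils entry point, assumed here by analogy with
    # scikit-learn's other per-package setup.py modules (not part of the
    # snippet above); it allows building these extensions standalone.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())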
| bsd-3-clause |
dpausp/arguments | tests/concepts/document/test_documents.py | 1 | 1326 | import factory
from assert_helpers import assert_difference, assert_no_difference
from ekklesia_portal.datamodel import Document
from webtest_helpers import assert_deform, fill_form
def test_create_document(client, db_query, document_factory, proposition_type_factory, logged_in_department_admin):
department = logged_in_department_admin.managed_departments[0]
area = department.areas[0]
proposition_type = proposition_type_factory()
data = factory.build(dict, FACTORY_CLASS=document_factory)
del data['area']
data['area_id'] = area.id
del data['proposition_type']
data['proposition_type_id'] = proposition_type.id
res = client.get('/documents/+new')
form = assert_deform(res)
fill_form(form, data)
with assert_difference(db_query(Document).count, 1):
form.submit(status=302)
def test_update_document(db_session, client, document_factory, logged_in_department_admin):
department = logged_in_department_admin.managed_departments[0]
area = department.areas[0]
document = document_factory(area=area)
res = client.get(f'/documents/{document.id}/+edit')
expected = document.to_dict()
form = assert_deform(res, expected)
form['description'] = 'new description'
form.submit(status=302)
assert document.description == 'new description'
| agpl-3.0 |
leiferikb/bitpop | src/tools/python/google/gethash_timer.py | 182 | 4366 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Issue a series of GetHash requests to the SafeBrowsing servers and measure
the response times.
Usage:
$ ./gethash_timer.py --period=600 --samples=20 --output=resp.csv
--period (or -p): The amount of time (in seconds) to wait between GetHash
                  requests. Use a value of more than 300 (5 minutes) to
                  include the effect of DNS.
--samples (or -s): The number of requests to issue. If this parameter is not
specified, the test will run indefinitely.
--output (or -o): The path to a file where the output will be written in
CSV format: sample_number,response_code,elapsed_time_ms
"""
import getopt
import httplib
import sys
import time
_GETHASH_HOST = 'safebrowsing.clients.google.com'
_GETHASH_REQUEST = (
'/safebrowsing/gethash?client=googleclient&appver=1.0&pver=2.1')
# Global logging file handle.
g_file_handle = None
def IssueGetHash(prefix):
'''Issue one GetHash request to the safebrowsing servers.
Args:
prefix: A 4 byte value to look up on the server.
Returns:
The HTTP response code for the GetHash request.
'''
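  # SafeBrowsing v2 GetHash request body: "<prefix length>:<payload length>\n"
  # followed by the raw prefix bytes -- here a single 4-byte prefix, so "4:4".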
body = '4:4\n' + prefix
h = httplib.HTTPConnection(_GETHASH_HOST)
h.putrequest('POST', _GETHASH_REQUEST)
h.putheader('content-length', str(len(body)))
h.endheaders()
h.send(body)
response_code = h.getresponse().status
h.close()
return response_code
def TimedGetHash(prefix):
'''Measure the amount of time it takes to receive a GetHash response.
Args:
    prefix: A 4 byte value to look up on the server.
  Returns:
    A tuple of HTTP response code and the response time (in milliseconds).
'''
start = time.time()
response_code = IssueGetHash(prefix)
return response_code, (time.time() - start) * 1000
def RunTimedGetHash(period, samples=None):
'''Runs an experiment to measure the amount of time it takes to receive
multiple responses from the GetHash servers.
Args:
period: A floating point value that indicates (in seconds) the delay
between requests.
samples: An integer value indicating the number of requests to make.
If 'None', the test continues indefinitely.
Returns:
None.
'''
global g_file_handle
prefix = '\x50\x61\x75\x6c'
sample_count = 1
while True:
response_code, elapsed_time = TimedGetHash(prefix)
LogResponse(sample_count, response_code, elapsed_time)
sample_count += 1
    # use '>' here so that exactly `samples` requests are issued and logged
    if samples is not None and sample_count > samples:
break
time.sleep(period)
def LogResponse(sample_count, response_code, elapsed_time):
'''Output the response for one GetHash query.
Args:
sample_count: The current sample number.
response_code: The HTTP response code for the GetHash request.
elapsed_time: The round-trip time (in milliseconds) for the
GetHash request.
Returns:
None.
'''
global g_file_handle
output_list = (sample_count, response_code, elapsed_time)
print 'Request: %d, status: %d, elapsed time: %f ms' % output_list
if g_file_handle is not None:
g_file_handle.write(('%d,%d,%f' % output_list) + '\n')
g_file_handle.flush()
def SetupOutputFile(file_name):
'''Open a file for logging results.
Args:
file_name: A path to a file to store the output.
Returns:
None.
'''
global g_file_handle
g_file_handle = open(file_name, 'w')
def main():
period = 10
  samples = None
  file_name = None  # no output file unless --output is given
options, args = getopt.getopt(sys.argv[1:],
's:p:o:',
['samples=', 'period=', 'output='])
for option, value in options:
if option == '-s' or option == '--samples':
samples = int(value)
elif option == '-p' or option == '--period':
period = float(value)
elif option == '-o' or option == '--output':
file_name = value
else:
print 'Bad option: %s' % option
return 1
try:
print 'Starting Timed GetHash ----------'
    if file_name is not None:
      SetupOutputFile(file_name)
RunTimedGetHash(period, samples)
except KeyboardInterrupt:
pass
print 'Timed GetHash complete ----------'
  if g_file_handle is not None:
    g_file_handle.close()
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
kn-bibs/dotplot | dotplot/matrices.py | 1 | 3681 | """This module works with similarity matrices of aminoacids"""
import os
available_matrices = {
'PAM120': 'matrices/PAM120.txt'
}
class SimilarityMatrix:
def __init__(self, name):
filename = available_matrices[name]
# get the raw matrix from file
matrix = self.read_raw_matrix(filename)
# get minimum and maximum value from the matrix
self.scaling_factors = self.get_min_and_max(matrix)
# transform numerical values from raw matrix into percentages
self.matrix = self.normalize(matrix)
@staticmethod
def get_min_and_max(matrix):
"""Get minimal and maximal value occuring in given matrix."""
aminoacids = list(matrix.keys())
minimal_value = None
maximal_value = None
if aminoacids:
first_aa = aminoacids[0]
minimal_value = matrix[first_aa][first_aa]
maximal_value = matrix[first_aa][first_aa]
for index_1, aa_1 in enumerate(aminoacids):
for index_2 in range(index_1, len(aminoacids)):
aa_2 = aminoacids[index_2]
value = matrix[aa_1][aa_2]
minimal_value = min(minimal_value, value)
maximal_value = max(maximal_value, value)
return {
'min': minimal_value,
'max': maximal_value
}
def normalize(self, matrix):
"""Transform numerical values from raw matrix into percentages.
For example: if we had values from -5 to 5, then now:
-5 will be 0,
5 will be 1,
0 will be 0.5, and so on.
"""
aminoacids = matrix.keys()
min_value = self.scaling_factors['min']
max_value = self.scaling_factors['max']
scale_range = max_value - min_value
for aa_1 in aminoacids:
for aa_2 in aminoacids:
value = matrix[aa_1][aa_2]
matrix[aa_1][aa_2] = (value - min_value) / scale_range
return matrix
@staticmethod
def read_raw_matrix(filename):
"""This function converts the matrix into a dictionary"""
path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
filename
)
with open(path) as f:
lines = f.readlines()
matrix = {}
        # read the aminoacids' order from the first line and make all the
        # letters representing aminoacids uppercase (so we don't need to
        # worry about case later)
aa_list = [aa.upper() for aa in lines[0].split()]
# initialize matrix
for aa_name in aa_list:
matrix[aa_name] = {}
# set corresponding values for each aminoacid
for line in lines[1:]:
data = line.split()
aa_name = data[0].upper()
            # extract values from all the columns but the first one
            # and convert them from strings to integers
values = [
int(value)
for value in data[1:]
]
matrix[aa_name] = dict(zip(aa_list, values))
return matrix
def get_value(self, aa_1, aa_2):
"""This function returns similarity values for 2 aminoacids
Args:
aa_1: a letter representing first aminoacid
aa_2: a letter representing second aminoacid
"""
        # we want to return the correct value regardless of whether the user
        # gives us aa_1 = t, aa_2 = c or aa_1 = T, aa_2 = C, hence the uppercase
aa_1 = aa_1.upper()
aa_2 = aa_2.upper()
return self.matrix[aa_1][aa_2]
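# Minimal usage sketch (assumes the bundled matrices/PAM120.txt is present):
#     pam120 = SimilarityMatrix('PAM120')
#     pam120.get_value('a', 'W')   # -> normalized similarity in [0.0, 1.0]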
| lgpl-3.0 |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/local_network_gateway.py | 1 | 3220 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class LocalNetworkGateway(Resource):
"""A common class for general resource information.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param local_network_address_space: Local network site address space.
:type local_network_address_space:
~azure.mgmt.network.v2017_11_01.models.AddressSpace
:param gateway_ip_address: IP address of local network gateway.
:type gateway_ip_address: str
:param bgp_settings: Local network gateway's BGP speaker settings.
:type bgp_settings: ~azure.mgmt.network.v2017_11_01.models.BgpSettings
:param resource_guid: The resource GUID property of the
LocalNetworkGateway resource.
:type resource_guid: str
:ivar provisioning_state: The provisioning state of the
LocalNetworkGateway resource. Possible values are: 'Updating', 'Deleting',
and 'Failed'.
:vartype provisioning_state: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'local_network_address_space': {'key': 'properties.localNetworkAddressSpace', 'type': 'AddressSpace'},
'gateway_ip_address': {'key': 'properties.gatewayIpAddress', 'type': 'str'},
'bgp_settings': {'key': 'properties.bgpSettings', 'type': 'BgpSettings'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(LocalNetworkGateway, self).__init__(**kwargs)
self.local_network_address_space = kwargs.get('local_network_address_space', None)
self.gateway_ip_address = kwargs.get('gateway_ip_address', None)
self.bgp_settings = kwargs.get('bgp_settings', None)
self.resource_guid = kwargs.get('resource_guid', None)
self.provisioning_state = None
self.etag = kwargs.get('etag', None)
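# Hedged construction sketch (keyword names as declared in _attribute_map
# above; the values themselves are hypothetical):
#     gateway = LocalNetworkGateway(
#         location='westus',
#         gateway_ip_address='203.0.113.10',
#         tags={'env': 'test'},
#     )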
| mit |
kmod/icbd | stdlib/python2.5/lib-tk/Canvas.py | 19 | 7344 | # This module exports classes for the various canvas item types
# NOTE: This module was an experiment and is now obsolete.
# It's best to use the Tkinter.Canvas class directly.
from Tkinter import Canvas, _cnfmerge, _flatten
class CanvasItem:
def __init__(self, canvas, itemType, *args, **kw):
self.canvas = canvas
self.id = canvas._create(itemType, args, kw)
if not hasattr(canvas, 'items'):
canvas.items = {}
canvas.items[self.id] = self
def __str__(self):
return str(self.id)
def __repr__(self):
return '<%s, id=%d>' % (self.__class__.__name__, self.id)
def delete(self):
del self.canvas.items[self.id]
self.canvas.delete(self.id)
def __getitem__(self, key):
v = self.canvas.tk.split(self.canvas.tk.call(
self.canvas._w, 'itemconfigure',
self.id, '-' + key))
return v[4]
cget = __getitem__
def __setitem__(self, key, value):
self.canvas.itemconfig(self.id, {key: value})
def keys(self):
if not hasattr(self, '_keys'):
self._keys = map(lambda x, tk=self.canvas.tk:
tk.splitlist(x)[0][1:],
self.canvas.tk.splitlist(
self.canvas._do(
'itemconfigure',
(self.id,))))
return self._keys
def has_key(self, key):
return key in self.keys()
def __contains__(self, key):
return key in self.keys()
def addtag(self, tag, option='withtag'):
self.canvas.addtag(tag, option, self.id)
def bbox(self):
x1, y1, x2, y2 = self.canvas.bbox(self.id)
return (x1, y1), (x2, y2)
def bind(self, sequence=None, command=None, add=None):
return self.canvas.tag_bind(self.id, sequence, command, add)
def unbind(self, sequence, funcid=None):
self.canvas.tag_unbind(self.id, sequence, funcid)
def config(self, cnf={}, **kw):
return self.canvas.itemconfig(self.id, _cnfmerge((cnf, kw)))
def coords(self, pts = ()):
flat = ()
for x, y in pts: flat = flat + (x, y)
return self.canvas.coords(self.id, *flat)
def dchars(self, first, last=None):
self.canvas.dchars(self.id, first, last)
def dtag(self, ttd):
self.canvas.dtag(self.id, ttd)
def focus(self):
self.canvas.focus(self.id)
def gettags(self):
return self.canvas.gettags(self.id)
def icursor(self, index):
self.canvas.icursor(self.id, index)
def index(self, index):
return self.canvas.index(self.id, index)
def insert(self, beforethis, string):
self.canvas.insert(self.id, beforethis, string)
def lower(self, belowthis=None):
self.canvas.tag_lower(self.id, belowthis)
def move(self, xamount, yamount):
self.canvas.move(self.id, xamount, yamount)
def tkraise(self, abovethis=None):
self.canvas.tag_raise(self.id, abovethis)
raise_ = tkraise # BW compat
def scale(self, xorigin, yorigin, xscale, yscale):
self.canvas.scale(self.id, xorigin, yorigin, xscale, yscale)
def type(self):
return self.canvas.type(self.id)
class Arc(CanvasItem):
def __init__(self, canvas, *args, **kw):
CanvasItem.__init__(self, canvas, 'arc', *args, **kw)
class Bitmap(CanvasItem):
def __init__(self, canvas, *args, **kw):
CanvasItem.__init__(self, canvas, 'bitmap', *args, **kw)
class ImageItem(CanvasItem):
def __init__(self, canvas, *args, **kw):
CanvasItem.__init__(self, canvas, 'image', *args, **kw)
class Line(CanvasItem):
def __init__(self, canvas, *args, **kw):
CanvasItem.__init__(self, canvas, 'line', *args, **kw)
class Oval(CanvasItem):
def __init__(self, canvas, *args, **kw):
CanvasItem.__init__(self, canvas, 'oval', *args, **kw)
class Polygon(CanvasItem):
def __init__(self, canvas, *args, **kw):
CanvasItem.__init__(self, canvas, 'polygon', *args, **kw)
class Rectangle(CanvasItem):
def __init__(self, canvas, *args, **kw):
CanvasItem.__init__(self, canvas, 'rectangle', *args, **kw)
# XXX "Text" is taken by the Text widget...
class CanvasText(CanvasItem):
def __init__(self, canvas, *args, **kw):
CanvasItem.__init__(self, canvas, 'text', *args, **kw)
class Window(CanvasItem):
def __init__(self, canvas, *args, **kw):
CanvasItem.__init__(self, canvas, 'window', *args, **kw)
class Group:
def __init__(self, canvas, tag=None):
if not tag:
tag = 'Group%d' % id(self)
self.tag = self.id = tag
self.canvas = canvas
self.canvas.dtag(self.tag)
def str(self):
return self.tag
__str__ = str
def _do(self, cmd, *args):
return self.canvas._do(cmd, (self.tag,) + _flatten(args))
def addtag_above(self, tagOrId):
self._do('addtag', 'above', tagOrId)
def addtag_all(self):
self._do('addtag', 'all')
def addtag_below(self, tagOrId):
self._do('addtag', 'below', tagOrId)
def addtag_closest(self, x, y, halo=None, start=None):
self._do('addtag', 'closest', x, y, halo, start)
def addtag_enclosed(self, x1, y1, x2, y2):
self._do('addtag', 'enclosed', x1, y1, x2, y2)
def addtag_overlapping(self, x1, y1, x2, y2):
self._do('addtag', 'overlapping', x1, y1, x2, y2)
def addtag_withtag(self, tagOrId):
self._do('addtag', 'withtag', tagOrId)
def bbox(self):
return self.canvas._getints(self._do('bbox'))
def bind(self, sequence=None, command=None, add=None):
return self.canvas.tag_bind(self.id, sequence, command, add)
def unbind(self, sequence, funcid=None):
self.canvas.tag_unbind(self.id, sequence, funcid)
def coords(self, *pts):
return self._do('coords', pts)
def dchars(self, first, last=None):
self._do('dchars', first, last)
def delete(self):
self._do('delete')
def dtag(self, tagToDelete=None):
self._do('dtag', tagToDelete)
def focus(self):
self._do('focus')
def gettags(self):
return self.canvas.tk.splitlist(self._do('gettags', self.tag))
def icursor(self, index):
return self._do('icursor', index)
def index(self, index):
return self.canvas.tk.getint(self._do('index', index))
def insert(self, beforeThis, string):
self._do('insert', beforeThis, string)
def config(self, cnf={}, **kw):
return self.canvas.itemconfigure(self.tag, _cnfmerge((cnf,kw)))
def lower(self, belowThis=None):
self._do('lower', belowThis)
def move(self, xAmount, yAmount):
self._do('move', xAmount, yAmount)
def tkraise(self, aboveThis=None):
self._do('raise', aboveThis)
lift = tkraise
def scale(self, xOrigin, yOrigin, xScale, yScale):
self._do('scale', xOrigin, yOrigin, xScale, yScale)
def select_adjust(self, index):
self.canvas._do('select', ('adjust', self.tag, index))
def select_from(self, index):
self.canvas._do('select', ('from', self.tag, index))
def select_to(self, index):
self.canvas._do('select', ('to', self.tag, index))
def type(self):
return self._do('type')
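# Usage sketch (requires a running Tk; item options are illustrative):
#     from Tkinter import Tk
#     root = Tk()
#     canvas = Canvas(root); canvas.pack()
#     rect = Rectangle(canvas, 10, 10, 60, 60, fill='red')
#     group = Group(canvas)
#     group.addtag_withtag(rect)   # tag the item into the group...
#     group.move(5, 5)             # ...and move everything carrying that tag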
| mit |
ahb0327/intellij-community | python/lib/Lib/signal.py | 93 | 6858 | """
This module provides mechanisms to use signal handlers in Python.
Functions:
signal(sig,action) -- set the action for a given signal (done)
pause() -- wait until a signal arrives [Unix only]
alarm(seconds) -- cause SIGALRM after a specified time [Unix only]
getsignal(sig) -- get the signal action for a given signal
default_int_handler(sig, frame) -- default SIGINT handler (done; raises KeyboardInterrupt)
Constants:
SIG_DFL -- used to refer to the system default handler
SIG_IGN -- used to ignore the signal
NSIG -- number of defined signals
SIGINT, SIGTERM, etc. -- signal numbers
*** IMPORTANT NOTICES ***
A signal handler function is called with two arguments:
the first is the signal number, the second is the interrupted stack frame.
According to http://java.sun.com/products/jdk/faq/faq-sun-packages.html
'writing java programs that rely on sun.* is risky: they are not portable, and are not supported.'
However, in Jython, like Python, we let you decide what makes
sense for your application. If sun.misc.Signal is not available,
an ImportError is raised.
"""
try:
import sun.misc.Signal
except ImportError:
raise ImportError("signal module requires sun.misc.Signal, which is not available on this platform")
import os
import sun.misc.SignalHandler
import sys
import threading
import time
from java.lang import IllegalArgumentException
from java.util.concurrent.atomic import AtomicReference
debug = 0
def _init_signals():
# install signals by checking for standard names
# using IllegalArgumentException to diagnose
possible_signals = """
SIGABRT
SIGALRM
SIGBUS
SIGCHLD
SIGCONT
SIGFPE
SIGHUP
SIGILL
SIGINFO
SIGINT
SIGIOT
SIGKILL
SIGPIPE
SIGPOLL
SIGPROF
SIGQUIT
SIGSEGV
SIGSTOP
SIGSYS
SIGTERM
SIGTRAP
SIGTSTP
SIGTTIN
SIGTTOU
SIGURG
SIGUSR1
SIGUSR2
SIGVTALRM
SIGWINCH
SIGXCPU
SIGXFSZ
""".split()
_module = __import__(__name__)
signals = {}
signals_by_name = {}
for signal_name in possible_signals:
try:
java_signal = sun.misc.Signal(signal_name[3:])
except IllegalArgumentException:
continue
signal_number = java_signal.getNumber()
signals[signal_number] = java_signal
signals_by_name[signal_name] = java_signal
setattr(_module, signal_name, signal_number) # install as a module constant
return signals
_signals = _init_signals()
NSIG = max(_signals.iterkeys()) + 1
SIG_DFL = sun.misc.SignalHandler.SIG_DFL # default system handler
SIG_IGN = sun.misc.SignalHandler.SIG_IGN # handler to ignore a signal
class JythonSignalHandler(sun.misc.SignalHandler):
def __init__(self, action):
self.action = action
def handle(self, signal):
        # passing a frame here probably doesn't make sense in a threaded
        # system, but perhaps revisit
self.action(signal.getNumber(), None)
def signal(sig, action):
"""
signal(sig, action) -> action
Set the action for the given signal. The action can be SIG_DFL,
SIG_IGN, or a callable Python object. The previous action is
returned. See getsignal() for possible return values.
*** IMPORTANT NOTICE ***
A signal handler function is called with two arguments:
the first is the signal number, the second is the interrupted stack frame.
"""
# maybe keep a weak ref map of handlers we have returned?
try:
signal = _signals[sig]
except KeyError:
raise ValueError("signal number out of range")
if callable(action):
prev = sun.misc.Signal.handle(signal, JythonSignalHandler(action))
elif action in (SIG_IGN, SIG_DFL) or isinstance(action, sun.misc.SignalHandler):
prev = sun.misc.Signal.handle(signal, action)
else:
raise TypeError("signal handler must be signal.SIG_IGN, signal.SIG_DFL, or a callable object")
if isinstance(prev, JythonSignalHandler):
return prev.action
else:
return prev
# dangerous! don't use!
def getsignal(sig):
"""getsignal(sig) -> action
Return the current action for the given signal. The return value can be:
SIG_IGN -- if the signal is being ignored
SIG_DFL -- if the default action for the signal is in effect
None -- if an unknown handler is in effect
anything else -- the callable Python object used as a handler
Note for Jython: this function is NOT threadsafe. The underlying
Java support only enables getting the current signal handler by
setting a new one. So this is completely prone to race conditions.
"""
try:
signal = _signals[sig]
except KeyError:
raise ValueError("signal number out of range")
current = sun.misc.Signal.handle(signal, SIG_DFL)
sun.misc.Signal.handle(signal, current) # and reinstall
if isinstance(current, JythonSignalHandler):
return current.action
else:
return current
def default_int_handler(sig, frame):
"""
default_int_handler(...)
The default handler for SIGINT installed by Python.
It raises KeyboardInterrupt.
"""
raise KeyboardInterrupt
def pause():
raise NotImplementedError
_alarm_timer_holder = AtomicReference()
def _alarm_handler(sig, frame):
print "Alarm clock"
os._exit(0)
# install a default alarm handler, the one we get by default doesn't
# work terribly well since it throws a bus error (at least on OS X)!
try:
SIGALRM
signal(SIGALRM, _alarm_handler)
except NameError:
pass
class _Alarm(object):
def __init__(self, interval, task):
self.interval = interval
self.task = task
self.scheduled = None
self.timer = threading.Timer(self.interval, self.task)
def start(self):
self.timer.start()
self.scheduled = time.time() + self.interval
def cancel(self):
self.timer.cancel()
now = time.time()
if self.scheduled and self.scheduled > now:
return self.scheduled - now
else:
return 0
def alarm(time):
try:
SIGALRM
except NameError:
raise NotImplementedError("alarm not implemented on this platform")
def raise_alarm():
sun.misc.Signal.raise(_signals[SIGALRM])
if time > 0:
new_alarm_timer = _Alarm(time, raise_alarm)
else:
new_alarm_timer = None
old_alarm_timer = _alarm_timer_holder.getAndSet(new_alarm_timer)
if old_alarm_timer:
scheduled = int(old_alarm_timer.cancel())
else:
scheduled = 0
if new_alarm_timer:
new_alarm_timer.start()
return scheduled
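# Usage sketch (Jython with sun.misc.Signal available; the handler name is
# hypothetical):
#     def on_term(signum, frame):
#         print "caught SIGTERM"
#     old_action = signal(SIGTERM, on_term)
#     alarm(5)   # schedule a SIGALRM five seconds from now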
| apache-2.0 |
shumingch/molecule_simulation | init.py | 1 | 2823 | #init from camera
from bge import logic, render
from particle import Particle
from mathutils import Vector, Matrix
gdict = logic.globalDict
def draw():
camera = scene.objects["Camera"]
string = "BondCraft"
###draws bonds and changes text before frame load
atoms = gdict["atoms"].copy()
for atom in gdict["atoms"]:
        ### search for everything connected to this atom and draw the bonds;
        # removing the atom from the copy first prevents each bond being drawn twice
atoms.remove(atom)
atom.bond.draw_bonds(atoms)
for molecule in gdict["molecules"]:
molecule.draw_text()
for texture in gdict["textures"]:
texture.refresh(True)
if camera["laser"]:
crosshairs = scene.objects["Crosshairs"]
start = camera.worldPosition + camera.getAxisVect((1,-1,0))
end = camera.worldPosition - camera.getAxisVect((0,0,1))
render.drawLine(start,end,[0,1,0])
obj,point, normal = camera.rayCast(crosshairs,None,2000)
if obj:
render.drawLine(point,point + normal * 10000,[0,1,0])
obj.applyForce(-100 * normal)
def play(cont):
scene.restart()
gdict["play"] = True
UI = cont.owner.scene
UI.end()
def main(cont):
global scene
scene = logic.getCurrentScene()
scenes = logic.getSceneList()
camera = cont.owner
overlay = camera.actuators["Scene"]
# camera state 2 is in the menu
if camera.state == 2:
if "play" not in gdict:
# show menu
cont.activate(overlay)
render.showMouse(True)
logic.setGravity([0,0,-9.8])
else:
# start game
camera.state = 1
render.showMouse(False)
scene.objects["Floor"].endObject()
scene.objects["Spawn"].endObject()
logic.setGravity([0,0,0])
scene.objects["Cube"].visible = True
scene.objects["BondCraft"].visible = True
return
print("###############GAME START##################")
gdict.clear()
gdict["free"] = { "Hydrogen": set(),
"Carbon": set(),
"Oxygen": set(),
"Nitrogen": set(),
"Bromine": set()
}
gdict["cations"] = set()
gdict["atoms"] = set()
gdict["textures"] = []
gdict["molecules"] = set()
gdict["primary"] = "Hydrogen"
gdict["camera"] = scene.objects["Camera"]
gdict["prim_text"] = scene.objects["prim_text"]
gdict["prim_text"].resolution = 16
gdict["text"] = scene.objects["Text"]
gdict["text"].resolution = 16
#bind line drawing function
scene.pre_draw = [draw]
#slow down
#fps =1000
#logic.setLogicTicRate(fps)
    #logic.setPhysicsTicRate(fps)
| mit |
orchidinfosys/odoo | addons/survey_crm/survey.py | 47 | 1162 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import osv
class survey_mail_compose_message(osv.TransientModel):
_inherit = 'survey.mail.compose.message'
def default_get(self, cr, uid, fields, context=None):
res = super(survey_mail_compose_message, self).default_get(cr, uid, fields, context=context)
        if context and context.get('active_model') == 'crm.lead' and context.get('active_ids'):
partner_ids = []
emails_list = []
for lead in self.pool.get('crm.lead').browse(cr, uid, context.get('active_ids'), context=context):
if lead.partner_id:
partner_ids.append(lead.partner_id.id)
else:
email = lead.contact_name and "%s <%s>" % (lead.contact_name, lead.email_from or "") or lead.email_from or None
if email and email not in emails_list:
emails_list.append(email)
multi_email = "\n".join(emails_list)
res.update({'partner_ids': list(set(partner_ids)), 'multi_email': multi_email})
return res
| gpl-3.0 |