id (int64, 0–300k) | label (string lengths 1–74, ⌀) | text (string lengths 4k–8k) |
---|---|---|
299,900 | load yaml | import os
import yaml
import xacro
from launch import LaunchDescription
from launch_ros.actions import Node
from ament_index_python import get_package_share_directory
def get_package_file(package, file_path):
"""Get the location of a file installed in an ament package"""
package_path = get_package_share_directory(package)
absolute_file_path = os.path.join(package_path, file_path)
return absolute_file_path
def load_file(file_path):
"""Load the contents of a file into a string"""
try:
with open(file_path, 'r') as file:
return file.read()
except EnvironmentError: # parent of IOError, OSError *and* WindowsError where available
return None
def METHOD_NAME(file_path):
"""Load a yaml file into a dictionary"""
try:
with open(file_path, 'r') as file:
return yaml.safe_load(file)
except EnvironmentError: # parent of IOError, OSError *and* WindowsError where available
return None
def generate_launch_description():
xacro_file = get_package_file('myworkcell_support', 'urdf/workcell.urdf.xacro')
srdf_file = get_package_file('myworkcell_moveit_config', 'config/myworkcell.srdf')
kinematics_file = get_package_file('myworkcell_moveit_config', 'config/kinematics.yaml')
ompl_config_file = get_package_file('myworkcell_moveit_config', 'config/ompl_planning.yaml')
joint_limits_file = get_package_file('myworkcell_moveit_config','config/joint_limits.yaml')
moveit_controllers_file = get_package_file('myworkcell_moveit_config', 'config/moveit_controllers.yaml')
robot_description = xacro.process_file(xacro_file).toprettyxml(indent=' ')
robot_description_semantic = load_file(srdf_file)
kinematics_config = METHOD_NAME(kinematics_file)
ompl_config = METHOD_NAME(ompl_config_file)
joint_limits_config = METHOD_NAME(joint_limits_file)
# Setting up MoveitCpp configuration parameters
moveit_controllers = METHOD_NAME(moveit_controllers_file)
trajectory_execution = {
'moveit_manage_controllers': True,
'trajectory_execution.allowed_execution_duration_scaling': 1.2,
'trajectory_execution.allowed_goal_duration_margin': 0.5,
'trajectory_execution.allowed_start_tolerance': 0.01
}
planning_scene_monitor_config = {
'publish_planning_scene': True,
'publish_geometry_updates': True,
'publish_state_updates': True,
'publish_transforms_updates': True
}
moveit_cpp_config = yaml.safe_load("""
planning_scene_monitor_options:
name: "planning_scene_monitor"
robot_description: "robot_description"
joint_state_topic: "/joint_states"
attached_collision_object_topic: "/moveit_cpp/planning_scene_monitor"
publish_planning_scene_topic: "/moveit_cpp/publish_planning_scene"
monitored_planning_scene_topic: "/moveit_cpp/monitored_planning_scene"
wait_for_initial_state_timeout: 10.0
planning_pipelines:
#namespace: "moveit_cpp" # optional, default is ~
pipeline_names: ["ompl"]
plan_request_params:
planning_time: 10.0
planning_attempts: 3
planning_pipeline: ompl
max_velocity_scaling_factor: 0.5
max_acceleration_scaling_factor: 0.5
# octomap parameters (when used)
octomap_frame: world
octomap_resolution: 0.01
max_range: 5.0""")
return LaunchDescription([
Node(
name='myworkcell_node',
package='myworkcell_core',
executable='myworkcell_node',
output='screen',
parameters=[
{
'base_frame': 'world',
'robot_description': robot_description,
'robot_description_semantic': robot_description_semantic,
'robot_description_kinematics': kinematics_config,
'robot_description_planning' : joint_limits_config,
'planning_pipelines': ['ompl'],
'ompl': ompl_config
},
moveit_cpp_config,
moveit_controllers,
trajectory_execution,
planning_scene_monitor_config,
],
),
Node(
name='fake_ar_publisher_node',
package='fake_ar_publisher',
executable='fake_ar_publisher_node',
output='screen',
),
Node(
name='vision_node',
package='myworkcell_core',
executable='vision_node',
output='screen',
),
]) |
299,901 | add args | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, List, Dict, Optional
import torch
import torch.nn as nn
from torch import Tensor
from fairseq.utils import safe_hasattr
from fairseq.models.transformer import (
TransformerModel,
TransformerEncoder,
TransformerDecoder
)
from ..modules.attn_head_selector import AttnHeadSelector
from ..modules.head_selection_transformer_layer import (
HeadSelectionTransformerEncoderLayer,
HeadSelectionTransformerDecoderLayer
)
class HeadSelectionTransformerModel(TransformerModel):
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
@staticmethod
def METHOD_NAME(parser):
TransformerModel.METHOD_NAME(parser)
# encoder head selection
parser.add_argument(
"--encoder-attn-head-select",
action="store_true",
default=False,
help="encoder head selection"
)
parser.add_argument(
"--total-encoder-attention-heads",
type=int,
help="total number of encoder attention heads"
)
# decoder self attention
parser.add_argument(
"--decoder-self-attn-head-select",
action="store_true",
default=False,
help="decoder self-attention head selection"
)
# decoder-encoder attention
parser.add_argument(
"--dec-enc-attn-head-select",
action="store_true",
default=False,
help="decoder-encoder attention head selection"
)
parser.add_argument(
"--total-decoder-attention-heads",
type=int,
help="total number of decoder attention heads"
)
# selection strategy
parser.add_argument(
"--attn-head-select-strategy",
type=str,
help="attention head selection strategy, subset or group"
)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
if safe_hasattr(args, "encoder_attn_head_select") and args.encoder_attn_head_select:
return HeadSelectionTransformerEncoder(
args, src_dict, embed_tokens
)
else:
return TransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
if (safe_hasattr(args, "decoder_self_attn_head_select") and args.decoder_self_attn_head_select) or (safe_hasattr(args, "dec_enc_attn_head_select") and args.dec_enc_attn_head_select):
return HeadSelectionTransformerDecoder(
args, tgt_dict, embed_tokens
)
else:
return TransformerDecoder(args, tgt_dict, embed_tokens)
class HeadSelectionTransformerEncoder(TransformerEncoder):
def __init__(self, args, dictionary, embed_tokens):
self.num_tasks = args.encoder_tasks
self.num_layers = args.encoder_layers
self.total_num_heads = args.total_encoder_attention_heads
self.num_heads = args.encoder_attention_heads
self.select_strategy = args.attn_head_select_strategy
super().__init__(args, dictionary, embed_tokens)
self.attn_head_selector = AttnHeadSelector(
self.num_tasks,
self.num_layers,
self.total_num_heads,
self.num_heads,
self.select_strategy
)
self.task_ids = None
self.layers = nn.ModuleList(
[self.build_encoder_layer(args, i) for i in range(args.encoder_layers)]
)
def set_task_ids(self, task_ids):
self.task_ids = task_ids
def build_encoder_layer(self, args, layer_idx=None):
return HeadSelectionTransformerEncoderLayer(
args,
layer_idx,
attn_head_selector=self.attn_head_selector
)
def forward(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
self.attn_head_selector.head_select(self.task_ids)
return super().forward(src_tokens, src_lengths, return_all_hiddens, token_embeddings)
class HeadSelectionTransformerDecoder(TransformerDecoder):
def __init__(
self,
args,
dictionary,
embed_tokens,
no_encoder_attn=False,
output_projection=None,
):
self.num_tasks = args.decoder_tasks
self.num_layers = args.decoder_layers
self.total_num_heads = args.total_decoder_attention_heads
self.num_heads = args.decoder_attention_heads
self.select_strategy = args.attn_head_select_strategy
super().__init__(
args, dictionary, embed_tokens,
no_encoder_attn=no_encoder_attn,
output_projection=output_projection
)
self.self_attn_head_selector = None
self.enc_attn_head_selector = None
if safe_hasattr(args, "decoder_self_attn_head_select") and args.decoder_self_attn_head_select:
self.self_attn_head_selector = AttnHeadSelector(
self.num_tasks,
self.num_layers,
self.total_num_heads,
self.num_heads,
self.select_strategy
)
if safe_hasattr(args, "dec_enc_attn_head_select") and args.dec_enc_attn_head_select:
self.enc_attn_head_selector = AttnHeadSelector(
self.num_tasks,
self.num_layers,
self.total_num_heads,
self.num_heads,
self.select_strategy
)
self.task_ids = None
self.layers = nn.ModuleList(
[
self.build_head_selection_decoder_layer(args, no_encoder_attn, idx) for idx in range(args.decoder_layers)
]
)
def set_task_ids(self, task_ids):
self.task_ids = task_ids
def build_head_selection_decoder_layer(self, args, no_encoder_attn=False, layer_idx=None):
return HeadSelectionTransformerDecoderLayer(
args,
layer_idx,
self.self_attn_head_selector,
self.enc_attn_head_selector,
no_encoder_attn=no_encoder_attn
)
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
if self.self_attn_head_selector is not None:
self.self_attn_head_selector.head_select(self.task_ids)
if self.enc_attn_head_selector is not None:
self.enc_attn_head_selector.head_select(self.task_ids)
return super().forward(
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
features_only=features_only,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
src_lengths=src_lengths,
return_all_hiddens=return_all_hiddens
) |
299,902 | test create read graph | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_scan():
m = te.var("m")
n = te.var("n")
x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
s_state = te.placeholder((m, n))
s_init = te.compute((1, n), lambda _, i: x[0, i], name="s_init")
x_trans = te.compute((m, n), lambda i, j: x[i, j] + 1, name="x_trans")
s_up1 = te.compute((m, n), lambda t, i: s_state[t - 1, i] + 1, name="up1")
s_update = te.compute((m, n), lambda t, i: s_up1[t, i] + x_trans[t, i], name="update")
s_scan = tvm.te.scan(s_init, s_update, s_state)
def test_getbody():
body = tvm.te.schedule.ScanGetBody(s_scan.op)
assert set(body) == set([s_scan.op, s_update.op, s_up1.op])
def test_attach_path():
s = te.create_schedule(s_scan.op)
s[x_trans].compute_at(s[s_update], s_update.op.axis[0])
apath = tvm.te.schedule.CreateAttachPath(s)
assert tuple(apath[s_update.op]) == tuple([s_scan.op.scan_axis])
assert tuple(apath[x_trans.op]) == tuple([s_update.op.axis[0], s_scan.op.scan_axis])
def test_fix_pt():
body = tvm.te.schedule.ScanGetBody(s_scan.op)
fxpt = tvm.te.schedule.ScanFixPointAnalysis(s_scan.op)
assert fxpt[s_scan.spatial_axis_[0]].value != 0
def test_scan_fix_point():
m = te.var("m")
n = te.var("n")
l = te.var("l")
x = te.compute((l, m, n), lambda *i: tvm.tir.const(1, "float32"), name="x")
s_state = te.placeholder((l, m, n))
s_init = te.compute((1, m, n), lambda _, i, j: x[0, i, j], name="s_init")
def test_scan0():
s_update = te.compute(
(l, m, n), lambda t, i, j: x[t, j, i] + s_state[t - 1, i, j], name="update"
)
s_scan = tvm.te.scan(s_init, s_update, s_state)
body = tvm.te.schedule.ScanGetBody(s_scan.op)
fxpt = tvm.te.schedule.ScanFixPointAnalysis(s_scan.op)
assert fxpt[s_scan.op.spatial_axis_[0]].value == 1
assert fxpt[s_scan.op.spatial_axis_[1]].value == 1
def test_scan1():
s_update = te.compute(
(l, m, n), lambda t, i, j: x[t, j, i] + s_state[t - 1, j, i], name="update"
)
s_scan = tvm.te.scan(s_init, s_update, s_state)
body = tvm.te.schedule.ScanGetBody(s_scan.op)
fxpt = tvm.te.schedule.ScanFixPointAnalysis(s_scan.op)
assert fxpt[s_scan.op.spatial_axis_[0]].value == 0
assert fxpt[s_scan.op.spatial_axis_[1]].value == 0
def test_scan3_not_exact_reach():
s_h1 = te.compute((l, n, m), lambda t, j, i: s_state[t - 1, i, j], name="h1")
s_h2 = te.compute((l, m, n), lambda t, i, j: s_state[t - 1, i, 10] * 2, name="h1")
s_update = te.compute(
(l, m, n), lambda t, i, j: s_h1[t, j, i] + s_h2[t, i, j], name="update"
)
s_scan = tvm.te.scan(s_init, s_update, s_state)
body = tvm.te.schedule.ScanGetBody(s_scan.op)
fxpt = tvm.te.schedule.ScanFixPointAnalysis(s_scan.op)
assert fxpt[s_scan.op.spatial_axis_[0]].value == 1
assert fxpt[s_scan.op.spatial_axis_[1]].value == 0
def test_scan4_reach_other():
s_h1 = te.compute((l, n, m), lambda t, j, i: s_state[t - 1, j, j], name="h1")
s_h2 = te.compute((l, m, n), lambda t, i, j: s_state[t - 1, i, j] * 2, name="h1")
s_update = te.compute(
(l, m, n), lambda t, i, j: s_h1[t, j, i] + s_h2[t, i, j], name="update"
)
s_scan = tvm.te.scan(s_init, s_update, s_state)
fxpt = tvm.te.schedule.ScanFixPointAnalysis(s_scan.op)
assert fxpt[s_scan.op.spatial_axis_[0]].value == 0
assert fxpt[s_scan.op.spatial_axis_[1]].value == 0
def test_scan5_multi_output():
m = te.var("m")
n = te.var("n")
x1 = te.placeholder((m, n))
s1 = te.placeholder((m, n))
x2 = te.placeholder((m, n))
s2 = te.placeholder((m, n))
s1_init = te.compute((1, n), lambda _, i: x1[0, i])
s2_init = te.compute((1, n), lambda _, i: x2[0, i])
s1_update = te.compute((m, n), lambda t, i: s1[t - 1, i] + x1[t, i])
s2_update = te.compute((m, n), lambda t, i: x2[t, i] + s2[t - 1, i])
r0, r1 = tvm.te.scan([s1_init, s2_init], [s1_update, s2_update], [s1, s2])
body = tvm.te.schedule.ScanGetBody(r0.op)
fxpt = tvm.te.schedule.ScanFixPointAnalysis(r0.op)
assert fxpt[r1.op.spatial_axis_[0]].value == 1
test_scan0()
test_scan1()
test_scan3_not_exact_reach()
test_scan4_reach_other()
test_scan5_multi_output()
def METHOD_NAME():
m = te.var("m")
l = te.var("l")
A = te.placeholder((m, l), name="A")
A1 = te.compute((m, l), lambda i, j: A[i, j])
A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3)
g = tvm.te.schedule.CreateReadGraph([A2.op])
assert g[A2.op][0] == A1
assert g[A1.op][0] == A
post_order = tvm.te.schedule.PostDFSOrder([A2.op], g)
assert post_order[0] == A.op
assert post_order[1] == A1.op
if __name__ == "__main__":
test_scan()
METHOD_NAME()
test_scan_fix_point() |
299,903 | num vowels map | from __future__ import annotations
import pandas.testing as tm
import sqlalchemy as sa
from pytest import mark, param
from ibis import _, udf
no_python_udfs = mark.notimpl(
[
"bigquery",
"clickhouse",
"dask",
"druid",
"impala",
"mssql",
"mysql",
"oracle",
"pandas",
"polars",
"pyspark",
"trino",
]
)
@no_python_udfs
@mark.notyet(["datafusion"], raises=NotImplementedError)
def test_udf(batting):
@udf.scalar.python
def num_vowels(s: str, include_y: bool = False) -> int:
return sum(map(s.lower().count, "aeiou" + ("y" * include_y)))
batting = batting.limit(100)
nvowels = num_vowels(batting.playerID)
assert nvowels.op().__module__ == __name__
assert type(nvowels.op()).__qualname__ == "num_vowels"
expr = batting.group_by(id_len=nvowels).agg(n=_.count())
result = expr.execute()
assert not result.empty
expr = batting.group_by(id_len=num_vowels(batting.playerID, include_y=True)).agg(
n=_.count()
)
result = expr.execute()
assert not result.empty
@no_python_udfs
@mark.notyet(
["postgres"], raises=TypeError, reason="postgres only supports map<string, string>"
)
@mark.notyet(["datafusion"], raises=NotImplementedError)
@mark.notyet(
["sqlite"],
raises=sa.exc.OperationalError,
reason="sqlite doesn't support map types",
)
def test_map_udf(batting):
@udf.scalar.python
def METHOD_NAME(s: str, include_y: bool = False) -> dict[str, int]:
y = "y" * include_y
vowels = "aeiou" + y
counter = dict.fromkeys(vowels, 0)
for c in s:
if c in vowels:
counter[c] += 1
return counter
batting = batting.limit(100)
expr = batting.select(vowel_dist=METHOD_NAME(batting.playerID))
df = expr.execute()
assert not df.empty
@no_python_udfs
@mark.notyet(
["postgres"], raises=TypeError, reason="postgres only supports map<string, string>"
)
@mark.notyet(["datafusion"], raises=NotImplementedError)
@mark.notyet(["sqlite"], raises=TypeError, reason="sqlite doesn't support map types")
def test_map_merge_udf(batting):
@udf.scalar.python
def vowels_map(s: str) -> dict[str, int]:
vowels = "aeiou"
counter = dict.fromkeys(vowels, 0)
for c in s:
if c in vowels:
counter[c] += 1
return counter
@udf.scalar.python
def consonants_map(s: str) -> dict[str, int]:
import string
letters = frozenset(string.ascii_lowercase)
consonants = letters - frozenset("aeiou")
counter = dict.fromkeys(consonants, 0)
for c in s:
if c in consonants:
counter[c] += 1
return counter
@udf.scalar.python
def map_merge(x: dict[str, int], y: dict[str, int]) -> dict[str, int]:
z = x.copy()
z.update(y)
return z
batting = batting.limit(100)
expr = batting.select(
vowel_dist=map_merge(
vowels_map(batting.playerID), consonants_map(batting.playerID)
)
)
df = expr.execute()
assert not df.empty
@udf.scalar.pandas
def add_one_pandas(s: int) -> int: # s is series, int is the element type
return s + 1
@udf.scalar.pyarrow
def add_one_pyarrow(s: int) -> int: # s is series, int is the element type
import pyarrow.compute as pac
return pac.add(s, 1)
@no_python_udfs
@mark.notyet(
["postgres"],
raises=NotImplementedError,
reason="postgres only supports Python-native UDFs",
)
@mark.parametrize(
"add_one",
[
param(
add_one_pandas,
marks=[
mark.notyet(
["duckdb", "datafusion", "sqlite"],
raises=NotImplementedError,
reason="backend doesn't support pandas UDFs",
),
],
),
param(
add_one_pyarrow,
marks=[
mark.notyet(
["snowflake", "sqlite"],
raises=NotImplementedError,
reason="backend doesn't support pyarrow UDFs",
)
],
),
],
)
def test_vectorized_udf(batting, add_one):
batting = batting.limit(100)
expr = (
batting.select(year_id=lambda t: t.yearID)
.mutate(next_year=lambda t: add_one(t.year_id))
.order_by("year_id")
)
result = expr.execute()
expected = (
batting.select(year_id=lambda t: t.yearID)
.execute()
.assign(next_year=lambda df: df.year_id + 1)
.sort_values(["year_id"])
.reset_index(drop=True)
)
tm.assert_frame_equal(result, expected) |
299,904 | autoreconf | # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import glob
import os
import sys
from spack.package import *
class Verrou(AutotoolsPackage):
"""A floating-point error checker.
Verrou helps you look for floating-point round-off errors in programs. It
implements a stochastic floating-point arithmetic based on random rounding:
all floating-point operations are perturbed by randomly switching rounding
modes. This can be seen as an asynchronous variant of the CESTAC method, or
a subset of Monte Carlo Arithmetic, performing only output randomization
through random rounding.
"""
homepage = "https://github.com/edf-hpc/verrou"
url = "https://github.com/edf-hpc/verrou/archive/v2.0.0.tar.gz"
git = "https://github.com/edf-hpc/verrou.git"
maintainers("HadrienG2")
version("develop", branch="master")
version("2.2.0", sha256="d4ea3d19f0c61329723907b5b145d85776bb702643c1605a31f584484d2c5efc")
version("2.1.0", sha256="b1ba49f84aebab15b8ab5649946c9c31b53ad1499f6ffb681c98db41ed28566d")
# The server is sometimes a bit slow to respond
timeout = {"timeout": 60}
resource(
name="valgrind-3.15.0",
url="https://sourceware.org/pub/valgrind/valgrind-3.15.0.tar.bz2",
sha256="417c7a9da8f60dd05698b3a7bc6002e4ef996f14c13f0ff96679a16873e78ab1",
when="@2.2.0:",
fetch_options=timeout,
)
resource(
name="valgrind-3.14.0",
url="https://sourceware.org/pub/valgrind/valgrind-3.14.0.tar.bz2",
sha256="037c11bfefd477cc6e9ebe8f193bb237fe397f7ce791b4a4ce3fa1c6a520baa5",
when="@2.1.0:2.1",
fetch_options=timeout,
)
resource(
name="valgrind-3.13.0",
url="https://sourceware.org/pub/valgrind/valgrind-3.13.0.tar.bz2",
sha256="d76680ef03f00cd5e970bbdcd4e57fb1f6df7d2e2c071635ef2be74790190c3b",
when="@1.1.0:2.0",
fetch_options=timeout,
)
variant("fma", default=True, description="Activates fused multiply-add support for Verrou")
depends_on("autoconf", type="build")
depends_on("automake", type="build")
depends_on("libtool", type="build")
depends_on("m4", type="build")
depends_on("[email protected]:", type=("build", "run"))
extends("python")
def patch(self):
# We start with the verrou source tree and a "valgrind-x.y.z" subdir.
# But we actually need a valgrind source tree with a "verrou" subdir.
# First, let's locate the valgrind sources...
valgrind_dirs = glob.glob("valgrind-*")
assert len(valgrind_dirs) == 1
valgrind_dir = valgrind_dirs[0]
# ...then we can flip the directory organization around
verrou_files = os.listdir(".")
verrou_files.remove(valgrind_dir)
os.mkdir("verrou")
for name in verrou_files:
os.rename(name, os.path.join("verrou", name))
for name in os.listdir(valgrind_dir):
os.rename(os.path.join(valgrind_dir, name), name)
os.rmdir(valgrind_dir)
# Once this is done, we can patch valgrind
if self.spec.satisfies("@:2.0"):
which("patch")("-p0", "--input=verrou/valgrind.diff")
else:
which("patch")("-p1", "--input=verrou/valgrind.diff")
# Autogenerated perl path may be too long, need to fix this here
# because these files are used during the build.
for link_tool_in in glob.glob("coregrind/link_tool_exe_*.in"):
filter_file("^#! @PERL@", "#! /usr/bin/env perl", link_tool_in)
def METHOD_NAME(self, spec, prefix):
# Needed because we patched valgrind
which("bash")("autogen.sh")
def configure_args(self):
spec = self.spec
options = [
"--enable-only64bit",
"--{0}able-verrou-fma".format("en" if "+fma" in spec else "dis"),
]
if sys.platform == "darwin":
options.append("--build=amd64-darwin")
return options |
299,905 | get brand | from django.core.exceptions import ImproperlyConfigured
from allauth.socialaccount import app_settings
class ProviderException(Exception):
pass
class Provider(object):
slug = None
uses_apps = True
def __init__(self, request, app=None):
self.request = request
if self.uses_apps and app is None:
raise ValueError("missing: app")
self.app = app
@classmethod
def get_slug(cls):
return cls.slug or cls.id
def get_login_url(self, request, next=None, **kwargs):
"""
Builds the URL to redirect to when initiating a login for this
provider.
"""
raise NotImplementedError("get_login_url() for " + self.name)
def media_js(self, request):
"""
Some providers may require extra scripts (e.g. a Facebook connect)
"""
return ""
def wrap_account(self, social_account):
return self.account_class(social_account)
def get_settings(self):
return app_settings.PROVIDERS.get(self.id, {})
def sociallogin_from_response(self, request, response):
"""
Instantiates and populates a `SocialLogin` model based on the data
retrieved in `response`. The method does NOT save the model to the
DB.
Data for `SocialLogin` will be extracted from `response` with the
help of the `.extract_uid()`, `.extract_extra_data()`,
`.extract_common_fields()`, and `.extract_email_addresses()`
methods.
:param request: a Django `HttpRequest` object.
:param response: object retrieved via the callback response of the
social auth provider.
:return: A populated instance of the `SocialLogin` model (unsaved).
"""
# NOTE: Avoid loading models at top due to registry boot...
from allauth.socialaccount.adapter import get_adapter
from allauth.socialaccount.models import SocialAccount, SocialLogin
adapter = get_adapter()
uid = self.extract_uid(response)
if not isinstance(uid, str):
raise ValueError(f"uid must be a string: {repr(uid)}")
if len(uid) > app_settings.UID_MAX_LENGTH:
raise ImproperlyConfigured(
f"SOCIALACCOUNT_UID_MAX_LENGTH too small (<{len(uid)})"
)
extra_data = self.extract_extra_data(response)
common_fields = self.extract_common_fields(response)
socialaccount = SocialAccount(
extra_data=extra_data,
uid=uid,
provider=self.app.provider_id or self.app.provider,
)
email_addresses = self.extract_email_addresses(response)
self.cleanup_email_addresses(
common_fields.get("email"),
email_addresses,
email_verified=common_fields.get("email_verified"),
)
sociallogin = SocialLogin(
account=socialaccount, email_addresses=email_addresses
)
user = sociallogin.user = adapter.new_user(request, sociallogin)
user.set_unusable_password()
adapter.populate_user(request, sociallogin, common_fields)
return sociallogin
def extract_uid(self, data):
"""
Extracts the unique user ID from `data`
"""
raise NotImplementedError(
"The provider must implement the `extract_uid()` method"
)
def extract_extra_data(self, data):
"""
Extracts fields from `data` that will be stored in
`SocialAccount`'s `extra_data` JSONField.
:return: any JSON-serializable Python structure.
"""
return data
def extract_common_fields(self, data):
"""
Extracts fields from `data` that will be used to populate the
`User` model in the `SOCIALACCOUNT_ADAPTER`'s `populate_user()`
method.
For example:
{'first_name': 'John'}
:return: dictionary of key-value pairs.
"""
return {}
def cleanup_email_addresses(self, email, addresses, email_verified=False):
# Avoid loading models before adapters have been registered.
from allauth.account.models import EmailAddress
# Move user.email over to EmailAddress
if email and email.lower() not in [a.email.lower() for a in addresses]:
addresses.append(
EmailAddress(email=email, verified=bool(email_verified), primary=True)
)
# Force verified emails
settings = self.get_settings()
verified_email = settings.get("VERIFIED_EMAIL", False)
if verified_email:
for address in addresses:
address.verified = True
def extract_email_addresses(self, data):
"""
For example:
[EmailAddress(email='[email protected]',
verified=True,
primary=True)]
"""
return []
@classmethod
def get_package(cls):
pkg = getattr(cls, "package", None)
if not pkg:
pkg = cls.__module__.rpartition(".")[0]
return pkg
class ProviderAccount(object):
def __init__(self, social_account):
self.account = social_account
def get_profile_url(self):
return None
def get_avatar_url(self):
return None
def METHOD_NAME(self):
"""
Returns a dict containing an id and name identifying the
brand. Useful when displaying logos next to accounts in
templates.
For most providers, these are identical to the provider. For
OpenID however, the brand can be derived from the OpenID identity
url.
"""
provider = self.account.get_provider()
return dict(id=provider.id, name=provider.name)
def __str__(self):
return self.to_str()
def to_str(self):
"""
This did not use to work in the past due to py2 compatibility:
class GoogleAccount(ProviderAccount):
def __str__(self):
dflt = super(GoogleAccount, self).__str__()
return self.account.extra_data.get('name', dflt)
So we have this method `to_str` that can be overridden in a conventional
fashion, without having to worry about it.
"""
return self.METHOD_NAME()["name"] |
299,906 | assert fields | # (C) Datadog, Inc. 2023-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from concurrent.futures.thread import ThreadPoolExecutor
from typing import List
import pytest
from datadog_checks.base.utils.db.utils import DBMAsyncJob
from .common import POSTGRES_VERSION
from .utils import run_one_check
pytestmark = [pytest.mark.integration, pytest.mark.usefixtures('dd_environment')]
@pytest.fixture
def dbm_instance(pg_instance):
pg_instance['dbm'] = True
pg_instance['min_collection_interval'] = 0.1
pg_instance['query_samples'] = {'enabled': False}
pg_instance['query_activity'] = {'enabled': False}
pg_instance['query_metrics'] = {'enabled': False}
pg_instance['collect_resources'] = {'enabled': True, 'run_sync': True, 'collection_interval': 0.1}
pg_instance['collect_settings'] = {'enabled': True, 'run_sync': True, 'collection_interval': 0.1}
return pg_instance
@pytest.fixture(autouse=True)
def stop_orphaned_threads():
# make sure we shut down any orphaned threads and create a new Executor for each test
DBMAsyncJob.executor.shutdown(wait=True)
DBMAsyncJob.executor = ThreadPoolExecutor()
def test_collect_metadata(integration_check, dbm_instance, aggregator):
check = integration_check(dbm_instance)
check.check(dbm_instance)
dbm_metadata = aggregator.get_event_platform_events("dbm-metadata")
event = next((e for e in dbm_metadata if e['kind'] == 'pg_settings'), None)
assert event is not None
assert event['host'] == "stubbed.hostname"
assert event['dbms'] == "postgres"
assert event['kind'] == "pg_settings"
assert len(event["metadata"]) > 0
def test_collect_schemas(integration_check, dbm_instance, aggregator):
dbm_instance["collect_schemas"] = {'enabled': True, 'collection_interval': 0.5}
dbm_instance['relations'] = [{'relation_regex': ".*"}]
dbm_instance["database_autodiscovery"] = {"enabled": True, "include": ["datadog"]}
del dbm_instance['dbname']
check = integration_check(dbm_instance)
run_one_check(check, dbm_instance)
dbm_metadata = aggregator.get_event_platform_events("dbm-metadata")
schema_event = next(e for e in dbm_metadata if e['kind'] == 'pg_databases')
# there should only be one database, datadog_test
database_metadata = schema_event['metadata']
assert len(database_metadata) == 1
assert 'datadog_test' == database_metadata[0]['name']
# there should be only two schemas, 'public' and 'datadog'; datadog is empty
schema_names = [s['name'] for s in database_metadata[0]['schemas']]
assert 'public' in schema_names
assert 'datadog' in schema_names
schema_public = None
for schema in database_metadata[0]['schemas']:
if schema['name'] == 'public':
schema_public = schema
# check that all expected tables are present
tables_set = {'persons', "personsdup1", "personsdup2", "pgtable", "pg_newtable", "cities"}
# if version isn't 9 or 10, check that partition master is in tables
if float(POSTGRES_VERSION) >= 11:
tables_set.update({'test_part'})
tables_not_reported_set = {'test_part1', 'test_part2'}
tables_got = []
for table in schema_public['tables']:
tables_got.append(table['name'])
# make some assertions on fields
if table['name'] == "persons":
# check that foreign keys, indexes get reported
keys = list(table.keys())
METHOD_NAME(keys, ["foreign_keys", "columns", "toast_table", "id", "name"])
METHOD_NAME(list(table['foreign_keys'][0].keys()), ['name', 'definition'])
METHOD_NAME(
list(table['columns'][0].keys()),
[
'name',
'nullable',
'data_type',
'default',
],
)
if table['name'] == "cities":
keys = list(table.keys())
METHOD_NAME(keys, ["indexes", "columns", "toast_table", "id", "name"])
METHOD_NAME(list(table['indexes'][0].keys()), ['name', 'definition'])
if float(POSTGRES_VERSION) >= 11:
if table['name'] == 'test_part':
keys = list(table.keys())
METHOD_NAME(keys, ["num_partitions", "partition_key"])
METHOD_NAME(tables_got, tables_set)
assert_not_fields(tables_got, tables_not_reported_set)
def METHOD_NAME(keys: List[str], fields: List[str]):
for field in fields:
assert field in keys
def assert_not_fields(keys: List[str], fields: List[str]):
for field in fields:
assert field not in keys |
299,907 | extract suite name from test method | import inspect
import os
import unittest
import ddtrace
from ddtrace import config
from ddtrace.constants import SPAN_KIND
from ddtrace.contrib.unittest.constants import COMPONENT_VALUE
from ddtrace.contrib.unittest.constants import FRAMEWORK
from ddtrace.contrib.unittest.constants import KIND
from ddtrace.ext import SpanTypes
from ddtrace.ext import test
from ddtrace.internal.ci_visibility import CIVisibility as _CIVisibility
from ddtrace.internal.ci_visibility.constants import EVENT_TYPE as _EVENT_TYPE
from ddtrace.internal.constants import COMPONENT
from ddtrace.internal.utils.wrappers import unwrap as _u
from ddtrace.vendor import wrapt
# unittest default settings
config._add(
"unittest",
dict(
_default_service="unittest",
operation_name=os.getenv("DD_UNITTEST_OPERATION_NAME", default="unittest.test"),
),
)
def get_version():
# type: () -> str
return ""
def _set_tracer(tracer):
unittest._datadog_tracer = tracer
def _store_span(item, span):
"""Store span at `unittest` instance."""
item._datadog_span = span
def _extract_span(item):
"""Extract span from `unittest` instance."""
return getattr(item, "_datadog_span", None)
def _extract_test_method_name(item):
"""Extract test method name from `unittest` instance."""
return getattr(item, "_testMethodName", None)
def METHOD_NAME(item):
item_type = type(item)
return getattr(item_type, "__name__", None)
def _extract_class_hierarchy_name(item):
item_type = type(item)
return getattr(item_type, "__name__", None)
def _extract_module_name_from_test_method(item):
return getattr(item, "__module__", None)
def _extract_test_skip_reason(item):
return item[1]
def _extract_test_file_name(item):
return os.path.basename(inspect.getfile(item.__class__))
def is_unittest_support_enabled():
return unittest and getattr(unittest, "_datadog_patch", False) and _CIVisibility.enabled
def _is_valid_result(instance, args):
return instance and type(instance) == unittest.runner.TextTestResult and args
def patch():
"""
Patch the instrumented methods from unittest
"""
if (
not config._ci_visibility_unittest_enabled
or getattr(unittest, "_datadog_patch", False)
or _CIVisibility.enabled
):
return
_CIVisibility.enable(config=ddtrace.config.unittest)
unittest._datadog_patch = True
_w = wrapt.wrap_function_wrapper
_w(unittest, "TextTestResult.addSuccess", add_success_test_wrapper)
_w(unittest, "TextTestResult.addFailure", add_failure_test_wrapper)
_w(unittest, "TextTestResult.addError", add_failure_test_wrapper)
_w(unittest, "TextTestResult.addSkip", add_skip_test_wrapper)
_w(unittest, "TestCase.run", start_test_wrapper_unittest)
def unpatch():
if not getattr(unittest, "_datadog_patch", False):
return
_u(unittest.TextTestResult, "addSuccess")
_u(unittest.TextTestResult, "addFailure")
_u(unittest.TextTestResult, "addError")
_u(unittest.TextTestResult, "addSkip")
_u(unittest.TestCase, "run")
unittest._datadog_patch = False
_CIVisibility.disable()
def _set_test_span_status(test_item, status, reason=None):
span = _extract_span(test_item)
if not span:
return
span.set_tag_str(test.STATUS, status)
if status == test.Status.FAIL.value:
exc_info = reason
span.set_exc_info(exc_info[0], exc_info[1], exc_info[2])
elif status == test.Status.SKIP.value:
span.set_tag_str(test.SKIP_REASON, reason)
def add_success_test_wrapper(func, instance, args, kwargs):
if is_unittest_support_enabled() and _is_valid_result(instance, args):
_set_test_span_status(test_item=args[0], status=test.Status.PASS.value)
return func(*args, **kwargs)
def add_failure_test_wrapper(func, instance, args, kwargs):
if is_unittest_support_enabled() and _is_valid_result(instance, args):
_set_test_span_status(test_item=args[0], reason=args[1], status=test.Status.FAIL.value)
return func(*args, **kwargs)
def add_skip_test_wrapper(func, instance, args, kwargs):
result = func(*args, **kwargs)
if is_unittest_support_enabled() and _is_valid_result(instance, args):
_set_test_span_status(test_item=args[0], reason=args[1], status=test.Status.SKIP.value)
return result
def start_test_wrapper_unittest(func, instance, args, kwargs):
if is_unittest_support_enabled():
tracer = getattr(unittest, "_datadog_tracer", _CIVisibility._instance.tracer)
with tracer.trace(
ddtrace.config.unittest.operation_name,
service=_CIVisibility._instance._service,
resource="unittest.test",
span_type=SpanTypes.TEST,
) as span:
span.set_tag_str(_EVENT_TYPE, SpanTypes.TEST)
span.set_tag_str(COMPONENT, COMPONENT_VALUE)
span.set_tag_str(SPAN_KIND, KIND)
span.set_tag_str(test.FRAMEWORK, FRAMEWORK)
span.set_tag_str(test.TYPE, SpanTypes.TEST)
suite_name = METHOD_NAME(instance)
span.set_tag_str(test.NAME, _extract_test_method_name(instance))
span.set_tag_str(test.SUITE, suite_name)
span.set_tag_str(test.MODULE, _extract_module_name_from_test_method(instance))
span.set_tag_str(test.STATUS, test.Status.FAIL.value)
span.set_tag_str(test.CLASS_HIERARCHY, suite_name)
_CIVisibility.set_codeowners_of(_extract_test_file_name(instance), span=span)
_store_span(instance, span)
result = func(*args, **kwargs)
return result
return func(*args, **kwargs) |
299,908 | gather scatter python | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from pytorch3d import _C
from torch.autograd import Function
from torch.autograd.function import once_differentiable
class GraphConv(nn.Module):
"""A single graph convolution layer."""
def __init__(
self,
input_dim: int,
output_dim: int,
init: str = "normal",
directed: bool = False,
) -> None:
"""
Args:
input_dim: Number of input features per vertex.
output_dim: Number of output features per vertex.
init: Weight initialization method. Can be one of ['zero', 'normal'].
directed: Bool indicating if edges in the graph are directed.
"""
super().__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.directed = directed
self.w0 = nn.Linear(input_dim, output_dim)
self.w1 = nn.Linear(input_dim, output_dim)
if init == "normal":
nn.init.normal_(self.w0.weight, mean=0, std=0.01)
nn.init.normal_(self.w1.weight, mean=0, std=0.01)
self.w0.bias.data.zero_()
self.w1.bias.data.zero_()
elif init == "zero":
self.w0.weight.data.zero_()
self.w1.weight.data.zero_()
else:
raise ValueError('Invalid GraphConv initialization "%s"' % init)
def forward(self, verts, edges):
"""
Args:
verts: FloatTensor of shape (V, input_dim) where V is the number of
vertices and input_dim is the number of input features
per vertex. input_dim has to match the input_dim specified
in __init__.
edges: LongTensor of shape (E, 2) where E is the number of edges
where each edge has the indices of the two vertices which
form the edge.
Returns:
out: FloatTensor of shape (V, output_dim) where output_dim is the
number of output features per vertex.
"""
if verts.is_cuda != edges.is_cuda:
raise ValueError("verts and edges tensors must be on the same device.")
if verts.shape[0] == 0:
# empty graph.
return verts.new_zeros((0, self.output_dim)) * verts.sum()
verts_w0 = self.w0(verts) # (V, output_dim)
verts_w1 = self.w1(verts) # (V, output_dim)
if torch.cuda.is_available() and verts.is_cuda and edges.is_cuda:
neighbor_sums = gather_scatter(verts_w1, edges, self.directed)
else:
neighbor_sums = METHOD_NAME(
verts_w1, edges, self.directed
) # (V, output_dim)
# Add neighbor features to each vertex's features.
out = verts_w0 + neighbor_sums
return out
def __repr__(self):
Din, Dout, directed = self.input_dim, self.output_dim, self.directed
return "GraphConv(%d -> %d, directed=%r)" % (Din, Dout, directed)
def METHOD_NAME(input, edges, directed: bool = False):
"""
Python implementation of gather_scatter for aggregating features of
neighbor nodes in a graph.
Given a directed graph: v0 -> v1 -> v2 the updated feature for v1 depends
on v2 in order to be consistent with Morris et al. AAAI 2019
(https://arxiv.org/abs/1810.02244). This only affects
directed graphs; for undirected graphs v1 will depend on both v0 and v2,
no matter which way the edges are physically stored.
Args:
input: Tensor of shape (num_vertices, input_dim).
edges: Tensor of edge indices of shape (num_edges, 2).
directed: bool indicating if edges are directed.
Returns:
output: Tensor of same shape as input.
"""
if not (input.dim() == 2):
raise ValueError("input can only have 2 dimensions.")
if not (edges.dim() == 2):
raise ValueError("edges can only have 2 dimensions.")
if not (edges.shape[1] == 2):
raise ValueError("edges must be of shape (num_edges, 2).")
num_vertices, input_feature_dim = input.shape
num_edges = edges.shape[0]
output = torch.zeros_like(input)
idx0 = edges[:, 0].view(num_edges, 1).expand(num_edges, input_feature_dim)
idx1 = edges[:, 1].view(num_edges, 1).expand(num_edges, input_feature_dim)
output = output.scatter_add(0, idx0, input.gather(0, idx1))
if not directed:
output = output.scatter_add(0, idx1, input.gather(0, idx0))
return output
class GatherScatter(Function):
"""
Torch autograd Function wrapper for gather_scatter C++/CUDA implementations.
"""
@staticmethod
def forward(ctx, input, edges, directed=False):
"""
Args:
ctx: Context object used to calculate gradients.
input: Tensor of shape (num_vertices, input_dim)
edges: Tensor of edge indices of shape (num_edges, 2)
directed: Bool indicating if edges are directed.
Returns:
output: Tensor of same shape as input.
"""
if not (input.dim() == 2):
raise ValueError("input can only have 2 dimensions.")
if not (edges.dim() == 2):
raise ValueError("edges can only have 2 dimensions.")
if not (edges.shape[1] == 2):
raise ValueError("edges must be of shape (num_edges, 2).")
if not (input.dtype == torch.float32):
raise ValueError("input has to be of type torch.float32.")
ctx.directed = directed
input, edges = input.contiguous(), edges.contiguous()
ctx.save_for_backward(edges)
backward = False
output = _C.gather_scatter(input, edges, directed, backward)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
grad_output = grad_output.contiguous()
edges = ctx.saved_tensors[0]
directed = ctx.directed
backward = True
grad_input = _C.gather_scatter(grad_output, edges, directed, backward)
grad_edges = None
grad_directed = None
return grad_input, grad_edges, grad_directed
gather_scatter = GatherScatter.apply |
299,909 | get close waiter | __all__ = 'create_subprocess_exec', 'create_subprocess_shell'
import subprocess
from . import events
from . import protocols
from . import streams
from . import tasks
from .log import logger
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
DEVNULL = subprocess.DEVNULL
class SubprocessStreamProtocol(streams.FlowControlMixin,
protocols.SubprocessProtocol):
"""Like StreamReaderProtocol, but for a subprocess."""
def __init__(self, limit, loop):
super().__init__(loop=loop)
self._limit = limit
self.stdin = self.stdout = self.stderr = None
self._transport = None
self._process_exited = False
self._pipe_fds = []
self._stdin_closed = self._loop.create_future()
def __repr__(self):
info = [self.__class__.__name__]
if self.stdin is not None:
info.append(f'stdin={self.stdin!r}')
if self.stdout is not None:
info.append(f'stdout={self.stdout!r}')
if self.stderr is not None:
info.append(f'stderr={self.stderr!r}')
return '<{}>'.format(' '.join(info))
def connection_made(self, transport):
self._transport = transport
stdout_transport = transport.get_pipe_transport(1)
if stdout_transport is not None:
self.stdout = streams.StreamReader(limit=self._limit,
loop=self._loop)
self.stdout.set_transport(stdout_transport)
self._pipe_fds.append(1)
stderr_transport = transport.get_pipe_transport(2)
if stderr_transport is not None:
self.stderr = streams.StreamReader(limit=self._limit,
loop=self._loop)
self.stderr.set_transport(stderr_transport)
self._pipe_fds.append(2)
stdin_transport = transport.get_pipe_transport(0)
if stdin_transport is not None:
self.stdin = streams.StreamWriter(stdin_transport,
protocol=self,
reader=None,
loop=self._loop)
def pipe_data_received(self, fd, data):
if fd == 1:
reader = self.stdout
elif fd == 2:
reader = self.stderr
else:
reader = None
if reader is not None:
reader.feed_data(data)
def pipe_connection_lost(self, fd, exc):
if fd == 0:
pipe = self.stdin
if pipe is not None:
pipe.close()
self.connection_lost(exc)
if exc is None:
self._stdin_closed.set_result(None)
else:
self._stdin_closed.set_exception(exc)
# Since calling `wait_closed()` is not mandatory,
# we shouldn't log the traceback if this is not awaited.
self._stdin_closed._log_traceback = False
return
if fd == 1:
reader = self.stdout
elif fd == 2:
reader = self.stderr
else:
reader = None
if reader is not None:
if exc is None:
reader.feed_eof()
else:
reader.set_exception(exc)
if fd in self._pipe_fds:
self._pipe_fds.remove(fd)
self._maybe_close_transport()
def process_exited(self):
self._process_exited = True
self._maybe_close_transport()
def _maybe_close_transport(self):
if len(self._pipe_fds) == 0 and self._process_exited:
self._transport.close()
self._transport = None
def METHOD_NAME(self, stream):
if stream is self.stdin:
return self._stdin_closed
class Process:
def __init__(self, transport, protocol, loop):
self._transport = transport
self._protocol = protocol
self._loop = loop
self.stdin = protocol.stdin
self.stdout = protocol.stdout
self.stderr = protocol.stderr
self.pid = transport.get_pid()
def __repr__(self):
return f'<{self.__class__.__name__} {self.pid}>'
@property
def returncode(self):
return self._transport.get_returncode()
async def wait(self):
"""Wait until the process exit and return the process return code."""
return await self._transport._wait()
def send_signal(self, signal):
self._transport.send_signal(signal)
def terminate(self):
self._transport.terminate()
def kill(self):
self._transport.kill()
async def _feed_stdin(self, input):
debug = self._loop.get_debug()
self.stdin.write(input)
if debug:
logger.debug(
'%r communicate: feed stdin (%s bytes)', self, len(input))
try:
await self.stdin.drain()
except (BrokenPipeError, ConnectionResetError) as exc:
# communicate() ignores BrokenPipeError and ConnectionResetError
if debug:
logger.debug('%r communicate: stdin got %r', self, exc)
if debug:
logger.debug('%r communicate: close stdin', self)
self.stdin.close()
async def _noop(self):
return None
async def _read_stream(self, fd):
transport = self._transport.get_pipe_transport(fd)
if fd == 2:
stream = self.stderr
else:
assert fd == 1
stream = self.stdout
if self._loop.get_debug():
name = 'stdout' if fd == 1 else 'stderr'
logger.debug('%r communicate: read %s', self, name)
output = await stream.read()
if self._loop.get_debug():
name = 'stdout' if fd == 1 else 'stderr'
logger.debug('%r communicate: close %s', self, name)
transport.close()
return output
async def communicate(self, input=None):
if input is not None:
stdin = self._feed_stdin(input)
else:
stdin = self._noop()
if self.stdout is not None:
stdout = self._read_stream(1)
else:
stdout = self._noop()
if self.stderr is not None:
stderr = self._read_stream(2)
else:
stderr = self._noop()
stdin, stdout, stderr = await tasks.gather(stdin, stdout, stderr)
await self.wait()
return (stdout, stderr)
async def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
limit=streams._DEFAULT_LIMIT, **kwds):
loop = events.get_running_loop()
protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
loop=loop)
transport, protocol = await loop.subprocess_shell(
protocol_factory,
cmd, stdin=stdin, stdout=stdout,
stderr=stderr, **kwds)
return Process(transport, protocol, loop)
async def create_subprocess_exec(program, *args, stdin=None, stdout=None,
stderr=None, limit=streams._DEFAULT_LIMIT,
**kwds):
loop = events.get_running_loop()
protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
loop=loop)
transport, protocol = await loop.subprocess_exec(
protocol_factory,
program, *args,
stdin=stdin, stdout=stdout,
stderr=stderr, **kwds)
return Process(transport, protocol, loop) |
299,910 | test nr of violations | """Unit tests for the Axe accessibility collectors."""
from collector_utilities.functions import md5_hash
from tests.source_collectors.source_collector_test_case import SourceCollectorTestCase
class AxeCSVAccessibilityTest(SourceCollectorTestCase):
"""Unit tests for the Axe CSV collector for accessibility violations."""
SOURCE_TYPE = "axecsv"
METRIC_TYPE = "accessibility"
def setUp(self):
"""Extend to set up test data."""
super().setUp()
self.header_row = "URL,Violation Type,Impact,Help,HTML Element,Messages,DOM Element\n"
self.serious_violation = "url1,aria-input-field-name,serious,help1,html1\n"
self.serious_violation2 = "url2,aria-input-field-name,serious,help1,html2\n"
self.moderate_violation = "url2,aria-hidden-focus,moderate,help2,html2,messages2,dom2\n"
self.csv = self.header_row + self.serious_violation + self.moderate_violation
self.csv2 = self.header_row + self.serious_violation2 + self.moderate_violation
self.expected_entities = [
{
"url": "url1",
"violation_type": "aria-input-field-name",
"impact": "serious",
"element": None,
"page": "url1",
"description": None,
"help": "help1",
},
{
"url": "url2",
"violation_type": "aria-hidden-focus",
"impact": "moderate",
"element": "dom2",
"page": "url2",
"description": "messages2",
"help": "help2",
},
]
for entity in self.expected_entities:
entity["key"] = self.entity_key(entity)
@staticmethod
def entity_key(entity: dict[str, str | None]) -> str:
"""Create the entity hash."""
return md5_hash(",".join(str(value) for value in entity.values()))
async def METHOD_NAME(self):
"""Test that the number of violations is returned."""
response = await self.collect(get_request_text=self.csv)
self.assert_measurement(response, value="2", entities=self.expected_entities)
async def test_duplicate_violations(self):
"""Test that duplicate violations are ignored."""
self.csv += self.serious_violation
response = await self.collect(get_request_text=self.csv)
self.assert_measurement(response, value="2", entities=self.expected_entities)
async def test_no_violations(self):
"""Test zero violations."""
response = await self.collect(get_request_text="")
self.assert_measurement(response, value="0", entities=[])
async def test_filter_by_impact(self):
"""Test that violations can be filtered by impact level."""
self.set_source_parameter("impact", ["serious", "critical"])
response = await self.collect(get_request_text=self.csv)
self.assert_measurement(response, value="1")
async def test_element_include_filter(self):
"""Test that violations can be filtered by element."""
self.set_source_parameter("element_include_filter", ["dom2"])
response = await self.collect(get_request_text=self.csv)
self.assert_measurement(response, value="1", entities=[self.expected_entities[1]])
async def test_element_exclude_filter(self):
"""Test that violations can be filtered by element."""
self.set_source_parameter("element_exclude_filter", ["dom2"])
response = await self.collect(get_request_text=self.csv)
self.assert_measurement(response, value="1", entities=[self.expected_entities[0]])
async def test_zipped_csv(self):
"""Test that a zip archive with CSV files is processed correctly."""
self.set_source_parameter("url", "https://example.org/axecsv.zip")
zipfile = self.zipped_report(*[("axe1.csv", self.csv), ("axe2.csv", self.csv2)])
response = await self.collect(get_request_content=zipfile)
expected_entity = {
"url": "url2",
"violation_type": "aria-input-field-name",
"impact": "serious",
"element": None,
"page": "url2",
"description": None,
"help": "help1",
}
expected_entity["key"] = self.entity_key(expected_entity)
self.assert_measurement(response, value="3", entities=[*self.expected_entities, expected_entity])
async def test_empty_line(self):
"""Test that empty lines are ignored."""
response = await self.collect(get_request_text=self.csv + "\n\n")
self.assert_measurement(response, value="2", entities=self.expected_entities)
async def test_embedded_newlines(self):
"""Test that embedded newlines are ignored."""
violation_with_newline = 'url3,aria-hidden-focus,moderate,help3,html3,"messages3\nsecond line",dom3\n'
expected_entity = {
"url": "url3",
"violation_type": "aria-hidden-focus",
"impact": "moderate",
"element": "dom3",
"page": "url3",
"description": "messages3\nsecond line",
"help": "help3",
}
expected_entity["key"] = self.entity_key(expected_entity)
response = await self.collect(get_request_text=self.csv + violation_with_newline)
self.assert_measurement(response, value="3", entities=[*self.expected_entities, expected_entity]) |
299,911 | get behavior | import logging
from _typeshed import Incomplete, SupportsWrite
from collections.abc import Iterable, Iterator
from typing import Any, TypeVar, overload
from typing_extensions import Literal
logger: logging.Logger
DEBUG: bool
CR: str
LF: str
CRLF: str
SPACE: str
TAB: str
SPACEORTAB: str
_V = TypeVar("_V", bound=VBase)
_W = TypeVar("_W", bound=SupportsWrite[bytes])
class VBase:
group: Incomplete | None
behavior: Incomplete | None
parentBehavior: Incomplete | None
isNative: bool
def __init__(self, group: Incomplete | None = None) -> None: ...
def copy(self, copyit: VBase) -> None: ...
def validate(self, *args, **kwds) -> bool: ...
def getChildren(self) -> list[Any]: ...
def clearBehavior(self, cascade: bool = True) -> None: ...
def autoBehavior(self, cascade: bool = False) -> None: ...
def setBehavior(self, behavior, cascade: bool = True) -> None: ...
def transformToNative(self): ...
def transformFromNative(self): ...
def transformChildrenToNative(self) -> None: ...
def transformChildrenFromNative(self, clearBehavior: bool = True) -> None: ...
@overload
def serialize(
self, buf: None = None, lineLength: int = 75, validate: bool = True, behavior: Incomplete | None = None
) -> str: ...
@overload
def serialize(self, buf: _W, lineLength: int = 75, validate: bool = True, behavior: Incomplete | None = None) -> _W: ...
def toVName(name, stripNum: int = 0, upper: bool = False): ...
class ContentLine(VBase):
name: Any
encoded: Any
params: Any
singletonparams: Any
isNative: Any
lineNumber: Any
value: Any
def __init__(
self,
name,
params,
value,
group: Incomplete | None = None,
encoded: bool = False,
isNative: bool = False,
lineNumber: Incomplete | None = None,
*args,
**kwds,
) -> None: ...
@classmethod
def duplicate(cls, copyit): ...
def copy(self, copyit) -> None: ...
def __eq__(self, other): ...
def __getattr__(self, name: str): ...
def __setattr__(self, name: str, value) -> None: ...
def __delattr__(self, name: str) -> None: ...
def valueRepr(self): ...
def __unicode__(self) -> str: ...
def prettyPrint(self, level: int = 0, tabwidth: int = 3) -> None: ...
class Component(VBase):
contents: dict[str, list[VBase]]
name: Any
useBegin: bool
def __init__(self, name: Incomplete | None = None, *args, **kwds) -> None: ...
@classmethod
def duplicate(cls, copyit): ...
def copy(self, copyit) -> None: ...
def setProfile(self, name) -> None: ...
def __getattr__(self, name: str): ...
normal_attributes: Any
def __setattr__(self, name: str, value) -> None: ...
def __delattr__(self, name: str) -> None: ...
def getChildValue(self, childName, default: Incomplete | None = None, childNumber: int = 0): ...
@overload
def add(self, objOrName: _V, group: str | None = None) -> _V: ...
@overload
def add(self, objOrName: Literal["vevent"], group: str | None = None) -> Component: ...
@overload
def add(
self, objOrName: Literal["uid", "summary", "description", "dtstart", "dtend"], group: str | None = None
) -> ContentLine: ...
@overload
def add(self, objOrName: str, group: str | None = None) -> Any: ... # returns VBase sub-class
def remove(self, obj) -> None: ...
def getChildren(self) -> list[Any]: ...
def components(self) -> Iterable[Component]: ...
def lines(self): ...
def sortChildKeys(self): ...
def getSortedChildren(self): ...
def setBehaviorFromVersionLine(self, versionLine) -> None: ...
def transformChildrenToNative(self) -> None: ...
def transformChildrenFromNative(self, clearBehavior: bool = True) -> None: ...
def prettyPrint(self, level: int = 0, tabwidth: int = 3) -> None: ...
class VObjectError(Exception):
msg: Any
lineNumber: Any
def __init__(self, msg, lineNumber: Incomplete | None = None) -> None: ...
class ParseError(VObjectError): ...
class ValidateError(VObjectError): ...
class NativeError(VObjectError): ...
patterns: Any
param_values_re: Any
params_re: Any
line_re: Any
begin_re: Any
def parseParams(string): ...
def parseLine(line, lineNumber: Incomplete | None = None): ...
wrap_re: Any
logical_lines_re: Any
testLines: str
def getLogicalLines(fp, allowQP: bool = True) -> None: ...
def textLineToContentLine(text, n: Incomplete | None = None): ...
def dquoteEscape(param): ...
def foldOneLine(outbuf, input, lineLength: int = 75) -> None: ...
def defaultSerialize(obj, buf, lineLength): ...
class Stack:
stack: Any
def __len__(self) -> int: ...
def top(self): ...
def topName(self): ...
def modifyTop(self, item) -> None: ...
def push(self, obj) -> None: ...
def pop(self): ...
def readComponents(
streamOrString, validate: bool = False, transform: bool = True, ignoreUnreadable: bool = False, allowQP: bool = False
) -> Iterator[Component]: ...
def readOne(stream, validate: bool = False, transform: bool = True, ignoreUnreadable: bool = False, allowQP: bool = False): ...
def registerBehavior(behavior, name: Incomplete | None = None, default: bool = False, id: Incomplete | None = None) -> None: ...
def METHOD_NAME(name, id: Incomplete | None = None): ...
def newFromBehavior(name, id: Incomplete | None = None): ...
def backslashEscape(s): ... |
299,912 | rowcount | # mysql/mysqldb.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+mysqldb
:name: MySQL-Python
:dbapi: mysqldb
:connectstring: mysql+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>
:url: http://sourceforge.net/projects/mysql-python
.. _mysqldb_unicode:
Unicode
-------
Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.
Py3K Support
------------
Currently, MySQLdb only runs on Python 2 and development has been stopped.
`mysqlclient`_ is a fork of MySQLdb and provides Python 3 support as well
as some bugfixes.
.. _mysqlclient: https://github.com/PyMySQL/mysqlclient-python
Using MySQLdb with Google Cloud SQL
-----------------------------------
Google Cloud SQL now recommends use of the MySQLdb dialect. Connect
using a URL like the following::
mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>
Server Side Cursors
-------------------
The mysqldb dialect supports server-side cursors. See :ref:`mysql_ss_cursors`.
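A rough sketch (placeholder credentials; this assumes ``create_engine()``
forwards the ``server_side_cursors`` flag to the dialect constructor below)::
    from sqlalchemy import create_engine
    engine = create_engine(
        "mysql+mysqldb://scott:tiger@localhost/test",
        server_side_cursors=True)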
"""
from .base import (MySQLDialect, MySQLExecutionContext,
MySQLCompiler, MySQLIdentifierPreparer)
from .base import TEXT
from ... import sql
from ... import util
import re
class MySQLExecutionContext_mysqldb(MySQLExecutionContext):
@property
def METHOD_NAME(self):
if hasattr(self, '_rowcount'):
return self._rowcount
else:
return self.cursor.METHOD_NAME
class MySQLCompiler_mysqldb(MySQLCompiler):
def visit_mod_binary(self, binary, operator, **kw):
return self.process(binary.left, **kw) + " %% " + \
self.process(binary.right, **kw)
def post_process_text(self, text):
return text.replace('%', '%%')
class MySQLIdentifierPreparer_mysqldb(MySQLIdentifierPreparer):
def _escape_identifier(self, value):
value = value.replace(self.escape_quote, self.escape_to_quote)
return value.replace("%", "%%")
class MySQLDialect_mysqldb(MySQLDialect):
driver = 'mysqldb'
supports_unicode_statements = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
supports_native_decimal = True
default_paramstyle = 'format'
execution_ctx_cls = MySQLExecutionContext_mysqldb
statement_compiler = MySQLCompiler_mysqldb
preparer = MySQLIdentifierPreparer_mysqldb
def __init__(self, server_side_cursors=False, **kwargs):
super(MySQLDialect_mysqldb, self).__init__(**kwargs)
self.server_side_cursors = server_side_cursors
@util.langhelpers.memoized_property
def supports_server_side_cursors(self):
try:
cursors = __import__('MySQLdb.cursors').cursors
self._sscursor = cursors.SSCursor
return True
except (ImportError, AttributeError):
return False
@classmethod
def dbapi(cls):
return __import__('MySQLdb')
def do_executemany(self, cursor, statement, parameters, context=None):
METHOD_NAME = cursor.executemany(statement, parameters)
if context is not None:
context._rowcount = METHOD_NAME
def _check_unicode_returns(self, connection):
# work around issue fixed in
# https://github.com/farcepest/MySQLdb1/commit/cd44524fef63bd3fcb71947392326e9742d520e8
# specific issue w/ the utf8_bin collation and unicode returns
has_utf8_bin = self.server_version_info > (5, ) and \
connection.scalar(
"show collation where %s = 'utf8' and %s = 'utf8_bin'"
% (
self.identifier_preparer.quote("Charset"),
self.identifier_preparer.quote("Collation")
))
if has_utf8_bin:
additional_tests = [
sql.collate(sql.cast(
sql.literal_column(
"'test collated returns'"),
TEXT(charset='utf8')), "utf8_bin")
]
else:
additional_tests = []
return super(MySQLDialect_mysqldb, self)._check_unicode_returns(
connection, additional_tests)
def create_connect_args(self, url):
opts = url.translate_connect_args(database='db', username='user',
password='passwd')
opts.update(url.query)
util.coerce_kw_type(opts, 'compress', bool)
util.coerce_kw_type(opts, 'connect_timeout', int)
util.coerce_kw_type(opts, 'read_timeout', int)
util.coerce_kw_type(opts, 'client_flag', int)
util.coerce_kw_type(opts, 'local_infile', int)
# Note: using either of the below will cause all strings to be
# returned as Unicode, both in raw SQL operations and with column
# types like String and MSString.
util.coerce_kw_type(opts, 'use_unicode', bool)
util.coerce_kw_type(opts, 'charset', str)
# Rich values 'cursorclass' and 'conv' are not supported via
# query string.
ssl = {}
keys = ['ssl_ca', 'ssl_key', 'ssl_cert', 'ssl_capath', 'ssl_cipher']
for key in keys:
if key in opts:
ssl[key[4:]] = opts[key]
util.coerce_kw_type(ssl, key[4:], str)
del opts[key]
if ssl:
opts['ssl'] = ssl
# FOUND_ROWS must be set in CLIENT_FLAGS to enable
# supports_sane_rowcount.
client_flag = opts.get('client_flag', 0)
if self.dbapi is not None:
try:
CLIENT_FLAGS = __import__(
self.dbapi.__name__ + '.constants.CLIENT'
).constants.CLIENT
client_flag |= CLIENT_FLAGS.FOUND_ROWS
except (AttributeError, ImportError):
self.supports_sane_rowcount = False
opts['client_flag'] = client_flag
return [[], opts]
def _get_server_version_info(self, connection):
dbapi_con = connection.connection
version = []
r = re.compile(r'[.\-]')
for n in r.split(dbapi_con.get_server_info()):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
def _extract_error_code(self, exception):
return exception.args[0]
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
try:
# note: the SQL here would be
# "SHOW VARIABLES LIKE 'character_set%%'"
cset_name = connection.connection.character_set_name
except AttributeError:
util.warn(
"No 'character_set_name' can be detected with "
"this MySQL-Python version; "
"please upgrade to a recent version of MySQL-Python. "
"Assuming latin1.")
return 'latin1'
else:
return cset_name()
_isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
'READ COMMITTED', 'REPEATABLE READ',
'AUTOCOMMIT'])
def _set_isolation_level(self, connection, level):
if level == 'AUTOCOMMIT':
connection.autocommit(True)
else:
connection.autocommit(False)
super(MySQLDialect_mysqldb, self)._set_isolation_level(connection,
level)
dialect = MySQLDialect_mysqldb |
299,913 | get uids from request | # -*- coding: utf-8 -*-
#
# This file is part of SENAITE.CORE.
#
# SENAITE.CORE is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright 2018-2023 by its authors.
# Some rights reserved, see README and LICENSE.
import collections
import six
from bika.lims import api
from bika.lims.browser import BrowserView
from bika.lims.interfaces import IAnalysisRequest
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from senaite.core import logger
OFF_VALUES = ["0", "off", "no"]
class MultiResultsView(BrowserView):
"""Allows to edit results of multiple samples
"""
template = ViewPageTemplateFile("templates/multi_results.pt")
def __init__(self, context, request):
super(MultiResultsView, self).__init__(context, request)
self.context = context
self.request = request
def __call__(self):
return self.template()
@property
def context_state(self):
return api.get_view(
"plone_context_state",
context=self.context, request=self.request)
def contents_table(self, sample, poc):
view_name = "table_{}_analyses".format(poc)
view = api.get_view(view_name, context=sample, request=self.request)
# Inject additional hidden field in the listing form for redirect
# https://github.com/senaite/senaite.app.listing/pull/80
view.additional_hidden_fields = [{
"name": "redirect_url",
"value": self.context_state.current_page_url(),
}]
view.update()
view.before_render()
return view.contents_table()
def show_lab_analyses(self, sample):
"""Show/Hide lab analyses
"""
analyses = sample.getAnalyses(getPointOfCapture="lab")
if len(analyses) == 0:
return False
lab_analyses = self.request.get("lab_analyses")
if lab_analyses in OFF_VALUES:
return False
return True
def show_field_analyses(self, sample):
"""Show/Hide field analyses
"""
analyses = sample.getAnalyses(getPointOfCapture="field")
if len(analyses) == 0:
return False
field_analyses = self.request.get("field_analyses", True)
if field_analyses in OFF_VALUES:
return False
return True
def get_samples(self):
"""Extract the samples from the request UIDs
This might be either a samples container or a sample context
"""
# fetch objects from request
objs = self.get_objects_from_request()
samples = []
for obj in objs:
# when coming from the samples listing
if IAnalysisRequest.providedBy(obj):
samples.append(obj)
# when coming from the WF menu inside a sample
if IAnalysisRequest.providedBy(self.context):
samples.append(self.context)
return self.uniquify_items(samples)
def uniquify_items(self, items):
"""Uniquify the items with sort order
"""
unique = []
for item in items:
if item in unique:
continue
unique.append(item)
return unique
def get_objects_from_request(self):
"""Returns a list of objects coming from the "uids" request parameter
"""
unique_uids = self.METHOD_NAME()
return filter(None, map(self.get_object_by_uid, unique_uids))
def METHOD_NAME(self):
"""Return a list of uids from the request
"""
uids = self.request.form.get("uids", "")
if isinstance(uids, six.string_types):
uids = uids.split(",")
unique_uids = collections.OrderedDict().fromkeys(uids).keys()
return filter(api.is_uid, unique_uids)
def get_object_by_uid(self, uid):
"""Get the object by UID
"""
logger.debug("get_object_by_uid::UID={}".format(uid))
obj = api.get_object_by_uid(uid, None)
if obj is None:
logger.warn("!! No object found for UID #{} !!")
return obj |
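if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): the comma-split and
    # order-preserving de-duplication idiom used above, shown standalone with a
    # made-up "uids" value.
    import collections
    raw_uids = "uid-1,uid-2,uid-1,uid-3"
    unique_uids = list(collections.OrderedDict.fromkeys(raw_uids.split(",")))
    print(unique_uids)  # ['uid-1', 'uid-2', 'uid-3']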
299,914 | finalize |
__author__ = "Andre Merzky, Ole Weidner"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
""" file adaptor implementation on top tof the HTTP protocol
"""
import os
import urllib.request, urllib.parse, urllib.error
import radical.utils as ru
from ...exceptions import *
from ... import url as rsurl
from ...utils import pty_shell as rsups
from ...utils import misc as rsumisc
from ... import filesystem as api_fs
from ...filesystem.constants import *
from .. import base as a_base
from ..cpi import filesystem as cpi_fs
from ..cpi import decorators as cpi_decs
SYNC_CALL = cpi_decs.SYNC_CALL
ASYNC_CALL = cpi_decs.ASYNC_CALL
# --------------------------------------------------------------------
# the adaptor name
#
_ADAPTOR_NAME = "radical.saga.adaptors.http_file"
_ADAPTOR_SCHEMAS = ["http", "https"]
_ADAPTOR_OPTIONS = []
# --------------------------------------------------------------------
# the adaptor capabilities & supported attributes
#
_ADAPTOR_CAPABILITIES = {
"metrics" : [],
"contexts" : {"userpass" : "username/password pair for ssh"}
}
# --------------------------------------------------------------------
# the adaptor documentation
#
_ADAPTOR_DOC = {
"name" : _ADAPTOR_NAME,
"cfg_options" : _ADAPTOR_OPTIONS,
"capabilities" : _ADAPTOR_CAPABILITIES,
"description" : """The HTTP file adpator allows file transfer (copy) from remote resources to the local machine via the HTTP/HTTPS protocol, similar to cURL.""",
"example" : "examples/files/http_file_copy.py",
"schemas" : {"http" :"use the http protocol to access a remote file",
"https" :"use the https protocol to access a remote file"}
}
# --------------------------------------------------------------------
# the adaptor info is used to register the adaptor with SAGA
_ADAPTOR_INFO = {
"name": _ADAPTOR_NAME,
"version": "v0.1",
"schemas": _ADAPTOR_SCHEMAS,
"cpis": [
{
"type": "radical.saga.namespace.Entry",
"class": "HTTPFile"
},
{
"type": "radical.saga.filesystem.File",
"class": "HTTPFile"
}
]
}
###############################################################################
# The adaptor class
class Adaptor (a_base.Base):
"""
This is the actual adaptor class, which gets loaded by SAGA (i.e. by the
SAGA engine), and which registers the CPI implementation classes which
provide the adaptor's functionality.
"""
# ----------------------------------------------------------------
#
def __init__(self):
a_base.Base.__init__(self, _ADAPTOR_INFO, _ADAPTOR_OPTIONS)
# ----------------------------------------------------------------
#
def sanity_check(self):
pass
###############################################################################
#
class HTTPFile (cpi_fs.File):
""" Implements radical.saga.adaptors.cpi.filesystem.File
"""
# ----------------------------------------------------------------
#
def __init__(self, api, adaptor):
self._cpi_base = super(HTTPFile, self)
self._cpi_base.__init__(api, adaptor)
# ----------------------------------------------------------------
#
def __del__(self):
self.METHOD_NAME(kill=True)
# ----------------------------------------------------------------
#
@SYNC_CALL
def init_instance(self, adaptor_state, url, flags, session):
# FIXME: eval flags!
self._logger.info("init_instance %s" % url)
if 'from_open' in adaptor_state and adaptor_state['from_open']:
self.url = rsurl.Url(url) # deep copy
self.flags = flags
self.session = session
self.valid = False # will be set by initialize
self.cwdurl = rsurl.Url(adaptor_state["cwd"])
self.cwd = self.cwdurl.path
if rsumisc.url_is_relative(self.url):
self.url = rsumisc.url_make_absolute(self.cwd, self.url)
else:
if rsumisc.url_is_relative(url):
raise BadParameter("cannot interprete relative URL in this context ('%s')" % url)
self.url = url
self.flags = flags
self.session = session
self.valid = False # will be set by initialize
self.cwd = rsumisc.url_get_dirname(url)
self.cwdurl = rsurl.Url(url) # deep copy
self.cwdurl.path = self.cwd
self.initialize()
return self.get_api()
# ----------------------------------------------------------------
#
def initialize(self):
if self.flags & CREATE_PARENTS:
raise BadParameter("File creation not supported via HTTP(S)")
elif self.flags & CREATE:
raise BadParameter("File creation not supported via HTTP(S)")
elif self.flags & WRITE:
raise BadParameter("File write not supported via HTTP(S)")
elif self.flags & READ:
pass
self.valid = True
# ----------------------------------------------------------------
#
def METHOD_NAME(self, kill=False):
# nothing to do here
pass
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_url(self):
return rsurl.Url(self.url) # deep copy
# ----------------------------------------------------------------
#
@SYNC_CALL
def copy_self(self, tgt_in, flags):
src = rsurl.Url(self.url) # deep copy
tgt = rsurl.Url(tgt_in) # deep copy
if tgt.scheme != 'file':
raise BadParameter("Only file://localhost URLs are supported as copy targets.")
if tgt.host != 'localhost':
raise BadParameter("Only file://localhost URLs are supported as copy targets.")
#if rsumisc.url_is_relative (src) : src = rsumisc.url_make_absolute (cwdurl, src)
#if rsumisc.url_is_relative (tgt) : tgt = rsumisc.url_make_absolute (cwdurl, tgt)
target = ""
src_filename = os.path.basename(src.path)
local_path = tgt.path
if os.path.exists(tgt.path):
if os.path.isfile(tgt.path):
                # fail if overwrite flag is not set, otherwise copy
if flags & OVERWRITE:
target = local_path
else:
raise BadParameter("Local file '%s' exists." % local_path)
elif os.path.isdir(tgt.path):
# add source filename to target path
target = os.path.join(local_path, src_filename)
if os.path.exists(target):
if not flags & OVERWRITE:
raise BadParameter("Local file '%s' exists." % target)
try:
urllib.request.urlretrieve(str(src), target)
except Exception as e:
raise BadParameter("Couldn't copy %s to %s: %s" %
(str(src), target, str(e))) from e
# ----------------------------------------------------------------
#
@SYNC_CALL
def is_dir_self(self):
return False
# ----------------------------------------------------------------
#
@SYNC_CALL
def is_entry_self(self):
return True
# ----------------------------------------------------------------
#
@SYNC_CALL
def is_link_self(self):
return False
# ----------------------------------------------------------------
#
@SYNC_CALL
def is_file_self(self):
return True |
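if __name__ == '__main__':
    # Illustrative sketch (not part of the adaptor): the bare download that
    # copy_self() wraps, with a minimal overwrite check. The URL and the target
    # path below are placeholders.
    src = 'https://example.com/data.txt'
    tgt = '/tmp/data.txt'
    overwrite = True
    if os.path.exists(tgt) and not overwrite:
        raise RuntimeError("Local file '%s' exists." % tgt)
    urllib.request.urlretrieve(src, tgt)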
299,915 | prepare request | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRoutePortsLocationsOperations(object):
"""ExpressRoutePortsLocationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ExpressRoutePortsLocationListResult"]
"""Retrieves all ExpressRoutePort peering locations. Does not return available bandwidths for each
location. Available bandwidths can only be obtained when retrieving a specific peering
location.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRoutePortsLocationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_05_01.models.ExpressRoutePortsLocationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePortsLocationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def METHOD_NAME(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRoutePortsLocationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = METHOD_NAME(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations'} # type: ignore
def get(
self,
location_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRoutePortsLocation"
"""Retrieves a single ExpressRoutePort peering location, including the list of available
bandwidths available at said peering location.
:param location_name: Name of the requested ExpressRoutePort peering location.
:type location_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRoutePortsLocation, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.ExpressRoutePortsLocation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePortsLocation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'locationName': self._serialize.url("location_name", location_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRoutePortsLocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations/{locationName}'} # type: ignore |
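if __name__ == "__main__":
    # Illustrative usage sketch (not part of the generated code): these operations
    # are normally reached through the versioned management client rather than
    # instantiated directly. The credential class assumes the separate
    # azure-identity package; the subscription id is a placeholder.
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient
    client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    for location in client.express_route_ports_locations.list():
        print(location.name)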
299,916 | unselect | import subprocess
import sys
import time
from _typeshed import ReadableBuffer, SizedBuffer
from builtins import list as _list # conflicts with a method named "list"
from collections.abc import Callable
from datetime import datetime
from re import Pattern
from socket import socket as _socket
from ssl import SSLContext, SSLSocket
from types import TracebackType
from typing import IO, Any, SupportsAbs, SupportsInt
from typing_extensions import Literal, Self, TypeAlias
__all__ = ["IMAP4", "IMAP4_stream", "Internaldate2tuple", "Int2AP", "ParseFlags", "Time2Internaldate", "IMAP4_SSL"]
# TODO: Commands should use their actual return types, not this type alias.
# E.g. Tuple[Literal["OK"], List[bytes]]
_CommandResults: TypeAlias = tuple[str, list[Any]]
_AnyResponseData: TypeAlias = list[None] | list[bytes | tuple[bytes, bytes]]
Commands: dict[str, tuple[str, ...]]
class IMAP4:
class error(Exception): ...
class abort(error): ...
class readonly(abort): ...
mustquote: Pattern[str]
debug: int
state: str
literal: str | None
tagged_commands: dict[bytes, _list[bytes] | None]
untagged_responses: dict[str, _list[bytes | tuple[bytes, bytes]]]
continuation_response: str
is_readonly: bool
tagnum: int
tagpre: str
tagre: Pattern[str]
welcome: bytes
capabilities: tuple[str, ...]
PROTOCOL_VERSION: str
if sys.version_info >= (3, 9):
def __init__(self, host: str = "", port: int = 143, timeout: float | None = None) -> None: ...
def open(self, host: str = "", port: int = 143, timeout: float | None = None) -> None: ...
else:
def __init__(self, host: str = "", port: int = 143) -> None: ...
def open(self, host: str = "", port: int = 143) -> None: ...
def __getattr__(self, attr: str) -> Any: ...
host: str
port: int
sock: _socket
file: IO[str] | IO[bytes]
def read(self, size: int) -> bytes: ...
def readline(self) -> bytes: ...
def send(self, data: ReadableBuffer) -> None: ...
def shutdown(self) -> None: ...
def socket(self) -> _socket: ...
def recent(self) -> _CommandResults: ...
def response(self, code: str) -> _CommandResults: ...
def append(self, mailbox: str, flags: str, date_time: str, message: ReadableBuffer) -> str: ...
def authenticate(self, mechanism: str, authobject: Callable[[bytes], bytes | None]) -> tuple[str, str]: ...
def capability(self) -> _CommandResults: ...
def check(self) -> _CommandResults: ...
def close(self) -> _CommandResults: ...
def copy(self, message_set: str, new_mailbox: str) -> _CommandResults: ...
def create(self, mailbox: str) -> _CommandResults: ...
def delete(self, mailbox: str) -> _CommandResults: ...
def deleteacl(self, mailbox: str, who: str) -> _CommandResults: ...
def enable(self, capability: str) -> _CommandResults: ...
def __enter__(self) -> Self: ...
def __exit__(self, t: type[BaseException] | None, v: BaseException | None, tb: TracebackType | None) -> None: ...
def expunge(self) -> _CommandResults: ...
def fetch(self, message_set: str, message_parts: str) -> tuple[str, _AnyResponseData]: ...
def getacl(self, mailbox: str) -> _CommandResults: ...
def getannotation(self, mailbox: str, entry: str, attribute: str) -> _CommandResults: ...
def getquota(self, root: str) -> _CommandResults: ...
def getquotaroot(self, mailbox: str) -> _CommandResults: ...
def list(self, directory: str = '""', pattern: str = "*") -> tuple[str, _AnyResponseData]: ...
def login(self, user: str, password: str) -> tuple[Literal["OK"], _list[bytes]]: ...
def login_cram_md5(self, user: str, password: str) -> _CommandResults: ...
def logout(self) -> tuple[str, _AnyResponseData]: ...
def lsub(self, directory: str = '""', pattern: str = "*") -> _CommandResults: ...
def myrights(self, mailbox: str) -> _CommandResults: ...
def namespace(self) -> _CommandResults: ...
def noop(self) -> tuple[str, _list[bytes]]: ...
def partial(self, message_num: str, message_part: str, start: str, length: str) -> _CommandResults: ...
def proxyauth(self, user: str) -> _CommandResults: ...
def rename(self, oldmailbox: str, newmailbox: str) -> _CommandResults: ...
def search(self, charset: str | None, *criteria: str) -> _CommandResults: ...
def select(self, mailbox: str = "INBOX", readonly: bool = False) -> tuple[str, _list[bytes | None]]: ...
def setacl(self, mailbox: str, who: str, what: str) -> _CommandResults: ...
def setannotation(self, *args: str) -> _CommandResults: ...
def setquota(self, root: str, limits: str) -> _CommandResults: ...
def sort(self, sort_criteria: str, charset: str, *search_criteria: str) -> _CommandResults: ...
def starttls(self, ssl_context: Any | None = None) -> tuple[Literal["OK"], _list[None]]: ...
def status(self, mailbox: str, names: str) -> _CommandResults: ...
def store(self, message_set: str, command: str, flags: str) -> _CommandResults: ...
def subscribe(self, mailbox: str) -> _CommandResults: ...
def thread(self, threading_algorithm: str, charset: str, *search_criteria: str) -> _CommandResults: ...
def uid(self, command: str, *args: str) -> _CommandResults: ...
def unsubscribe(self, mailbox: str) -> _CommandResults: ...
if sys.version_info >= (3, 9):
def METHOD_NAME(self) -> _CommandResults: ...
def xatom(self, name: str, *args: str) -> _CommandResults: ...
def print_log(self) -> None: ...
class IMAP4_SSL(IMAP4):
keyfile: str
certfile: str
if sys.version_info >= (3, 9):
def __init__(
self,
host: str = "",
port: int = 993,
keyfile: str | None = None,
certfile: str | None = None,
ssl_context: SSLContext | None = None,
timeout: float | None = None,
) -> None: ...
else:
def __init__(
self,
host: str = "",
port: int = 993,
keyfile: str | None = None,
certfile: str | None = None,
ssl_context: SSLContext | None = None,
) -> None: ...
sslobj: SSLSocket
file: IO[Any]
if sys.version_info >= (3, 9):
def open(self, host: str = "", port: int | None = 993, timeout: float | None = None) -> None: ...
else:
def open(self, host: str = "", port: int | None = 993) -> None: ...
def ssl(self) -> SSLSocket: ...
class IMAP4_stream(IMAP4):
command: str
def __init__(self, command: str) -> None: ...
file: IO[Any]
process: subprocess.Popen[bytes]
writefile: IO[Any]
readfile: IO[Any]
if sys.version_info >= (3, 9):
def open(self, host: str | None = None, port: int | None = None, timeout: float | None = None) -> None: ...
else:
def open(self, host: str | None = None, port: int | None = None) -> None: ...
class _Authenticator:
mech: Callable[[bytes], bytes | bytearray | memoryview | str | None]
def __init__(self, mechinst: Callable[[bytes], bytes | bytearray | memoryview | str | None]) -> None: ...
def process(self, data: str) -> str: ...
def encode(self, inp: bytes | bytearray | memoryview) -> str: ...
def decode(self, inp: str | SizedBuffer) -> bytes: ...
def Internaldate2tuple(resp: ReadableBuffer) -> time.struct_time | None: ...
def Int2AP(num: SupportsAbs[SupportsInt]) -> bytes: ...
def ParseFlags(resp: ReadableBuffer) -> tuple[bytes, ...]: ...
def Time2Internaldate(date_time: float | time.struct_time | time._TimeTuple | datetime | str) -> str: ... |
299,917 | call async | import asyncio
import contextvars
import inspect
import warnings
from .case import TestCase
class IsolatedAsyncioTestCase(TestCase):
# Names intentionally have a long prefix
# to reduce a chance of clashing with user-defined attributes
# from inherited test case
#
# The class doesn't call loop.run_until_complete(self.setUp()) and family
# but uses a different approach:
    # 1. create a long-running task that reads the self.setUp()
    #    awaitable from a queue along with a future
    # 2. await the awaitable object that was passed in and set the result
    #    into the future object
    # 3. Outer code puts the awaitable and the future object into a queue
    #    and waits for the future to complete
# The trick is necessary because every run_until_complete() call
# creates a new task with embedded ContextVar context.
# To share contextvars between setUp(), test and tearDown() we need to execute
# them inside the same task.
# Note: the test case modifies event loop policy if the policy was not instantiated
# yet.
# asyncio.get_event_loop_policy() creates a default policy on demand but never
# returns None
    # I believe this is not an issue in user-level tests, but Python's own test
    # suite should reset the policy in every test module
# by calling asyncio.set_event_loop_policy(None) in tearDownModule()
def __init__(self, methodName='runTest'):
super().__init__(methodName)
self._asyncioRunner = None
self._asyncioTestContext = contextvars.copy_context()
async def asyncSetUp(self):
pass
async def asyncTearDown(self):
pass
def addAsyncCleanup(self, func, /, *args, **kwargs):
# A trivial trampoline to addCleanup()
# the function exists because it has a different semantics
# and signature:
# addCleanup() accepts regular functions
# but addAsyncCleanup() accepts coroutines
#
# We intentionally don't add inspect.iscoroutinefunction() check
# for func argument because there is no way
# to check for async function reliably:
# 1. It can be "async def func()" itself
# 2. Class can implement "async def __call__()" method
# 3. Regular "def func()" that returns awaitable object
self.addCleanup(*(func, *args), **kwargs)
async def enterAsyncContext(self, cm):
"""Enters the supplied asynchronous context manager.
If successful, also adds its __aexit__ method as a cleanup
function and returns the result of the __aenter__ method.
"""
# We look up the special methods on the type to match the with
# statement.
cls = type(cm)
try:
enter = cls.__aenter__
exit = cls.__aexit__
except AttributeError:
raise TypeError(f"'{cls.__module__}.{cls.__qualname__}' object does "
f"not support the asynchronous context manager protocol"
) from None
result = await enter(cm)
self.addAsyncCleanup(exit, cm, None, None, None)
return result
def _callSetUp(self):
# Force loop to be initialized and set as the current loop
# so that setUp functions can use get_event_loop() and get the
# correct loop instance.
self._asyncioRunner.get_loop()
self._asyncioTestContext.run(self.setUp)
self.METHOD_NAME(self.asyncSetUp)
def _callTestMethod(self, method):
if self._callMaybeAsync(method) is not None:
warnings.warn(f'It is deprecated to return a value that is not None from a '
f'test case ({method})', DeprecationWarning, stacklevel=4)
def _callTearDown(self):
self.METHOD_NAME(self.asyncTearDown)
self._asyncioTestContext.run(self.tearDown)
def _callCleanup(self, function, *args, **kwargs):
self._callMaybeAsync(function, *args, **kwargs)
def METHOD_NAME(self, func, /, *args, **kwargs):
assert self._asyncioRunner is not None, 'asyncio runner is not initialized'
assert inspect.iscoroutinefunction(func), f'{func!r} is not an async function'
return self._asyncioRunner.run(
func(*args, **kwargs),
context=self._asyncioTestContext
)
def _callMaybeAsync(self, func, /, *args, **kwargs):
assert self._asyncioRunner is not None, 'asyncio runner is not initialized'
if inspect.iscoroutinefunction(func):
return self._asyncioRunner.run(
func(*args, **kwargs),
context=self._asyncioTestContext,
)
else:
return self._asyncioTestContext.run(func, *args, **kwargs)
def _setupAsyncioRunner(self):
assert self._asyncioRunner is None, 'asyncio runner is already initialized'
runner = asyncio.Runner(debug=True)
self._asyncioRunner = runner
def _tearDownAsyncioRunner(self):
runner = self._asyncioRunner
runner.close()
def run(self, result=None):
self._setupAsyncioRunner()
try:
return super().run(result)
finally:
self._tearDownAsyncioRunner()
def debug(self):
self._setupAsyncioRunner()
super().debug()
self._tearDownAsyncioRunner()
def __del__(self):
if self._asyncioRunner is not None:
self._tearDownAsyncioRunner() |
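if __name__ == "__main__":
    # Illustrative sketch (not part of this module): a minimal test case built on
    # the machinery above. asyncSetUp(), the test body and asyncTearDown() all run
    # inside the same runner task, so contextvars set during setup are visible to
    # the test method.
    import unittest
    class _ExampleAsyncTest(IsolatedAsyncioTestCase):
        async def asyncSetUp(self):
            self.queue = asyncio.Queue()
        async def test_roundtrip(self):
            await self.queue.put("ping")
            self.assertEqual(await self.queue.get(), "ping")
    unittest.main()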
299,918 | test load nifti | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import os
import tempfile
import unittest
import nibabel as nib
import numpy as np
import torch
from parameterized import parameterized
from PIL import Image
from monai.data.meta_tensor import MetaTensor
from monai.transforms import EnsureChannelFirst, LoadImage
from monai.utils import optional_import
itk, has_itk = optional_import("itk", allow_namespace_pkg=True)
ITKReader, _ = optional_import("monai.data", name="ITKReader", as_type="decorator")
TEST_CASE_1 = [{}, ["test_image.nii.gz"], None]
TEST_CASE_2 = [{}, ["test_image.nii.gz"], -1]
TEST_CASE_3 = [{}, ["test_image.nii.gz", "test_image2.nii.gz", "test_image3.nii.gz"], None]
TEST_CASE_4 = [{"reader": ITKReader() if has_itk else "itkreader"}, ["test_image.nii.gz"], None]
TEST_CASE_5 = [{"reader": ITKReader() if has_itk else "itkreader"}, ["test_image.nii.gz"], -1]
TEST_CASE_6 = [
{"reader": ITKReader() if has_itk else "itkreader"},
["test_image.nii.gz", "test_image2.nii.gz", "test_image3.nii.gz"],
None,
]
class TestEnsureChannelFirst(unittest.TestCase):
@parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6])
@unittest.skipUnless(has_itk, "itk not installed")
def METHOD_NAME(self, input_param, filenames, original_channel_dim):
if original_channel_dim is None:
test_image = np.random.rand(8, 8, 8)
elif original_channel_dim == -1:
test_image = np.random.rand(8, 8, 8, 1)
with tempfile.TemporaryDirectory() as tempdir:
for i, name in enumerate(filenames):
filenames[i] = os.path.join(tempdir, name)
nib.save(nib.Nifti1Image(test_image, np.eye(4)), filenames[i])
result = LoadImage(image_only=True, **input_param)(filenames)
result = EnsureChannelFirst()(result)
self.assertEqual(result.shape[0], len(filenames))
@unittest.skipUnless(has_itk, "itk not installed")
def test_itk_dicom_series_reader(self):
filenames = "tests/testing_data/CT_DICOM"
itk.ProcessObject.SetGlobalWarningDisplay(False)
result = LoadImage(image_only=True, reader=ITKReader(pixel_type=itk.UC))(filenames)
result = EnsureChannelFirst()(result)
self.assertEqual(result.shape[0], 1)
def test_load_png(self):
spatial_size = (6, 6, 3)
test_image = np.random.randint(0, 6, size=spatial_size)
with tempfile.TemporaryDirectory() as tempdir:
filename = os.path.join(tempdir, "test_image.png")
Image.fromarray(test_image.astype("uint8")).save(filename)
result = LoadImage(image_only=True)(filename)
result = EnsureChannelFirst()(result)
self.assertEqual(result.shape[0], 3)
result = EnsureChannelFirst(channel_dim=-1)(result)
self.assertEqual(result.shape, (6, 3, 6))
def test_check(self):
im = torch.zeros(1, 2, 3)
im_nodim = MetaTensor(im, meta={"original_channel_dim": None})
with self.assertRaises(ValueError): # not MetaTensor
EnsureChannelFirst(channel_dim=None)(im)
with self.assertRaises(ValueError): # no meta
EnsureChannelFirst(channel_dim=None)(MetaTensor(im))
with self.assertRaises(ValueError): # no meta channel
EnsureChannelFirst()(im_nodim)
with self.assertWarns(Warning):
EnsureChannelFirst(strict_check=False, channel_dim=None)(im)
with self.assertWarns(Warning):
EnsureChannelFirst(strict_check=False, channel_dim=None)(im_nodim)
def test_default_channel_first(self):
im = torch.rand(4, 4)
result = EnsureChannelFirst(channel_dim="no_channel")(im)
self.assertEqual(result.shape, (1, 4, 4))
if __name__ == "__main__":
unittest.main() |
299,919 | test lsplatform2 | # Data Parallel Control (dpctl)
#
# Copyright 2020-2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines unit test cases for the SyclPlatform class.
"""
import sys
import pytest
from helper import has_sycl_platforms
import dpctl
list_of_valid_filter_selectors = [
"opencl",
"opencl:gpu",
"opencl:cpu",
"opencl:gpu:0",
"gpu",
"cpu",
"level_zero",
"level_zero:gpu",
"opencl:cpu:0",
"level_zero:gpu:0",
"gpu:0",
"gpu:1",
"1",
]
list_of_invalid_filter_selectors = [
"-1",
"opencl:gpu:-1",
"cuda:cpu:0",
"abc",
]
def check_name(platform):
try:
platform.name
except Exception:
pytest.fail("Encountered an exception inside platform.name.")
def check_vendor(platform):
try:
platform.vendor
except Exception:
pytest.fail("Encountered an exception inside platform.vendor.")
def check_version(platform):
try:
platform.version
except Exception:
pytest.fail("Encountered an exception inside platform.version.")
def check_backend(platform):
try:
platform.backend
except Exception:
pytest.fail("Encountered an exception inside platform.backend.")
def check_print_info(platform):
try:
platform.print_platform_info()
except Exception:
pytest.fail("Encountered an exception inside print_info().")
def check_repr(platform):
r = repr(platform)
assert type(r) is str
assert r != ""
def check_default_context(platform):
if "linux" not in sys.platform:
return
r = platform.default_context
assert type(r) is dpctl.SyclContext
def check_equal_and_hash(platform):
assert platform == platform
if "linux" not in sys.platform:
return
default_ctx = platform.default_context
for d in default_ctx.get_devices():
assert platform == d.sycl_platform
assert hash(platform) == hash(d.sycl_platform)
def check_hash_in_dict(platform):
map = {platform: 0}
assert map[platform] == 0
list_of_checks = [
check_name,
check_vendor,
check_version,
check_backend,
check_print_info,
check_repr,
check_default_context,
check_equal_and_hash,
check_hash_in_dict,
]
@pytest.fixture(params=list_of_valid_filter_selectors)
def valid_filter(request):
return request.param
@pytest.fixture(params=list_of_invalid_filter_selectors)
def invalid_filter(request):
return request.param
@pytest.fixture(params=list_of_checks)
def check(request):
return request.param
def test_platform_creation(valid_filter, check):
"""Tests if we can create a SyclPlatform using a supported filter selector
string.
"""
platform = None
try:
platform = dpctl.SyclPlatform(valid_filter)
except ValueError:
pytest.skip("Failed to create platform with supported filter")
check(platform)
def test_default_platform_creation(check):
platform = None
try:
platform = dpctl.SyclPlatform()
except ValueError:
pytest.skip("Failed to create default platform")
check(platform)
def test_invalid_platform_creation(invalid_filter, check):
"""Tests if we can create a SyclPlatform using a supported filter selector
string.
"""
with pytest.raises(ValueError):
dpctl.SyclPlatform(invalid_filter)
def test_lsplatform():
try:
dpctl.lsplatform()
except Exception:
pytest.fail("Encountered an exception inside lsplatform().")
def test_lsplatform0():
try:
dpctl.lsplatform(0)
except Exception:
pytest.fail("Encountered an exception inside lsplatform().")
def test_lsplatform1():
try:
dpctl.lsplatform(1)
except Exception:
pytest.fail("Encountered an exception inside lsplatform().")
def METHOD_NAME():
try:
dpctl.lsplatform(2)
except Exception:
pytest.fail("Encountered an exception inside lsplatform().")
def test_lsplatform3():
try:
with pytest.warns(UserWarning):
dpctl.lsplatform(3)
except Exception:
pytest.fail("Encountered an exception inside lsplatform().")
def test_get_platforms():
try:
platforms = dpctl.get_platforms()
if platforms:
assert has_sycl_platforms()
except Exception:
pytest.fail("Encountered an exception inside get_platforms().") |
299,920 | resolve alias or index | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .normalizing import NormalizedDict
from .robottypes import is_string
class ConnectionCache:
"""Cache for libraries to use with concurrent connections, processes, etc.
The cache stores the registered connections (or other objects) and allows
switching between them using generated indices or user given aliases.
This is useful with any library where there's need for multiple concurrent
connections, processes, etc.
This class is used also outside the core framework by SeleniumLibrary,
SSHLibrary, etc. Backwards compatibility is thus important when doing changes.
"""
def __init__(self, no_current_msg='No open connection.'):
self._no_current = NoConnection(no_current_msg)
self.current = self._no_current #: Current active connection.
self._connections = []
self._aliases = NormalizedDict()
@property
def current_index(self):
if not self:
return None
for index, conn in enumerate(self):
if conn is self.current:
return index + 1
@current_index.setter
def current_index(self, index):
self.current = self._connections[index - 1] \
if index is not None else self._no_current
def register(self, connection, alias=None):
"""Registers given connection with optional alias and returns its index.
Given connection is set to be the :attr:`current` connection.
If alias is given, it must be a string. Aliases are case and space
insensitive.
The index of the first connection after initialization, and after
:meth:`close_all` or :meth:`empty_cache`, is 1, second is 2, etc.
"""
self.current = connection
self._connections.append(connection)
index = len(self._connections)
if is_string(alias):
self._aliases[alias] = index
return index
def switch(self, alias_or_index):
"""Switches to the connection specified by the given alias or index.
Updates :attr:`current` and also returns its new value.
Alias is whatever was given to :meth:`register` method and indices
are returned by it. Index can be given either as an integer or
as a string that can be converted to an integer. Raises an error
        if no connection with the given index or alias is found.
"""
self.current = self.get_connection(alias_or_index)
return self.current
def get_connection(self, alias_or_index=None):
"""Get the connection specified by the given alias or index..
If ``alias_or_index`` is ``None``, returns the current connection
if it is active, or raises an error if it is not.
Alias is whatever was given to :meth:`register` method and indices
are returned by it. Index can be given either as an integer or
as a string that can be converted to an integer. Raises an error
        if no connection with the given index or alias is found.
"""
if alias_or_index is None:
if not self:
self.current.raise_error()
return self.current
try:
index = self.METHOD_NAME(alias_or_index)
except ValueError as err:
raise RuntimeError(err.args[0])
return self._connections[index-1]
__getitem__ = get_connection
def close_all(self, closer_method='close'):
"""Closes connections using given closer method and empties cache.
If simply calling the closer method is not adequate for closing
connections, clients should close connections themselves and use
:meth:`empty_cache` afterwards.
"""
for conn in self._connections:
getattr(conn, closer_method)()
self.empty_cache()
return self.current
def empty_cache(self):
"""Empties the connection cache.
Indexes of the new connections starts from 1 after this.
"""
self.current = self._no_current
self._connections = []
self._aliases = NormalizedDict()
def __iter__(self):
return iter(self._connections)
def __len__(self):
return len(self._connections)
def __bool__(self):
return self.current is not self._no_current
def METHOD_NAME(self, alias_or_index):
for resolver in self._resolve_alias, self._resolve_index, self._is_connection:
try:
return resolver(alias_or_index)
except ValueError:
pass
raise ValueError(f"Non-existing index or alias '{alias_or_index}'.")
def _resolve_alias(self, alias):
if is_string(alias) and alias in self._aliases:
return self._aliases[alias]
raise ValueError
def _resolve_index(self, index):
try:
index = int(index)
except TypeError:
raise ValueError
if not 0 < index <= len(self._connections):
raise ValueError
return index
def _is_connection(self, conn):
return self._connections.index(conn) + 1
class NoConnection:
def __init__(self, message):
self.message = message
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
raise AttributeError
self.raise_error()
def raise_error(self):
raise RuntimeError(self.message)
def __bool__(self):
return False |
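if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): registering two dummy
    # "connections" and switching between them by alias and index. The dummy class
    # below is made up for demonstration.
    class _DummyConnection:
        def __init__(self, name):
            self.name = name
        def close(self):
            print('closing %s' % self.name)
    cache = ConnectionCache()
    cache.register(_DummyConnection('first'), alias='primary')
    cache.register(_DummyConnection('second'))
    print(cache.current.name)            # 'second' is now the current connection
    print(cache.switch('primary').name)  # switch back to 'first' by alias
    print(cache.get_connection(2).name)  # fetch 'second' by its index
    cache.close_all()                    # closes both and empties the cache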
299,921 | message | # -*- coding: utf-8 -*-
# Hikari Examples - A collection of examples for Hikari.
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain worldwide.
# This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along with this software.
# If not, see <https://creativecommons.org/publicdomain/zero/1.0/>.
"""A simple bot to demonstrate how to use rillrate with hikari to make a web dashboard for the bot.
Just visit `http://localhost:6361/ui/` to explore your dashboard!
"""
import logging
import os
import rillrate
from rillrate import prime as rr_prime
import hikari
PREFIX = ","
# Name used to group dashboards.
# You could have multiple packages for different applications, such as a package for the bot
# dashboards, and another package for a web server running alongside the bot.
PACKAGE = "Rillrate Example"
# Dashboards are parts inside a package; they can be used to group different types of
# dashboards that you may want to use, like a dashboard for system status, another dashboard
# for cache status, and another one to configure features or trigger actions on the bot.
DASHBOARD = "Control Panel"
# These are menus inside the dashboard, you can use them to group specific sets
# of data inside the same dashboard.
GROUP_CONFIG = "1 - Example"
# All the 3 configurable namespaces are sorted alphabetically.
# Class with all our dashboard logic
class RillRateDashboard:
"""Global data shared across the entire bot, used to store dashboard values."""
__slots__ = ("logger", "value", "selector", "slider")
def __init__(self) -> None:
self.logger = logging.getLogger("dashboard")
self.value = 0
# Install rillrate - Spins up the rillrate service in a separate thread, making it non-blocking :)
rillrate.install()
# Register the dashboard objects
dummy_values = [str(i) for i in range(0, 256 + 1, 32)]
self.selector = rr_prime.Selector(
f"{PACKAGE}.{DASHBOARD}.{GROUP_CONFIG}.Selector", label="Choose!", options=dummy_values
)
self.slider = rr_prime.Slider(
f"{PACKAGE}.{DASHBOARD}.{GROUP_CONFIG}.Slider", label="More fine grain control", min=0, max=256, step=2
)
# Add sync callbacks - This way we tell rillrate what functions to call when a sync event occurs
self.selector.sync_callback(self._selector_callback)
self.slider.sync_callback(self._slider_callback)
def _selector_callback(self, activity: rillrate.Activity, action: rillrate.Action) -> None:
self.logger.info("Selector activity: %s | action = %s", activity, action)
if action is not None:
value = int(action.value)
self.logger.info("Selected: %s", value)
# Update the slider too, so they show the same value.
self.slider.apply(value)
# Overwrite the current stored value on the global data with the new selected value.
self.value = value
def _slider_callback(self, activity: rillrate.Activity, action: rillrate.Action) -> None:
self.logger.info("Slider activity: %s | action = %s", activity, action)
if action is not None:
value = int(action.value)
self.logger.info("Slided to: %s", value)
# Update the selector too, so they show the same value.
# It is important to note that since not all values are present in the selector, it might be empty sometimes
self.selector.apply(str(value))
# Overwrite the current stored value on the global data with the new selected value.
self.value = value
bot = hikari.GatewayBot(token=os.environ["BOT_TOKEN"])
dashboard = RillRateDashboard()
def is_command(cmd_name: str, content: str) -> bool:
"""Check if the message sent is a valid command."""
return content == f"{PREFIX}{cmd_name}"
@bot.listen()
async def METHOD_NAME(event: hikari.GuildMessageCreateEvent) -> None:
"""Listen for messages being created."""
if not event.is_human or not event.content:
return
# Command Framework 101 :D
if event.content.startswith(PREFIX):
if is_command("ping", event.content):
await event.METHOD_NAME.respond("Pong!")
elif is_command("value", event.content):
await event.METHOD_NAME.respond(f"Current value: {dashboard.value}")
bot.run() |
299,922 | refresh | import collections
from qtpy import QtCore, QtGui
from openpype.lib import Logger
class LogModel(QtGui.QStandardItemModel):
COLUMNS = (
"process_name",
"hostname",
"hostip",
"username",
"system_name",
"started"
)
colums_mapping = {
"process_name": "Process Name",
"process_id": "Process Id",
"hostname": "Hostname",
"hostip": "Host IP",
"username": "Username",
"system_name": "System name",
"started": "Started at"
}
process_keys = (
"process_id", "hostname", "hostip",
"username", "system_name", "process_name"
)
log_keys = (
"timestamp", "level", "thread", "threadName", "message", "loggerName",
"fileName", "module", "method", "lineNumber"
)
default_value = "- Not set -"
ROLE_LOGS = QtCore.Qt.UserRole + 2
ROLE_PROCESS_ID = QtCore.Qt.UserRole + 3
def __init__(self, parent=None):
super(LogModel, self).__init__(parent)
self.log_by_process = None
self.dbcon = None
# Crash if connection is not possible to skip this module
if not Logger.initialized:
Logger.initialize()
connection = Logger.get_log_mongo_connection()
if connection:
Logger.bootstrap_mongo_log()
database = connection[Logger.log_database_name]
self.dbcon = database[Logger.log_collection_name]
def headerData(self, section, orientation, role):
if (
role == QtCore.Qt.DisplayRole
and orientation == QtCore.Qt.Horizontal
):
if section < len(self.COLUMNS):
key = self.COLUMNS[section]
return self.colums_mapping.get(key, key)
        return super(LogModel, self).headerData(section, orientation, role)
def add_process_logs(self, process_logs):
items = []
first_item = True
for key in self.COLUMNS:
display_value = str(process_logs[key])
item = QtGui.QStandardItem(display_value)
if first_item:
first_item = False
item.setData(process_logs["_logs"], self.ROLE_LOGS)
item.setData(process_logs["process_id"], self.ROLE_PROCESS_ID)
items.append(item)
self.appendRow(items)
def METHOD_NAME(self):
self.log_by_process = collections.defaultdict(list)
self.process_info = {}
self.clear()
self.beginResetModel()
if self.dbcon:
result = self.dbcon.find({})
for item in result:
process_id = item.get("process_id")
# backwards (in)compatibility
if not process_id:
continue
if process_id not in self.process_info:
proc_dict = {"_logs": []}
for key in self.process_keys:
proc_dict[key] = (
item.get(key) or self.default_value
)
self.process_info[process_id] = proc_dict
log_item = {}
for key in self.log_keys:
log_item[key] = item.get(key) or self.default_value
if "exception" in item:
log_item["exception"] = item["exception"]
self.process_info[process_id]["_logs"].append(log_item)
for item in self.process_info.values():
item["_logs"] = sorted(
item["_logs"], key=lambda item: item["timestamp"]
)
item["started"] = item["_logs"][0]["timestamp"]
self.add_process_logs(item)
self.endResetModel()
class LogsFilterProxy(QtCore.QSortFilterProxyModel):
def __init__(self, *args, **kwargs):
super(LogsFilterProxy, self).__init__(*args, **kwargs)
self.col_usernames = None
self.filter_usernames = set()
def update_users_filter(self, users):
self.filter_usernames = set()
for user in users or tuple():
self.filter_usernames.add(user)
self.invalidateFilter()
def filterAcceptsRow(self, source_row, source_parent):
if self.col_usernames is not None:
index = self.sourceModel().index(
source_row, self.col_usernames, source_parent
)
user = index.data(QtCore.Qt.DisplayRole)
if user not in self.filter_usernames:
return False
return True |
299,923 | factorize | #!/usr/bin/python
#
# Copyright (C) Christian Thurau, 2010.
# Licensed under the GNU General Public License (GPL).
# http://www.gnu.org/licenses/gpl.txt
"""
PyMF Principal Component Analysis.
PCA: Class for Principal Component Analysis
"""
import numpy as np
from .nmf import NMF
from .svd import SVD
__all__ = ["PCA"]
class PCA(NMF):
"""
    PCA(data, num_bases=0, center_mean=True)
    Principal Component Analysis. Factorize a data matrix into two matrices s.t.
F = | data - W*H | is minimal. W is set to the eigenvectors of the
data covariance.
Parameters
----------
data : array_like, shape (_data_dimension, _num_samples)
the input data
num_bases: int, optional
Number of bases to compute (column rank of W and row rank of H).
        0 (default), which keeps all components
center_mean: bool, True
Make sure that the data is centred around the mean.
Attributes
----------
W : "data_dimension x num_bases" matrix of basis vectors
H : "num bases x num_samples" matrix of coefficients
ferr : frobenius norm (after calling .factorize())
Example
-------
Applying PCA to some rather stupid data set:
>>> import numpy as np
>>> data = np.array([[1.0, 0.0, 2.0], [0.0, 1.0, 1.0]])
>>> pca_mdl = PCA(data, num_bases=2)
>>> pca_mdl.factorize()
The basis vectors are now stored in pca_mdl.W, the coefficients in pca_mdl.H.
To compute coefficients for an existing set of basis vectors simply copy W
to pca_mdl.W, and set compute_w to False:
>>> data = np.array([[1.5], [1.2]])
>>> W = np.array([[1.0, 0.0], [0.0, 1.0]])
>>> pca_mdl = PCA(data, num_bases=2)
>>> pca_mdl.W = W
>>> pca_mdl.factorize(compute_w=False)
The result is a set of coefficients pca_mdl.H, s.t. data = W * pca_mdl.H.
"""
def __init__(self, data, num_bases=0, center_mean=True):
NMF.__init__(self, data, num_bases=num_bases)
# center the data around the mean first
self._center_mean = center_mean
if self._center_mean:
# copy the data before centering it
self._data_orig = data
self._meanv = self._data_orig[:,:].mean(axis=1).reshape(data.shape[0],-1)
self.data = self._data_orig - self._meanv
else:
self.data = data
def init_h(self):
pass
def init_w(self):
pass
def update_h(self):
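        # Project the (centered) data onto the basis W; since W holds orthonormal
        # eigenvectors, H = W^T * data is the least-squares coefficient matrix.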
self.H = np.dot(self.W.T, self.data[:,:])
def update_w(self):
# compute eigenvectors and eigenvalues using SVD
svd_mdl = SVD(self.data)
svd_mdl.METHOD_NAME()
        # argsort sorts in ascending order -> do reverse indexing
        # for accessing values in descending order
S = np.diag(svd_mdl.S)
order = np.argsort(S)[::-1]
# select only a few eigenvectors ...
        if self._num_bases > 0:
order = order[:self._num_bases]
self.W = svd_mdl.U[:,order]
self.eigenvalues = S[order]
def METHOD_NAME(self, show_progress=False, compute_w=True, compute_h=True,
compute_err=True, niter=1):
""" Factorize s.t. WH = data
Parameters
----------
show_progress : bool
print some extra information to stdout.
compute_h : bool
iteratively update values for H.
compute_w : bool
iteratively update values for W.
compute_err : bool
compute Frobenius norm |data-WH| after each update and store
it to .ferr[k].
Updated Values
--------------
.W : updated values for W.
.H : updated values for H.
.ferr : Frobenius norm |data-WH|.
"""
NMF.METHOD_NAME(self, niter=1, show_progress=show_progress,
compute_w=compute_w, compute_h=compute_h,
compute_err=compute_err)
if __name__ == "__main__":
import doctest
doctest.testmod() |
299,924 | compute dp drho | #-------------------------------------------------------------------------------
# LinearPolynomialEquationOfState
#-------------------------------------------------------------------------------
from PYB11Generator import *
from SolidEquationOfState import *
from EOSAbstractMethods import *
@PYB11template("Dimension")
@PYB11module("SpheralSolidMaterial")
class LinearPolynomialEquationOfState(SolidEquationOfState):
"""LinearPolynomialEquationOfState -- An equation of state approximated by a
linear polynomial, i.e.:
    P(rho, e) = a0 + a1*mu + a2*mu^2 + a3*mu^3 + (b0 + b1*mu + b2*mu^2)*e
mu = rho/rho0 - 1.0"""
PYB11typedefs = """
typedef typename %(Dimension)s::Scalar Scalar;
typedef Field<%(Dimension)s, Scalar> ScalarField;
"""
#...........................................................................
# Constructors
def pyinit(self,
referenceDensity = "const double",
etamin = "const double",
etamax = "const double",
a0 = "const double",
a1 = "const double",
a2 = "const double",
a3 = "const double",
b0 = "const double",
b1 = "const double",
b2 = "const double",
atomicWeight = "const double",
constants = "const PhysicalConstants&",
externalPressure = ("const double", "0.0"),
minimumPressure = ("const double", "std::numeric_limits<double>::lowest()"),
maximumPressure = ("const double", "std::numeric_limits<double>::max()"),
minimumPressureDamage = ("const double", "0.0"),
minPressureType = ("const MaterialPressureMinType", "MaterialPressureMinType::PressureFloor")):
"Linear-polynomial EOS"
#...........................................................................
# Methods
@PYB11const
def pressure(self,
massDensity = "const Scalar",
specificThermalEnergy = "const Scalar"):
return "Scalar"
@PYB11const
def temperature(self,
massDensity = "const Scalar",
specificThermalEnergy = "const Scalar"):
return "Scalar"
@PYB11const
def specificThermalEnergy(self,
massDensity = "const Scalar",
temperature = "const Scalar"):
return "Scalar"
@PYB11const
def specificHeat(self,
massDensity = "const Scalar",
temperature = "const Scalar"):
return "Scalar"
@PYB11const
def soundSpeed(self,
massDensity = "const Scalar",
specificThermalEnergy = "const Scalar"):
return "Scalar"
@PYB11const
def gamma(self,
massDensity = "const Scalar",
specificThermalEnergy = "const Scalar"):
return "Scalar"
@PYB11const
def bulkModulus(self,
massDensity = "const Scalar",
specificThermalEnergy = "const Scalar"):
return "Scalar"
@PYB11const
def entropy(self,
massDensity = "const Scalar",
specificThermalEnergy = "const Scalar"):
return "Scalar"
@PYB11const
def METHOD_NAME(self,
massDensity = "const Scalar",
specificThermalEnergy = "const Scalar"):
"Compute the derivative of the pressure with respect to the density."
return "double"
#...........................................................................
# Properties
a0 = PYB11property("double", "a0", "a0")
a1 = PYB11property("double", "a1", "a1")
a2 = PYB11property("double", "a2", "a2")
a3 = PYB11property("double", "a3", "a3")
b0 = PYB11property("double", "b0", "b0")
b1 = PYB11property("double", "b1", "b1")
b2 = PYB11property("double", "b2", "b2")
atomicWeight = PYB11property("double", "atomicWeight", "atomicWeight")
externalPressure = PYB11property("double", "externalPressure", "externalPressure")
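# Editor-added note (illustrative only, not part of the generated bindings): the
# docstring formula can be evaluated directly in Python as a reference check,
#   mu = rho/rho0 - 1.0
#   P  = a0 + a1*mu + a2*mu**2 + a3*mu**3 + (b0 + b1*mu + b2*mu**2)*eps
# where rho0 is the reference density and eps the specific thermal energy.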
#-------------------------------------------------------------------------------
# Inject EOS interface
#-------------------------------------------------------------------------------
PYB11inject(EOSAbstractMethods, LinearPolynomialEquationOfState, virtual=True, pure_virtual=False) |
299,925 | update grids | # -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher ([email protected])
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019-2020 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from qtpy import QtWidgets
from pyqtgraph.opengl import GLSurfacePlotItem
from pyqtgraph.opengl import GLViewWidget, GLGridItem
from pyqtgraph import GraphicsLayoutWidget
from .HistogramLUTItem import HistogramLUTItem
class SurfaceWidget(QtWidgets.QWidget):
iteration_name = ''
def __init__(self):
super(SurfaceWidget, self).__init__()
self.lut_pg_layout = GraphicsLayoutWidget()
self.pg_layout = GLViewWidget()
self.pg_layout.setCameraPosition(distance=3)
self.surf_view_item = None
self.pressed_key = None
self.show_range = np.array([0.0, 1.0])
self.show_scale = np.array([2., 2., 1.])
self.g_translate = 0
self.g_pos = 0
self.marker = 0
self.marker_color = [1, 0, 0]
self.marker_size = 5
self.data = None
self.create_graphics()
self._lut_lo = QtWidgets.QVBoxLayout()
self._lut_lo.setContentsMargins(0, 0, 0, 0)
self._lut_lo.addWidget(self.lut_pg_layout)
self._lut_w = QtWidgets.QWidget()
self._lut_w.setMaximumHeight(80)
self._lut_w.setLayout(self._lut_lo)
self._layout = QtWidgets.QVBoxLayout()
self._layout.setContentsMargins(0, 0, 0, 0)
self._layout.setSpacing(0)
self._layout.addWidget(self._lut_w)
self._layout.addWidget(self.pg_layout)
self.img_histogram_LUT_horizontal = HistogramLUTItem()
self.img_histogram_LUT_horizontal.gradient.loadPreset('jet')
self.img_histogram_LUT_horizontal.sigLevelsChanged.connect(self.update_color)
self.img_histogram_LUT_horizontal.sigLevelChangeFinished.connect(self.update_color)
self.img_histogram_LUT_horizontal.sigLookupTableChanged.connect(self.update_color)
self.lut_pg_layout.addItem(self.img_histogram_LUT_horizontal, 0, 1)
self.setLayout(self._layout)
def create_graphics(self):
self.back_grid = GLGridItem()
self.back_grid.rotate(90, 0, 1, 0)
self.back_grid.setSize(1, 1, 0)
self.back_grid.setSpacing(1, 0.1, 1)
self.back_grid.setDepthValue(10) # draw grid after surfaces since they may be translucent
self.pg_layout.addItem(self.back_grid)
self.base_grid = GLGridItem()
self.base_grid.setSize(26, 4000, 0)
self.base_grid.setSpacing(1, 100, 1)
self.base_grid.setDepthValue(10) # draw grid after surfaces since they may be translucent
self.pg_layout.addItem(self.base_grid)
# self.axis = CustomAxis(self.pg_layout)
self.surf_view_item = GLSurfacePlotItem(z=np.array([[0]]),
colors=np.array([[0, 0, 0, 0]]),
smooth=False)
self.surf_view_item.setGLOptions('translucent')
self.pg_layout.addItem(self.surf_view_item)
def update_color(self):
if self.data is not None:
colors = self.get_colors(self.data).reshape(-1, 4)
self.surf_view_item.setData(z=self.data, colors=colors)
def plot_surface(self, data, start, step):
self.g_pos = int((self.g_translate - start) / step)
colors = self.get_colors(data).reshape(-1, 4)
abs_range = self.show_range * (np.nanmax(data) - np.nanmin(data)) + np.nanmin(data)
self.data = np.copy(data)
self.data[self.data > abs_range[1]] = abs_range[1]
self.data[self.data < abs_range[0]] = abs_range[0]
self.surf_view_item.setData(z=self.data, colors=colors)
self.img_histogram_LUT_horizontal.imageChanged(img_data=self.data)
self.img_histogram_LUT_horizontal.setLevels(np.nanmin(self.data), np.nanmax(self.data))
# self.axis.setSize(*self.show_scale)
self.METHOD_NAME(data)
def METHOD_NAME(self, data):
self.back_grid.setSize(np.nanmax(data), self.data.shape[1], 0)
self.base_grid.setSize(self.data.shape[0], self.data.shape[1], 0)
scale = [self.show_scale[0] / data.shape[0],
self.show_scale[1] / data.shape[1],
self.show_scale[2] / np.nanmax(data)]
self.surf_view_item.resetTransform()
self.surf_view_item.translate(-data.shape[0] / 2., -data.shape[1] / 2., 0)
self.surf_view_item.scale(*scale, local=False)
self.back_grid.resetTransform()
self.back_grid.rotate(90, 0, 1, 0)
self.back_grid.translate(-data.shape[0] / 2, 0, np.nanmax(data) / 2. + np.nanmin(data))
self.back_grid.scale(*scale, local=False)
self.base_grid.resetTransform()
self.base_grid.translate(0, 0, np.nanmin(data))
self.base_grid.scale(*scale, local=False)
# self.axis.setSize(*self.show_scale)
# self.axis.translate(-data.shape[0] / 2, 0, np.nanmax(data) / 2. + np.nanmin(data))
# self.axis.diff = [self.show_scale[0] * self.g_pos / data.shape[0], 0, 0]
def get_colors(self, data):
lut = self.img_histogram_LUT_horizontal.gradient.getLookupTable(256) / 256.
level = self.img_histogram_LUT_horizontal.getExpLevels()
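        # Map the data onto 0-255 lookup-table indices using the currently selected histogram levels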
# int_data = ((data - np.nanmin(data)) / (np.nanmax(data) - np.nanmin(data)) * 255).astype(int)
        min_level = np.nanmin(data)
        if level[0] > 1:
            min_level = level[0]
        int_data = ((data - min_level) / (level[1] - min_level) * 255).astype(int)
int_data[int_data > 255] = 255
int_data[int_data < 0] = 0
int_data = np.nan_to_num(int_data)
int_data[int_data < 0] = 0
colors_rgb = lut[int_data]
colors = np.ones((colors_rgb.shape[0], colors_rgb.shape[1], 4))
colors[..., :3] = colors_rgb
colors[:, int(self.marker):int(self.marker) + self.marker_size, :3] = self.marker_color
colors[self.g_pos, :, :3] = self.marker_color
return colors |
299,926 | load header | #
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from astLib import astWCS, astCoords
# astlib requires pyfits (or astropy) in order
# to create a WCS object from a FITS header.
from astropy.io import fits as pyfits
from ginga.util.wcsmod import common
astWCS.NUMPY_MODE = True
coord_types = ['j2000', 'b1950', 'galactic']
class AstLibWCS(common.BaseWCS):
"""
A WCS interface for astLib.astWCS.WCS
You need to install python module 'astLib'
(http://sourceforge.net/projects/astlib)
if you want to use this version.
"""
def __init__(self, logger):
super(AstLibWCS, self).__init__(logger)
self.kind = 'astlib/wcstools'
def METHOD_NAME(self, header, fobj=None):
self.header = {}
self.header.update(header.items())
self.fix_bad_headers()
try:
# reconstruct a pyfits header
hdr = pyfits.Header(header.items())
self.logger.debug("Trying to make astLib wcs object")
self.wcs = astWCS.WCS(hdr, mode='pyfits')
self.coordsys = self.get_coord_system_name(self.header)
self.logger.debug("Coordinate system is: %s" % (self.coordsys))
except Exception as e:
self.logger.error("Error making WCS object: %s" % (str(e)))
self.wcs = None
def get_coord_system_name(self, header):
coordsys = common.get_coord_system_name(header)
coordsys = coordsys.upper()
if coordsys in ('FK4',):
return 'b1950'
elif coordsys in ('FK5', 'ICRS'):
return 'j2000'
elif coordsys in ('PIXEL',):
return 'pixel'
#raise common.WCSError("Cannot determine appropriate coordinate system from FITS header") # noqa
return 'j2000'
def spectral_coord(self, idxs, coords='data'):
raise common.WCSError("This feature not supported by astWCS")
def pixtoradec(self, idxs, coords='data'):
if coords == 'fits':
# Via astWCS.NUMPY_MODE, we've forced pixels referenced from 0
idxs = tuple(map(lambda x: x - 1, idxs))
try:
ra_deg, dec_deg = self.wcs.pix2wcs(idxs[0], idxs[1])
except Exception as e:
self.logger.error("Error calculating pixtoradec: %s" % (str(e)))
raise common.WCSError(e)
return ra_deg, dec_deg
def radectopix(self, ra_deg, dec_deg, coords='data', naxispath=None):
try:
x, y = self.wcs.wcs2pix(ra_deg, dec_deg)
except Exception as e:
self.logger.error("Error calculating radectopix: %s" % (str(e)))
raise common.WCSError(e)
if coords == 'fits':
# Via astWCS.NUMPY_MODE, we've forced pixels referenced from 0
x, y = x + 1, y + 1
return (x, y)
def pixtosystem(self, idxs, system=None, coords='data'):
if self.coordsys == 'raw':
raise common.WCSError("No usable WCS")
if system is None:
system = 'j2000'
# Get a coordinates object based on ra/dec wcs transform
ra_deg, dec_deg = self.pixtoradec(idxs, coords=coords)
self.logger.debug("ra, dec = %f, %f" % (ra_deg, dec_deg))
# convert to alternate coord
try:
fromsys = self.coordsys.upper()
if fromsys == 'PIXEL':
# these are really pixel values
return (ra_deg, dec_deg)
tosys = system.upper()
if fromsys == 'B1950':
equinox = 1950.0
else:
equinox = 2000.0
lon_deg, lat_deg = astCoords.convertCoords(fromsys, tosys,
ra_deg, dec_deg,
equinox)
except Exception as e:
raise common.WCSError(
"Error converting between coordinate systems "
"'%s' and '%s': %s" % (fromsys, tosys, str(e)))
return (lon_deg, lat_deg)
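# Editor-added sketch (hypothetical usage, not part of ginga): build the WCS from a
# FITS header mapping and convert one 0-based pixel position to RA/Dec in degrees.
def _example_pixel_to_sky(logger, fits_header, x, y):
    wcs = AstLibWCS(logger)
    wcs.METHOD_NAME(fits_header)  # construct the underlying astWCS object from the header
    return wcs.pixtoradec((x, y))  # 'data' coordinates are 0-based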
# register our WCS with ginga
common.register_wcs('astlib', AstLibWCS, coord_types) |
299,927 | get installed device attribute vi int32 | # -*- coding: utf-8 -*-
# This file was generated
import array
import ctypes
import hightime # noqa: F401
import nimodinst._library_singleton as _library_singleton
import nimodinst._visatype as _visatype
import nimodinst.errors as errors
# Helper functions for creating ctypes needed for calling into the driver DLL
def _get_ctypes_pointer_for_buffer(value=None, library_type=None, size=None):
if isinstance(value, array.array):
assert library_type is not None, 'library_type is required for array.array'
addr, _ = value.buffer_info()
return ctypes.cast(addr, ctypes.POINTER(library_type))
elif str(type(value)).find("'numpy.ndarray'") != -1:
import numpy
return numpy.ctypeslib.as_ctypes(value)
elif isinstance(value, bytes):
return ctypes.cast(value, ctypes.POINTER(library_type))
elif isinstance(value, list):
assert library_type is not None, 'library_type is required for list'
return (library_type * len(value))(*value)
else:
if library_type is not None and size is not None:
return (library_type * size)()
else:
return None
def _convert_to_array(value, array_type):
if value is not None:
if isinstance(value, array.array):
value_array = value
else:
value_array = array.array(array_type, value)
else:
value_array = None
return value_array
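# Editor-added illustrative sketch (hypothetical, not generated code): how the helper
# functions above build ctypes buffers that can be handed to the driver DLL.
def _example_buffer_helpers():
    in_buf = _get_ctypes_pointer_for_buffer(value=[1, 2, 3], library_type=_visatype.ViInt32)  # (ViInt32 * 3)(1, 2, 3)
    out_buf = _get_ctypes_pointer_for_buffer(library_type=_visatype.ViChar, size=256)  # empty 256-element output buffer
    return in_buf, out_buf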
class LibraryInterpreter(object):
'''Library C<->Python interpreter.
This class is responsible for interpreting the Library's C API. It is responsible for:
* Converting ctypes to native Python types.
* Dealing with string encoding.
* Allocating memory.
* Converting errors returned by Library into Python exceptions.
'''
def __init__(self, encoding):
self._encoding = encoding
self._library = _library_singleton.get()
# Initialize _handle to 0 for now.
# Session will directly update it once the driver runtime init function has been called and
# we have a valid session handle.
self.set_session_handle()
def set_session_handle(self, value=0):
self._handle = value
def get_session_handle(self):
return self._handle
def get_error_description(self, error_code):
'''get_error_description
Returns the error description.
'''
# We hand-maintain the code that calls into the cfunc rather than leverage code-generation
# because niModInst_GetExtendedErrorInfo() does not properly do the IVI-dance.
# See https://github.com/ni/nimi-python/issues/166
error_info_buffer_size_ctype = _visatype.ViInt32() # case S170
error_info_ctype = None # case C050
error_code = self._library.niModInst_GetExtendedErrorInfo(error_info_buffer_size_ctype, error_info_ctype)
if error_code <= 0:
return 'Failed to retrieve error description.'
error_info_buffer_size_ctype = _visatype.ViInt32(error_code) # case S180
error_info_ctype = (_visatype.ViChar * error_info_buffer_size_ctype.value)() # case C060
# Note we don't look at the return value. This is intentional as niModInst returns the
# original error code rather than 0 (VI_SUCCESS).
self._library.niModInst_GetExtendedErrorInfo(error_info_buffer_size_ctype, error_info_ctype)
return error_info_ctype.value.decode("ascii")
def close_installed_devices_session(self): # noqa: N802
handle_ctype = _visatype.ViSession(self._handle) # case S110
error_code = self._library.niModInst_CloseInstalledDevicesSession(handle_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return
def get_extended_error_info(self): # noqa: N802
error_info_buffer_size_ctype = _visatype.ViInt32() # case S170
error_info_ctype = None # case C050
error_code = self._library.niModInst_GetExtendedErrorInfo(error_info_buffer_size_ctype, error_info_ctype)
errors.handle_error(self, error_code, ignore_warnings=True, is_error_handling=True)
error_info_buffer_size_ctype = _visatype.ViInt32(error_code) # case S180
error_info_ctype = (_visatype.ViChar * error_info_buffer_size_ctype.value)() # case C060
error_code = self._library.niModInst_GetExtendedErrorInfo(error_info_buffer_size_ctype, error_info_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=True)
return error_info_ctype.value.decode(self._encoding)
def METHOD_NAME(self, index, attribute_id): # noqa: N802
handle_ctype = _visatype.ViSession(self._handle) # case S110
index_ctype = _visatype.ViInt32(index) # case S150
attribute_id_ctype = _visatype.ViInt32(attribute_id) # case S150
attribute_value_ctype = _visatype.ViInt32() # case S220
error_code = self._library.niModInst_GetInstalledDeviceAttributeViInt32(handle_ctype, index_ctype, attribute_id_ctype, None if attribute_value_ctype is None else (ctypes.pointer(attribute_value_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return int(attribute_value_ctype.value)
def get_installed_device_attribute_vi_string(self, index, attribute_id): # noqa: N802
handle_ctype = _visatype.ViSession(self._handle) # case S110
index_ctype = _visatype.ViInt32(index) # case S150
attribute_id_ctype = _visatype.ViInt32(attribute_id) # case S150
attribute_value_buffer_size_ctype = _visatype.ViInt32() # case S170
attribute_value_ctype = None # case C050
error_code = self._library.niModInst_GetInstalledDeviceAttributeViString(handle_ctype, index_ctype, attribute_id_ctype, attribute_value_buffer_size_ctype, attribute_value_ctype)
errors.handle_error(self, error_code, ignore_warnings=True, is_error_handling=False)
attribute_value_buffer_size_ctype = _visatype.ViInt32(error_code) # case S180
attribute_value_ctype = (_visatype.ViChar * attribute_value_buffer_size_ctype.value)() # case C060
error_code = self._library.niModInst_GetInstalledDeviceAttributeViString(handle_ctype, index_ctype, attribute_id_ctype, attribute_value_buffer_size_ctype, attribute_value_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return attribute_value_ctype.value.decode(self._encoding)
def open_installed_devices_session(self, driver): # noqa: N802
driver_ctype = ctypes.create_string_buffer(driver.encode(self._encoding)) # case C020
handle_ctype = _visatype.ViSession() # case S220
device_count_ctype = _visatype.ViInt32() # case S220
error_code = self._library.niModInst_OpenInstalledDevicesSession(driver_ctype, None if handle_ctype is None else (ctypes.pointer(handle_ctype)), None if device_count_ctype is None else (ctypes.pointer(device_count_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return int(handle_ctype.value), int(device_count_ctype.value) |
299,928 | bytescale | """
Copyright (c) 2018-2023 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from functools import singledispatch
import numpy as np
from PIL import Image
from ..config import NumberField, BoolField
from ..utils import get_size_from_config
from .postprocessor import PostprocessorWithSpecificTargets
from ..representation import (
SegmentationPrediction, SegmentationAnnotation,
AnomalySegmentationAnnotation, AnomalySegmentationPrediction,
BackgroundMattingAnnotation, BackgroundMattingPrediction,
SalientRegionAnnotation, SalientRegionPrediction
)
class ResizeSegmentationMask(PostprocessorWithSpecificTargets):
__provider__ = 'resize_segmentation_mask'
annotation_types = (
SegmentationAnnotation,
AnomalySegmentationAnnotation,
BackgroundMattingAnnotation,
SalientRegionAnnotation
)
prediction_types = (
SegmentationPrediction,
AnomalySegmentationPrediction,
BackgroundMattingPrediction,
SalientRegionPrediction
)
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'dst_width': NumberField(
value_type=int, optional=True, min_value=1, description="Destination width for resize"
),
'dst_height': NumberField(
value_type=int, optional=True, min_value=1, description="Destination height for resize."
),
'size': NumberField(
value_type=int, optional=True, min_value=1,
description="Destination size for resize for both dimensions (height and width)."
),
'to_dst_image_size': BoolField(optional=True, default=False)
})
return parameters
def configure(self):
self.dst_height, self.dst_width = get_size_from_config(self.config, allow_none=True)
self.to_dst_image_size = self.get_value_from_config('to_dst_image_size')
self._deprocess_predictions = False
def process_image(self, annotation, prediction):
target_height = self.dst_height or self.image_size[0]
target_width = self.dst_width or self.image_size[1]
if self._deprocess_predictions:
target_height, target_width = self.image_size[:2]
@singledispatch
def resize_segmentation_mask(entry, height, width):
return entry
@resize_segmentation_mask.register(SegmentationPrediction)
@resize_segmentation_mask.register(AnomalySegmentationPrediction)
@resize_segmentation_mask.register(BackgroundMattingPrediction)
@resize_segmentation_mask.register(SalientRegionPrediction)
def _(entry, height, width):
if len(entry.mask.shape) == 2:
entry.mask = self.segm_resize(entry.mask, width, height)
return entry
entry_mask = []
for class_mask in entry.mask:
resized_mask = self.segm_resize(class_mask, width, height)
entry_mask.append(resized_mask)
entry.mask = np.array(entry_mask)
return entry
@resize_segmentation_mask.register(SegmentationAnnotation)
@resize_segmentation_mask.register(AnomalySegmentationAnnotation)
@resize_segmentation_mask.register(BackgroundMattingAnnotation)
@resize_segmentation_mask.register(SalientRegionAnnotation)
def _(entry, height, width):
entry.mask = self.segm_resize(entry.mask, width, height)
return entry
for target in annotation:
resize_segmentation_mask(target, target_height, target_width)
for target in prediction:
resize_segmentation_mask(target, target_height, target_width)
self._deprocess_predictions = False
return annotation, prediction
@staticmethod
def segm_resize(mask, width, height):
def _to_image(arr):
data = np.asarray(arr)
if np.iscomplexobj(data):
raise ValueError("Cannot convert a complex-valued array.")
shape = list(data.shape)
if len(shape) == 2:
return _process_2d(data, shape)
if len(shape) == 3 and shape[2] in (3, 4):
return _process_3d(data, shape)
raise ValueError("'arr' does not have a suitable array shape for any mode.")
def _process_2d(data, shape):
height, width = shape
bytedata = METHOD_NAME(data)
image = Image.frombytes('L', (width, height), bytedata.tobytes())
return image
def _process_3d(data, shape):
bytedata = METHOD_NAME(data)
height, width, channels = shape
mode = 'RGB' if channels == 3 else 'RGBA'
image = Image.frombytes(mode, (width, height), bytedata.tobytes())
return image
def METHOD_NAME(data):
if data.dtype == np.uint8:
return data
cmin = data.min()
cmax = data.max()
if cmin >= 0 and cmax <= 255 and data.dtype not in [np.float32, np.float16, float]:
return data.astype(np.uint8)
cscale = cmax - cmin
if cscale == 0:
cscale = 1
scale = float(255) / cscale
bytedata = (data - cmin) * scale
return (bytedata.clip(0, 255) + 0.5).astype(np.uint8)
image = _to_image(mask)
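        # resample=0 is nearest-neighbour interpolation, which preserves discrete label values in the mask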
image_new = image.resize((width, height), resample=0)
return np.array(image_new)
def process_image_with_metadata(self, annotation, prediction, image_metadata=None):
if 'image_info' in image_metadata and self.to_dst_image_size and not self._deprocess_predictions:
self.image_size = image_metadata['image_info']
if image_metadata and self._deprocess_predictions and 'padding' in image_metadata:
self._deprocess_prediction_with_padding(prediction, image_metadata)
self.process_image(annotation, prediction)
@staticmethod
def _deprocess_prediction_with_padding(prediction, image_metadata):
if image_metadata.get('padding_disabled', False):
return
top, left, bottom, right = image_metadata['padding']
for pred in prediction:
mask = pred.mask
if mask.ndim == 2:
h, w = mask.shape
mask = mask[top:(h - bottom), left:(w - right)]
pred.mask = mask
continue
_, h, w = mask.shape
mask = mask[:, top:(h - bottom), left:(w - right)]
pred.mask = mask |
299,929 | test writer factory | # ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright (c) 2008-2022
# National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
#
# Unit Tests for util/misc
#
import os
import pyomo.common.unittest as unittest
from pyomo.opt import (
AbstractProblemWriter,
AbstractResultsReader,
OptSolver,
ReaderFactory,
SolverFactory,
WriterFactory,
)
from pyomo.opt.base.solvers import UnknownSolver
from pyomo.opt.plugins.sol import ResultsReader_sol
from pyomo.solvers.plugins.solvers.CBCplugin import MockCBC
class MockWriter(AbstractProblemWriter):
def __init__(self, name=None):
AbstractProblemWriter.__init__(self, name)
class MockReader(AbstractResultsReader):
def __init__(self, name=None):
AbstractResultsReader.__init__(self, name)
class MockSolver(OptSolver):
def __init__(self, **kwds):
kwds['type'] = 'stest_type'
kwds['doc'] = 'MockSolver Documentation'
OptSolver.__init__(self, **kwds)
class OptFactoryDebug(unittest.TestCase):
@classmethod
def setUpClass(cls):
import pyomo.environ
def tearDown(self):
        ReaderFactory.unregister('rtest3')
        SolverFactory.unregister('stest3')
        WriterFactory.unregister('wtest3')
def test_solver_factory(self):
"""
Testing the pyomo.opt solver factory with MIP solvers
"""
SolverFactory.register('stest3')(MockSolver)
ans = sorted(SolverFactory)
tmp = [
'_mock_asl',
'_mock_cbc',
'_mock_cplex',
'_mock_glpk',
'cbc',
'cplex',
'glpk',
'scip',
'stest3',
'asl',
]
tmp.sort()
self.assertTrue(
set(tmp) <= set(ans), msg="Set %s is not a subset of set %s" % (tmp, ans)
)
def test_solver_instance(self):
"""
Testing that we get a specific solver instance
"""
ans = SolverFactory("none")
self.assertTrue(isinstance(ans, UnknownSolver))
ans = SolverFactory("_mock_cbc")
self.assertEqual(type(ans), MockCBC)
ans = SolverFactory("_mock_cbc", name="mymock")
self.assertEqual(type(ans), MockCBC)
self.assertEqual(ans.name, "mymock")
def test_solver_registration(self):
"""
Testing methods in the solverwriter factory registration process
"""
SolverFactory.unregister('stest3')
self.assertTrue('stest3' not in SolverFactory)
SolverFactory.register('stest3')(MockSolver)
self.assertTrue('stest3' in SolverFactory)
self.assertTrue('_mock_cbc' in SolverFactory)
def METHOD_NAME(self):
"""
Testing the pyomo.opt writer factory with MIP writers
"""
WriterFactory.register('wtest3')(MockWriter)
factory = WriterFactory
self.assertTrue(set(['wtest3']) <= set(factory))
def test_writer_instance(self):
"""
Testing that we get a specific writer instance
Note: this simply provides code coverage right now, but
later it should be adapted to generate a specific writer.
"""
ans = WriterFactory("none")
self.assertEqual(ans, None)
ans = WriterFactory("wtest3")
self.assertNotEqual(ans, None)
def test_writer_registration(self):
"""
Testing methods in the writer factory registration process
"""
WriterFactory.unregister('wtest3')
self.assertTrue(not 'wtest3' in WriterFactory)
WriterFactory.register('wtest3')(MockWriter)
self.assertTrue('wtest3' in WriterFactory)
def test_reader_factory(self):
"""
Testing the pyomo.opt reader factory
"""
ReaderFactory.register('rtest3')(MockReader)
ans = ReaderFactory
# self.assertEqual(len(ans),4)
self.assertTrue(set(ans) >= set(["rtest3", "sol", "yaml", "json"]))
def test_reader_instance(self):
"""
Testing that we get a specific reader instance
"""
ans = ReaderFactory("none")
self.assertEqual(ans, None)
ans = ReaderFactory("sol")
self.assertEqual(type(ans), ResultsReader_sol)
# ans = pyomo.opt.ReaderFactory("osrl", "myreader")
# self.assertEqual(type(ans), pyomo.opt.reader.OS.ResultsReader_osrl)
# self.assertEqual(ans.name, "myreader")
def test_reader_registration(self):
"""
Testing methods in the reader factory registration process
"""
ReaderFactory.unregister('rtest3')
self.assertTrue(not 'rtest3' in ReaderFactory)
ReaderFactory.register('rtest3')(MockReader)
self.assertTrue('rtest3' in ReaderFactory)
if __name__ == "__main__":
unittest.main() |
299,930 | test set endpoint | # -*- coding: UTF-8 -*-
#tests/unit/test_textInfos.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2018 NV Access Limited, Babbage B.V.
"""Unit tests for the textInfos module, its submodules and classes."""
import unittest
from .textProvider import BasicTextProvider
import textInfos
from textInfos.offsets import Offsets
class TestCharacterOffsets(unittest.TestCase):
"""
Tests for textInfos.offsets.OffsetsTextInfo for its ability to deal with
UTF-16 surrogate characters (i.e. whether a surrogate pair is treated as one character).
These tests are also implicit tests for the textUtils module,
as its logic is used for character offset calculation in wide character strings.
"""
def test_nonSurrogateForward(self):
obj = BasicTextProvider(text="abc")
ti = obj.makeTextInfo(Offsets(0, 0))
ti.expand(textInfos.UNIT_CHARACTER) # Range at a
self.assertEqual(ti.offsets, (0, 1)) # One offset
ti.move(textInfos.UNIT_CHARACTER, 1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at b
self.assertEqual(ti.offsets, (1, 2)) # One offset
ti.move(textInfos.UNIT_CHARACTER, 1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at c
self.assertEqual(ti.offsets, (2, 3)) # One offset
def test_nonSurrogateBackward(self):
obj = BasicTextProvider(text="abc")
ti = obj.makeTextInfo(Offsets(2, 2))
ti.expand(textInfos.UNIT_CHARACTER) # Range at c
self.assertEqual(ti.offsets, (2, 3)) # One offset
ti.move(textInfos.UNIT_CHARACTER, -1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at b
self.assertEqual(ti.offsets, (1, 2)) # One offset
ti.move(textInfos.UNIT_CHARACTER, -1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at a
self.assertEqual(ti.offsets, (0, 1)) # One offset
def test_surrogatePairsForward(self):
obj = BasicTextProvider(text=u"\U0001f926\U0001f60a\U0001f44d") # 🤦😊👍
ti = obj.makeTextInfo(Offsets(0, 0))
ti.expand(textInfos.UNIT_CHARACTER) # Range at 🤦
self.assertEqual(ti.offsets, (0, 2)) # Two offsets
ti.move(textInfos.UNIT_CHARACTER, 1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at 😊
self.assertEqual(ti.offsets, (2, 4)) # Two offsets
ti.move(textInfos.UNIT_CHARACTER, 1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at 👍
self.assertEqual(ti.offsets, (4, 6)) # Two offsets
def test_surrogatePairsBackward(self):
obj = BasicTextProvider(text=u"\U0001f926\U0001f60a\U0001f44d") # 🤦😊👍
ti = obj.makeTextInfo(Offsets(5, 5))
ti.expand(textInfos.UNIT_CHARACTER) # Range at 👍
self.assertEqual(ti.offsets, (4, 6)) # Two offsets
ti.move(textInfos.UNIT_CHARACTER, -1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at 😊
self.assertEqual(ti.offsets, (2, 4)) # Two offsets
ti.move(textInfos.UNIT_CHARACTER, -1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at 🤦
self.assertEqual(ti.offsets, (0, 2)) # Two offsets
def test_mixedSurrogatePairsAndNonSurrogatesForward(self):
obj = BasicTextProvider(text=u"a\U0001f926b") # a🤦b
ti = obj.makeTextInfo(Offsets(0, 0))
ti.expand(textInfos.UNIT_CHARACTER) # Range at a
self.assertEqual(ti.offsets, (0, 1)) # One offset
ti.move(textInfos.UNIT_CHARACTER, 1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at 🤦
self.assertEqual(ti.offsets, (1, 3)) # Two offsets
ti.move(textInfos.UNIT_CHARACTER, 1)
		ti.expand(textInfos.UNIT_CHARACTER) # Range at b
self.assertEqual(ti.offsets, (3, 4)) # One offset
def test_mixedSurrogatePairsAndNonSurrogatesBackward(self):
obj = BasicTextProvider(text=u"a\U0001f926b") # a🤦b
ti = obj.makeTextInfo(Offsets(3, 3))
		ti.expand(textInfos.UNIT_CHARACTER) # Range at b
self.assertEqual(ti.offsets, (3, 4)) # One offset
ti.move(textInfos.UNIT_CHARACTER, -1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at 🤦
self.assertEqual(ti.offsets, (1, 3)) # Two offsets
ti.move(textInfos.UNIT_CHARACTER, -1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at a
self.assertEqual(ti.offsets, (0, 1)) # One offset
def test_mixedSurrogatePairsNonSurrogatesAndSingleSurrogatesForward(self):
"""
Tests surrogate pairs, non surrogates as well as
single surrogate characters (i.e. incomplete pairs)
"""
obj = BasicTextProvider(text=u"a\ud83e\U0001f926\udd26b")
ti = obj.makeTextInfo(Offsets(0, 0))
ti.expand(textInfos.UNIT_CHARACTER) # Range at a
self.assertEqual(ti.offsets, (0, 1)) # One offset
ti.move(textInfos.UNIT_CHARACTER, 1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at 🠀
self.assertEqual(ti.offsets, (1, 2)) # Leading surrogate without a trailing surrogate
ti.move(textInfos.UNIT_CHARACTER, 1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at 🤦
self.assertEqual(ti.offsets, (2, 4)) # Two offsets
ti.move(textInfos.UNIT_CHARACTER, 1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at
self.assertEqual(ti.offsets, (4, 5)) # Trailing surrogate without a leading surrogate.
ti.move(textInfos.UNIT_CHARACTER, 1)
		ti.expand(textInfos.UNIT_CHARACTER) # Range at b
self.assertEqual(ti.offsets, (5, 6)) # One offset
def test_mixedSurrogatePairsNonSurrogatesAndSingleSurrogatesBackward(self):
obj = BasicTextProvider(text=u"a\ud83e\U0001f926\udd26b")
ti = obj.makeTextInfo(Offsets(5, 5))
		ti.expand(textInfos.UNIT_CHARACTER) # Range at b
self.assertEqual(ti.offsets, (5, 6)) # One offset
ti.move(textInfos.UNIT_CHARACTER, -1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at
self.assertEqual(ti.offsets, (4, 5)) # Trailing surrogate without a leading surrogate.
ti.move(textInfos.UNIT_CHARACTER, -1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at 🤦
self.assertEqual(ti.offsets, (2, 4)) # Two offsets
ti.move(textInfos.UNIT_CHARACTER, -1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at 🠀
self.assertEqual(ti.offsets, (1, 2)) # Leading surrogate without a trailing surrogate
ti.move(textInfos.UNIT_CHARACTER, -1)
ti.expand(textInfos.UNIT_CHARACTER) # Range at a
self.assertEqual(ti.offsets, (0, 1)) # One offset
class TestEndpoints(unittest.TestCase):
def test_TextInfoEndpoint_largerAndSmaller(self):
obj = BasicTextProvider(text="abcdef")
ti = obj.makeTextInfo(Offsets(0, 2))
smaller = ti.start
larger = ti.end
self.assertTrue(smaller < larger)
self.assertFalse(larger < smaller)
self.assertTrue(smaller <= larger)
self.assertFalse(larger <= smaller)
self.assertFalse(smaller >= larger)
self.assertTrue(larger >= smaller)
self.assertFalse(smaller > larger)
self.assertTrue(larger > smaller)
self.assertTrue(smaller != larger)
self.assertTrue(larger != smaller)
def test_TextInfoEndpoint_equal(self):
obj = BasicTextProvider(text="abcdef")
ti = obj.makeTextInfo(Offsets(1, 1))
self.assertTrue(ti.start == ti.end)
self.assertFalse(ti.start != ti.end)
self.assertFalse(ti.start < ti.end)
self.assertTrue(ti.start <= ti.end)
self.assertTrue(ti.start >= ti.end)
self.assertFalse(ti.start > ti.end)
def METHOD_NAME(self):
obj = BasicTextProvider(text="abcdef")
ti1 = obj.makeTextInfo(Offsets(0, 2))
ti2 = obj.makeTextInfo(Offsets(3, 5))
ti1.end = ti2.end
self.assertEqual((ti1._startOffset, ti1._endOffset), (0, 5))
ti1.start = ti2.start
self.assertEqual((ti1._startOffset, ti1._endOffset), (3, 5))
ti1.end = ti2.start
self.assertEqual((ti1._startOffset, ti1._endOffset), (3, 3))
ti1.start = ti2.end
self.assertEqual((ti1._startOffset, ti1._endOffset), (5, 5)) |
299,931 | get stations with order | """ HFI admin logic """
from collections import defaultdict
from itertools import groupby
from operator import attrgetter
from typing import Dict, List, Set, Tuple
from datetime import datetime
from app.db.models.hfi_calc import PlanningWeatherStation
from app.schemas.hfi_calc import HFIAdminAddedStation
def update_stations(stations_to_remove: List[PlanningWeatherStation],
all_planning_area_stations: List[PlanningWeatherStation],
to_add: List[HFIAdminAddedStation],
timestamp: datetime,
username: str) -> List[PlanningWeatherStation]:
"""
Orchestrates removal and addition of stations
"""
stations_marked_for_removal, stations_with_order_updates = remove_stations(
stations_to_remove, all_planning_area_stations, timestamp, username)
next_order_by_planning_area = get_next_order_by_planning_area(
stations_with_order_updates, all_planning_area_stations)
stations_to_add = add_stations(to_add, next_order_by_planning_area, timestamp, username)
return stations_marked_for_removal + stations_with_order_updates + stations_to_add
def remove_stations(remove_station_list: List[PlanningWeatherStation],
all_planning_area_stations: List[PlanningWeatherStation],
timestamp: datetime,
username: str):
"""
Marks stations for removal and update station ordering for planning area
"""
stations_to_remove = []
planning_areas_with_removals = defaultdict(set)
# Mark stations for removal and track their orders for updating other stations in planning area
for station in remove_station_list:
station.update_timestamp = timestamp
station.update_user = username
station.is_deleted = True
        planning_areas_with_removals[station.planning_area_id].add(
            (station.station_code, station.order_of_appearance_in_planning_area_list))
station.order_of_appearance_in_planning_area_list = None
stations_to_remove.append(station)
# Handle order updates
stations_with_order_updates = update_station_ordering(planning_areas_with_removals, all_planning_area_stations)
return stations_to_remove, stations_with_order_updates
def update_station_ordering(planning_areas_with_removals: Dict[int, Set[Tuple[int, int]]],
all_planning_area_stations: List[PlanningWeatherStation]):
"""
Given a dict of [planning_area_id] -> (station_code, order),
indicating a station removed from a planning area, and list of all stations
for the keyed planning areas, update the order of the stations.
"""
stations_with_order_updates = []
key = attrgetter('planning_area_id')
    all_stations_by_planning_area = dict((k, list(values))
                                         for k, values in groupby(sorted(all_planning_area_stations, key=key), key))
for planning_area_id, removed_stations in planning_areas_with_removals.items():
all_stations = all_stations_by_planning_area.get(planning_area_id, None)
if all_stations is not None:
other_stations = METHOD_NAME(get_other_stations(removed_stations, all_stations))
sorted_other_stations: List[PlanningWeatherStation] = sorted(
other_stations, key=attrgetter('order_of_appearance_in_planning_area_list'))
for idx, sorted_station in enumerate(sorted_other_stations):
sorted_station.order_of_appearance_in_planning_area_list = idx + 1
stations_with_order_updates.append(sorted_station)
return stations_with_order_updates
def get_other_stations(stations_removed: Set[Tuple[int, int]], all_stations: List[PlanningWeatherStation]):
"""
Given a set of removed stations, {(station_code, order), ...},
and list of all stations, return a list of stations not in set
"""
return list(filter(
lambda x: (x.station_code, x.order_of_appearance_in_planning_area_list) not in stations_removed,
all_stations))
def METHOD_NAME(stations: List[PlanningWeatherStation]):
"""
Returns list of stations that have an order
"""
return list(filter(lambda x: x.order_of_appearance_in_planning_area_list is not None, stations))
def add_stations(stations_to_add: List[HFIAdminAddedStation],
next_order_by_planning_area: Dict[int, int],
timestamp: datetime,
username: str) -> List[PlanningWeatherStation]:
"""
Given a list of station data to add, and the next order for a station for each planning area,
return the station data and order as planning weather stations.
"""
added_stations: List[PlanningWeatherStation] = []
for station_to_add in stations_to_add:
order = next_order_by_planning_area.get(station_to_add.planning_area_id, 1)
station = PlanningWeatherStation(
planning_area_id=station_to_add.planning_area_id,
station_code=station_to_add.station_code,
order_of_appearance_in_planning_area_list=order,
fuel_type_id=station_to_add.fuel_type_id,
create_user=username,
update_user=username,
create_timestamp=timestamp,
update_timestamp=timestamp,
is_deleted=False
)
added_stations.append(station)
next_order_by_planning_area[station.planning_area_id] = order + 1
return added_stations
def get_next_order_by_planning_area(station_with_order_updates: List[PlanningWeatherStation],
all_planning_area_stations: List[PlanningWeatherStation]) -> Dict[int, int]:
""" Return next highest ordering for each planning area """
next_order_by_planning_area = {}
key = attrgetter('planning_area_id')
    updated_stations_by_planning_area = dict((k, list(values))
                                             for k, values in groupby(sorted(station_with_order_updates, key=key), key))
    all_stations_by_planning_area = dict((k, list(values))
                                         for k, values in groupby(sorted(all_planning_area_stations, key=key), key))
for planning_area_id, planning_area_stations in all_stations_by_planning_area.items():
updated_stations = updated_stations_by_planning_area.get(planning_area_id, [])
next_order_by_planning_area[planning_area_id] = get_next_order(updated_stations, planning_area_stations)
return next_order_by_planning_area
def get_next_order(updated_stations: List[PlanningWeatherStation], other_stations: List[PlanningWeatherStation]):
"""
Returns the next order for a list of planning stations based on updated and existing stations.
Updated stations include additions and removals, so the next order could be smaller than the
max order in the existing stations list.
"""
updated_orders = [station.order_of_appearance_in_planning_area_list for station in updated_stations]
# An existing station could be removed and hence have no order
existing_orders = [
station.order_of_appearance_in_planning_area_list for station in other_stations
if station.order_of_appearance_in_planning_area_list is not None]
if len(updated_orders) == 0:
if len(existing_orders) == 0:
return 1
return max(existing_orders) + 1
return max(updated_orders) + 1
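# Editor-added sketch (hypothetical data, illustrative only): with two surviving
# stations re-ordered to 1 and 2 and no other ordered stations in the planning area,
# the next station added there receives order 3.
def _example_next_order():
    updated = [
        PlanningWeatherStation(order_of_appearance_in_planning_area_list=i)
        for i in (1, 2)
    ]
    return get_next_order(updated, [])  # -> 3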
def get_unique_planning_area_ids(stations: List[PlanningWeatherStation]):
return list({station.planning_area_id for station in stations}) |
299,932 | hardening edp | import sys
sys.path.append('../')
import numpy as np
import elastoPlasticWellboreAnalyticalSolutions as epwAnal
#Note: this solution is developed by Chen and Abousleiman (2017) for the case with zero cohesion
def elastic_plastic_boundary_EDP(sh,sv,q_ep_boundary):
# See eq. 25 of Chen and Abousleiman 2013
sr0 = sh - np.sqrt(sh**2 - (4.0 * sh**2 + sv**2 - 2 * sh * sv - q_ep_boundary**2) / 3.0)
s00 = 2.0*sh - sr0
sz0 = sv
return sr0, s00, sz0
def dFdpq_EDP(p, q):
#F = q-tan(beta)*p for zero cohesion
#F=0 -> tan(beta) = q/p
param_b = q/p
cos2Beta = 1.0/(param_b**2.0 + 1.0)
dFdp = -param_b
dFdq = 1.0
dFdbeta = -p/cos2Beta
return dFdp, dFdq, dFdbeta
def METHOD_NAME(param_b, param_m, param_b_f, param_b_i):
# gammaP is the deviatoric plastic strain
# param_b = gammaP/(c + gammaP) * (param_b_f - param_b_i) + param_b_i
# that yields: gammaP/(c + gammaP) = (param_b-param_b_i)/(param_b_f - param_b_i)
# or c/(c + gammaP) = (param_b_f-param_b)/(param_b_f - param_b_i)
# The derivative of the hardening law gives: dparam_bdGammaP = (param_b_f-param_b_i)c/(c + gammaP)**2
# or dparam_bdGammaP = (param_b_f-param_b)**2/(param_b_f - param_b_i)/c
# Finally dBetadGammaP = cos2Beta*(param_b_f-param_b)**2/(param_b_f - param_b_i)/c
# Validation against Chen and Abousleiman 2017
# h = -dFdbeta*dBetadGammaP
# h = p*(param_b_f-param_b)**2/(param_b_f - param_b_i)/c
# y = 1/h = c(param_b_f - param_b_i)p/(p*param_b_f - q)**2.0 that is identical to Chen and Abousleiman 2017
cos2Beta = 1.0/(param_b**2+1.0)
return cos2Beta*(param_b_f-param_b)**2/(param_b_f - param_b_i)/param_m
def compute_q_ep_boudary_EDP(sh,sv,param_b_i):
p0,q0 = epwAnal.invariants(sh,sh,sv)
return param_b_i*p0
def compute_param_b(frictionAngle):
sin_frictionAngle = np.sin(frictionAngle)
return 6.0*sin_frictionAngle/(3.0-sin_frictionAngle)
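# Editor-added check (illustrative only): for a 30 degree friction angle sin(phi) = 0.5,
# so param_b = 6*0.5/(3 - 0.5) = 1.2, the Drucker-Prager slope matched to Mohr-Coulomb
# in triaxial compression.
def _example_param_b_30deg():
    return compute_param_b(30.0*np.pi/180.0)  # ~1.2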
def EDP(a0_a_ratio, sh, sv, nu, a0, G, initialFrictionAngle, finalFrictionAngle, param_m):
a = a0/a0_a_ratio
xi_well = 1.0 - a0/a # the auxiliary variable xi at the wellbore, xi = (a-a0)/a = 1-1/(a/a0)
nPoints = 1000
initialFrictionAngle *= np.pi/180.0 # converted to rad
finalFrictionAngle *= np.pi/180.0 # converted to rad
param_b_i = compute_param_b(initialFrictionAngle)
param_b_f = compute_param_b(finalFrictionAngle)
# ELastic trial
pw = 2.0*G*xi_well + sh
r_e,sr_e,s0_e,sz_e = epwAnal.solution_elastic(sh,sv,1.0,pw)
p_e, q_e = epwAnal.invariants(sr_e[0],s0_e[0],sz_e[0])
if ((q_e/p_e)<param_b_i): # Pure elastic wellbore
return [0],[0],[0],[0],r_e,sr_e,s0_e,sz_e,pw,p_e,q_e
else: # Elastic-plastic wellbore
q_ep_boundary = compute_q_ep_boudary_EDP(sh,sv,param_b_i)
# Elastic-Plastic boundary
sr_ep_boundary, s0_ep_boundary, sz_ep_boundary = elastic_plastic_boundary_EDP(sh,sv,q_ep_boundary)
sr_p = [sr_ep_boundary]
s0_p = [s0_ep_boundary]
sz_p = [sz_ep_boundary]
epsV_p = [0] # strain calculated starting from the reference state that is the ep boundary
# Eq. 36 in Chen and Abousleiman 2017
xi_ep_boundary = (sr_ep_boundary - sh) / 2.0 / G
# Mesh from elastic-plastic boundary to wellbore
dxi = (xi_well - xi_ep_boundary) / (nPoints - 1)
xi = np.linspace(xi_ep_boundary, xi_well, nPoints)
for i in range(1, nPoints):
p,q = epwAnal.invariants(sr_p[i-1],s0_p[i-1],sz_p[i-1])
exp_epsilon_V = np.exp(epsV_p[i-1])# epsV is the volumetric strain from the elastic-plastic boundary
E = 2.0 * G * (1.0 + nu)
param_b = q/p # yield condition F=0
dFdp, dFdq, dFdbeta = dFdpq_EDP(p,q)
dBetadGammaP = METHOD_NAME(param_b,param_m,param_b_f,param_b_i)
            dGdq = dFdq  # associated flow rule (G = F)
dFdHardningParam,dHardningParamdPlasticVar,dPotentialdStressVar = dFdbeta,dBetadGammaP,dGdq
sr_i, s0_i, sz_i, epsV_i = epwAnal.solution_plastic(sr_p[i - 1], s0_p[i - 1], sz_p[i - 1], epsV_p[i-1], xi[i - 1], dxi, dFdp, dFdq, E, nu, dFdHardningParam,dHardningParamdPlasticVar,dPotentialdStressVar)
sr_p.append(sr_i)
s0_p.append(s0_i)
sz_p.append(sz_i)
epsV_p.append(epsV_i)
# Wellbore surface stress
pw = sr_i
p = (sr_i + s0_i + sz_i)/3.0
q = np.sqrt(0.5) * np.sqrt( (sr_i-s0_i)**2.0 + (sr_i-sz_i)**2.0 + (s0_i-sz_i)**2.0 )
# Compute the normalized radial coordinate r/a
r_p = epwAnal.compute_radialCoordinate(xi, epsV_p)
# Elastic zone
r_ep_boundary = r_p[0]
r_e,sr_e,s0_e,sz_e = epwAnal.solution_elastic(sh,sv,r_ep_boundary,sr_ep_boundary)
return r_p,sr_p,s0_p,sz_p,r_e,sr_e,s0_e,sz_e,pw,p,q
|
299,933 | test get failure repeat record count | import uuid
from datetime import datetime, timedelta
from django.test import TestCase
from corehq.motech.repeaters.const import RECORD_PENDING_STATE
from corehq.motech.repeaters.dbaccessors import (
get_cancelled_repeat_record_count,
get_domains_that_have_repeat_records,
get_failure_repeat_record_count,
get_overdue_repeat_record_count,
get_paged_repeat_records,
get_pending_repeat_record_count,
get_repeat_record_count,
get_repeat_records_by_payload_id,
get_success_repeat_record_count,
iter_repeat_records_by_domain,
iterate_repeat_record_ids,
)
from corehq.motech.repeaters.models import RepeatRecord
class TestRepeatRecordDBAccessors(TestCase):
repeater_id = '1234'
other_id = '5678'
domain = 'test-domain-2'
@classmethod
def setUpClass(cls):
super(TestRepeatRecordDBAccessors, cls).setUpClass()
before = datetime.utcnow() - timedelta(minutes=5)
cls.payload_id_1 = uuid.uuid4().hex
cls.payload_id_2 = uuid.uuid4().hex
failed = RepeatRecord(
domain=cls.domain,
failure_reason='Some python error',
repeater_id=cls.repeater_id,
next_check=before,
payload_id=cls.payload_id_1,
)
failed_hq_error = RepeatRecord(
domain=cls.domain,
failure_reason='Some python error',
repeater_id=cls.repeater_id,
next_check=before,
payload_id=cls.payload_id_1,
)
failed_hq_error.doc_type += '-Failed'
success = RepeatRecord(
domain=cls.domain,
succeeded=True,
repeater_id=cls.repeater_id,
next_check=before,
payload_id=cls.payload_id_2,
)
pending = RepeatRecord(
domain=cls.domain,
succeeded=False,
repeater_id=cls.repeater_id,
next_check=before,
payload_id=cls.payload_id_2,
)
overdue = RepeatRecord(
domain=cls.domain,
succeeded=False,
repeater_id=cls.repeater_id,
next_check=before - timedelta(minutes=10),
payload_id=cls.payload_id_2,
)
cancelled = RepeatRecord(
domain=cls.domain,
succeeded=False,
cancelled=True,
repeater_id=cls.repeater_id,
next_check=before,
payload_id=cls.payload_id_2,
)
empty = RepeatRecord(
domain=cls.domain,
succeeded=True,
cancelled=True,
repeater_id=cls.repeater_id,
next_check=before,
payload_id=cls.payload_id_2,
)
other_id = RepeatRecord(
domain=cls.domain,
succeeded=False,
repeater_id=cls.other_id,
next_check=before,
payload_id=cls.payload_id_2,
)
cls.records = [
failed,
failed_hq_error,
success,
pending,
overdue,
cancelled,
empty,
other_id,
]
for record in cls.records:
record.save()
@classmethod
def tearDownClass(cls):
for record in cls.records:
record.delete()
super(TestRepeatRecordDBAccessors, cls).tearDownClass()
def test_get_pending_repeat_record_count(self):
count = get_pending_repeat_record_count(self.domain, self.repeater_id)
self.assertEqual(count, 2)
def test_get_success_repeat_record_count(self):
count = get_success_repeat_record_count(self.domain, self.repeater_id)
self.assertEqual(count, 2) # Empty records are included
def METHOD_NAME(self):
count = get_failure_repeat_record_count(self.domain, self.repeater_id)
self.assertEqual(count, 2)
def test_get_cancelled_repeat_record_count(self):
count = get_cancelled_repeat_record_count(self.domain, self.repeater_id)
self.assertEqual(count, 1) # Empty records are not included
def test_get_repeat_record_count_with_state_and_no_repeater(self):
count = get_repeat_record_count(self.domain, state=RECORD_PENDING_STATE)
self.assertEqual(count, 3)
def test_get_repeat_record_count_with_repeater_id_and_no_state(self):
count = get_repeat_record_count(self.domain, repeater_id=self.other_id)
self.assertEqual(count, 1)
def test_get_paged_repeat_records_with_state_and_no_records(self):
count = get_repeat_record_count('wrong-domain', state=RECORD_PENDING_STATE)
self.assertEqual(count, 0)
def test_get_paged_repeat_records(self):
records = get_paged_repeat_records(self.domain, 0, 2)
self.assertEqual(len(records), 2)
def test_get_paged_repeat_records_with_repeater_id(self):
records = get_paged_repeat_records(self.domain, 0, 2, repeater_id=self.other_id)
self.assertEqual(len(records), 1)
def test_get_paged_repeat_records_with_state(self):
records = get_paged_repeat_records(self.domain, 0, 10, state=RECORD_PENDING_STATE)
self.assertEqual(len(records), 3)
def test_get_paged_repeat_records_wrong_domain(self):
records = get_paged_repeat_records('wrong-domain', 0, 2)
self.assertEqual(len(records), 0)
def test_get_all_paged_repeat_records(self):
records = get_paged_repeat_records(self.domain, 0, 10)
self.assertEqual(len(records), len(self.records)) # get all the records that were created
def test_iterate_repeat_records(self):
records = list(iterate_repeat_record_ids(datetime.utcnow(), chunk_size=2))
self.assertEqual(len(records), 4) # Should grab all but the succeeded one
def test_get_overdue_repeat_record_count(self):
overdue_count = get_overdue_repeat_record_count()
self.assertEqual(overdue_count, 1)
def test_get_all_repeat_records_by_domain_wrong_domain(self):
records = list(iter_repeat_records_by_domain("wrong-domain"))
self.assertEqual(len(records), 0)
def test_get_all_repeat_records_by_domain_with_repeater_id(self):
records = list(iter_repeat_records_by_domain(self.domain, repeater_id=self.repeater_id))
self.assertEqual(len(records), 7)
def test_get_all_repeat_records_by_domain(self):
records = list(iter_repeat_records_by_domain(self.domain))
self.assertEqual(len(records), len(self.records))
def test_get_repeat_records_by_payload_id(self):
id_1_records = list(get_repeat_records_by_payload_id(self.domain, self.payload_id_1))
self.assertEqual(len(id_1_records), 2)
self.assertItemsEqual([r._id for r in id_1_records], [r._id for r in self.records[:2]])
id_2_records = list(get_repeat_records_by_payload_id(self.domain, self.payload_id_2))
self.assertEqual(len(id_2_records), 6)
self.assertItemsEqual([r._id for r in id_2_records], [r._id for r in self.records[2:]])
class TestOtherDBAccessors(TestCase):
@classmethod
def setUpClass(cls):
super(TestOtherDBAccessors, cls).setUpClass()
cls.records = [
RepeatRecord(domain='a'),
RepeatRecord(domain='b'),
RepeatRecord(domain='c'),
]
RepeatRecord.bulk_save(cls.records)
@classmethod
def tearDownClass(cls):
RepeatRecord.bulk_delete(cls.records)
super(TestOtherDBAccessors, cls).tearDownClass()
def test_get_domains_that_have_repeat_records(self):
self.assertEqual(get_domains_that_have_repeat_records(), ['a', 'b', 'c']) |
299,934 | convert unix date | from pandas.errors import OutOfBoundsDatetime
import datetime as dt
from typing import Hashable
import pandas_flavor as pf
import pandas as pd
from pandas.api.types import is_numeric_dtype
from janitor.utils import deprecated_alias
@pf.register_dataframe_method
@deprecated_alias(column="column_name")
def convert_excel_date(
df: pd.DataFrame, column_name: Hashable
) -> pd.DataFrame:
"""Convert Excel's serial date format into Python datetime format.
This method mutates the original DataFrame.
Implementation is also from
[Stack Overflow](https://stackoverflow.com/questions/38454403/convert-excel-style-date-with-pandas).
Examples:
>>> import pandas as pd
>>> import janitor
>>> df = pd.DataFrame({"date": [39690, 39690, 37118]})
>>> df
date
0 39690
1 39690
2 37118
>>> df.convert_excel_date('date')
date
0 2008-08-30
1 2008-08-30
2 2001-08-15
Args:
df: A pandas DataFrame.
column_name: A column name.
Raises:
ValueError: If there are non numeric values in the column.
Returns:
A pandas DataFrame with corrected dates.
""" # noqa: E501
if not is_numeric_dtype(df[column_name]):
raise ValueError(
"There are non-numeric values in the column. "
"All values must be numeric."
)
df[column_name] = pd.TimedeltaIndex(
df[column_name], unit="d"
) + dt.datetime(
1899, 12, 30
) # noqa: W503
return df
@pf.register_dataframe_method
@deprecated_alias(column="column_name")
def convert_matlab_date(
df: pd.DataFrame, column_name: Hashable
) -> pd.DataFrame:
"""Convert Matlab's serial date number into Python datetime format.
Implementation is also from
[Stack Overflow](https://stackoverflow.com/questions/13965740/converting-matlabs-datenum-format-to-python).
This method mutates the original DataFrame.
Examples:
>>> import pandas as pd
>>> import janitor
>>> df = pd.DataFrame({"date": [737125.0, 737124.815863, 737124.4985, 737124]})
>>> df
date
0 737125.000000
1 737124.815863
2 737124.498500
3 737124.000000
>>> df.convert_matlab_date('date')
date
0 2018-03-06 00:00:00.000000
1 2018-03-05 19:34:50.563200
2 2018-03-05 11:57:50.399999
3 2018-03-05 00:00:00.000000
Args:
df: A pandas DataFrame.
column_name: A column name.
Returns:
A pandas DataFrame with corrected dates.
""" # noqa: E501
days = pd.Series([dt.timedelta(v % 1) for v in df[column_name]])
df[column_name] = (
df[column_name].astype(int).apply(dt.datetime.fromordinal)
+ days
- dt.timedelta(days=366)
)
return df
@pf.register_dataframe_method
@deprecated_alias(column="column_name")
def METHOD_NAME(df: pd.DataFrame, column_name: Hashable) -> pd.DataFrame:
"""Convert unix epoch time into Python datetime format.
    Note that this ignores the local timezone and converts all timestamps
    to naive datetimes based on UTC.
This method mutates the original DataFrame.
Examples:
>>> import pandas as pd
>>> import janitor
>>> df = pd.DataFrame({"date": [1651510462, 53394822, 1126233195]})
>>> df
date
0 1651510462
1 53394822
2 1126233195
>>> df.convert_unix_date('date')
date
0 2022-05-02 16:54:22
1 1971-09-10 23:53:42
2 2005-09-09 02:33:15
Args:
df: A pandas DataFrame.
column_name: A column name.
Returns:
A pandas DataFrame with corrected dates.
"""
try:
df[column_name] = pd.to_datetime(df[column_name], unit="s")
except OutOfBoundsDatetime: # Indicates time is in milliseconds.
df[column_name] = pd.to_datetime(df[column_name], unit="ms")
return df |
299,935 | get mock client | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from unittest.mock import MagicMock
from knack.util import CLIError
from azure.cli.command_modules.resource.custom import (_ResourceUtils, _validate_resource_inputs,
parse_resource_id)
class TestApiCheck(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
pass
def test_parse_resource(self):
parts = parse_resource_id('/subscriptions/00000/resourcegroups/bocconitestlabrg138089/'
'providers/microsoft.devtestlab/labs/bocconitestlab/'
'virtualmachines/tasktest1')
self.assertIsNotNone(parts.get('type'))
def test_parse_resource_capital(self):
parts = parse_resource_id('/subscriptions/00000/resourceGroups/bocconitestlabrg138089/'
'providers/microsoft.devtestlab/labs/bocconitestlab/'
'virtualmachines/tasktest1')
self.assertIsNotNone(parts.get('type'))
def test_validate_resource_inputs(self):
self.assertRaises(CLIError, _validate_resource_inputs, None, None, None, None)
self.assertRaises(CLIError, _validate_resource_inputs, 'a', None, None, None)
self.assertRaises(CLIError, _validate_resource_inputs, 'a', 'b', None, None)
self.assertRaises(CLIError, _validate_resource_inputs, 'a', 'b', 'c', None)
_validate_resource_inputs('a', 'b', 'c', 'd')
def test_resolve_api_provider_backup(self):
# Verifies provider is used as backup if api-version not specified.
from azure.cli.core.mock import DummyCli
cli = DummyCli()
rcf = self.METHOD_NAME()
res_utils = _ResourceUtils(cli, resource_type='Mock/test', resource_name='vnet1',
resource_group_name='rg', rcf=rcf)
self.assertEqual(res_utils.api_version, "2016-01-01")
def test_resolve_api_provider_with_parent_backup(self):
# Verifies provider (with parent) is used as backup if api-version not specified.
from azure.cli.core.mock import DummyCli
cli = DummyCli()
rcf = self.METHOD_NAME()
res_utils = _ResourceUtils(cli, parent_resource_path='foo/testfoo123', resource_group_name='rg',
resource_provider_namespace='Mock', resource_type='test',
resource_name='vnet1',
rcf=rcf)
self.assertEqual(res_utils.api_version, "1999-01-01")
def test_resolve_api_all_previews(self):
# Verifies most recent preview version returned only if there are no non-preview versions.
from azure.cli.core.mock import DummyCli
cli = DummyCli()
rcf = self.METHOD_NAME()
res_utils = _ResourceUtils(cli, resource_type='Mock/preview', resource_name='vnet1',
resource_group_name='rg', rcf=rcf)
self.assertEqual(res_utils.api_version, "2005-01-01-preview")
def test_resolve_api_provider_latest_include_preview(self):
        # Verifies the latest non-preview api-version is used unless latest_include_preview is set.
from azure.cli.core.mock import DummyCli
cli = DummyCli()
rcf = self.METHOD_NAME()
res_utils = _ResourceUtils(cli, resource_type='Mock/test_latest', resource_name='vnet1',
resource_group_name='rg', rcf=rcf)
self.assertEqual(res_utils.api_version, "2015-01-01")
res_utils = _ResourceUtils(cli, resource_type='Mock/test_latest', resource_name='vnet1',
resource_group_name='rg', rcf=rcf, latest_include_preview=True)
self.assertEqual(res_utils.api_version, "2016-01-01-preview")
def METHOD_NAME(self):
client = MagicMock()
provider = MagicMock()
provider.resource_types = [
self._get_mock_resource_type('skip', ['2000-01-01-preview', '2000-01-01']),
self._get_mock_resource_type('test', ['2016-01-01-preview', '2016-01-01']),
self._get_mock_resource_type('foo', ['1999-01-01-preview', '1999-01-01']),
self._get_mock_resource_type('preview', ['2005-01-01-preview', '2004-01-01-preview']),
self._get_mock_resource_type('test_latest', ['2016-01-01-preview', '2015-01-01'])
]
client.providers.get.return_value = provider
return client
def _get_mock_resource_type(self, name, api_versions): # pylint: disable=no-self-use
rt = MagicMock()
rt.resource_type = name
rt.api_versions = api_versions
return rt
if __name__ == '__main__':
unittest.main() |
299,936 | test explain join index scan | import unittest
from unittest.mock import patch
from unittest.mock import Mock
from unittest.mock import MagicMock
from itertools import count
import cal_bitmap_test
from cal_bitmap_test import connect
from cal_bitmap_test import find_crossover
from cal_bitmap_test import explain_index_scan
from cal_bitmap_test import explain_join_scan
class TestStringMethods(unittest.TestCase):
@patch('gppylib.db.dbconn.query')
def test_explain_index_scan(self, mock_query):
mock_query.return_value = Mock()
mock_query.return_value.fetchall.return_value = [
[" Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..8.04 rows=1 width=4)"],
[" -> Index Scan using cal_txtest_i_bitmap_10 on cal_txtest (cost=0.00..8.02 rows=1 width=4)"],
[" Index Cond: (bitmap10 > 42)"]
]
(scan, cost) = explain_index_scan(Mock(), "mock sql query string")
self.assertEqual(scan, cal_bitmap_test.INDEX_SCAN)
self.assertEqual(cost, 8.02)
@patch('gppylib.db.dbconn.query')
def test_explain_index_only_scan(self, mock_query):
mock_query.return_value = Mock()
mock_query.return_value.fetchall.return_value = [
[" Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..8.04 rows=1 width=4)"],
[" -> Index Only Scan using cal_txtest_i_bitmap_10 on cal_txtest (cost=0.00..8.02 rows=1 width=4)"],
[" Index Cond: (bitmap10 > 42)"]
]
(scan, cost) = explain_index_scan(Mock(), "mock sql query string")
self.assertEqual(scan, cal_bitmap_test.INDEX_ONLY_SCAN)
self.assertEqual(cost, 8.02)
@patch('gppylib.db.dbconn.query')
def METHOD_NAME(self, mock_query):
mock_query.return_value = Mock()
mock_query.return_value.fetchall.return_value = [
[" Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..437.00 rows=1 width=4)"],
[" -> Nested Loop (cost=0.00..437.00 rows=1 width=4)"],
[" Join Filter: true"],
[" -> Seq Scan on cal_txtest (cost=0.00..431.00 rows=1 width=4)"],
[" -> Index Scan using cal_txtest_a_idx on cal_txtest cal_txtest_1 (cost=0.00..6.00 rows=1 width=1)"],
[" Index Cond: (a = cal_txtest.a)"]
]
(scan, cost) = explain_join_scan(Mock(), "mock sql query string")
self.assertEqual(scan, cal_bitmap_test.INDEX_SCAN)
self.assertEqual(cost, 437.00)
@patch('gppylib.db.dbconn.query')
def test_explain_join_index_only_scan(self, mock_query):
mock_query.return_value = Mock()
mock_query.return_value.fetchall.return_value = [
[" Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..437.00 rows=1 width=4)"],
[" -> Nested Loop (cost=0.00..437.00 rows=1 width=4)"],
[" Join Filter: true"],
[" -> Broadcast Motion 3:3 (slice2; segments: 3) (cost=0.00..431.00 rows=1 width=4)"],
[" -> Seq Scan on cal_txtest (cost=0.00..431.00 rows=1 width=4)"],
[" -> Index Only Scan using cal_txtest_a_idx on cal_txtest cal_txtest_1 (cost=0.00..6.00 rows=1 width=1)"],
[" Index Cond: (a = cal_txtest.a)"]
]
(scan, cost) = explain_join_scan(Mock(), "mock sql query string")
self.assertEqual(scan, cal_bitmap_test.INDEX_ONLY_SCAN)
self.assertEqual(cost, 437.00)
@patch('cal_bitmap_test.timed_execute_and_check_timeout')
@patch('cal_bitmap_test.execute_sql')
def test_x(self, mock_execute, mock_execute_and_check_timeout):
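        # Explanatory note (not in the original test): given the bounds 0 and 2,
        # find_crossover is expected to call the explain method for both plan ids
        # at each parameter value and collect the reported costs per plan id, as
        # asserted on explainDict below.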
mock_conn = Mock()
mock_setup = Mock()
mock_parameterizeMethod = Mock()
mock_explain_method = Mock()
mock_explain_method.side_effect = [(cal_bitmap_test.INDEX_ONLY_SCAN, 1.1),
(cal_bitmap_test.INDEX_SCAN, 2.1),
(cal_bitmap_test.INDEX_ONLY_SCAN, 1.2),
(cal_bitmap_test.INDEX_SCAN, 2.2),
(cal_bitmap_test.INDEX_ONLY_SCAN, 1.3),
(cal_bitmap_test.INDEX_SCAN, 2.3)
]
mock_reset_method = Mock()
plan_ids = [cal_bitmap_test.INDEX_ONLY_SCAN, cal_bitmap_test.INDEX_SCAN]
mock_force_methods = MagicMock()
explainDict, execDict, errMessages = find_crossover(mock_conn, 0, 2, mock_setup, mock_parameterizeMethod, mock_explain_method,
mock_reset_method, plan_ids, mock_force_methods, 1)
self.assertEqual(explainDict, {0: ('indexonly_scan', 1.1, 1.2, 1.3), 1: ('index_scan', 2.1, 2.2, 2.3)})
if __name__ == '__main__':
unittest.main() |
299,937 | get subgraph | __all__ = ['path_sign_to_signed_nodes', 'signed_nodes_to_signed_edge',
'get_sorted_neighbors', 'get_subgraph', 'Node', 'Edge',
'EdgeFilter', 'SendType']
import logging
import networkx as nx
import functools
from typing import List, Tuple, Union, Optional, Callable, Set
logger = logging.getLogger(__name__)
# Derived type hints
Node = Union[str, Tuple[str, int]]
Edge = Tuple[Node, Node]
EdgeFilter = Callable[[nx.DiGraph, Node, Node], bool]
SendType = Tuple[Optional[Set[Node]], Optional[Set[Edge]]]
def path_sign_to_signed_nodes(source, target, edge_sign):
"""Translates a signed edge or path to valid signed nodes
Pairs with a negative source node are filtered out.
Parameters
----------
source : str|int
The source node
target : str|int
The target node
edge_sign : int
The sign of the edge
Returns
-------
sign_tuple : (a, sign), (b, sign)
Tuple of tuples of the valid combination of signed nodes
"""
# Sign definitions: + == 0, - == 1
# + path -> (a+, b+)
# - path -> (a+, b-)
# (a-, b-) and (a-, b+) are also technically valid but not in this context
try:
if int(edge_sign) == 0:
return (source, 0), (target, 0)
else:
return (source, 1), (target, 0)
except ValueError:
logger.warning('Invalid sign %s when translating edge sign to int'
% edge_sign)
return (None, None), (None, None)
def signed_nodes_to_signed_edge(source, target):
"""Create the triple (node, node, sign) from a pair of signed nodes
Assuming source, target forms an edge of signed nodes:
edge = (a, sign), (b, sign), return the corresponding signed edge triple
Parameters
----------
source : tuple(str|int, sign)
A valid signed node
target : tuple(str|int, sign)
A valid signed node
Returns
-------
tuple
A tuple, (source, target, sign), representing the corresponding
signed edge.
"""
# Sign definitions: + == 0, - == 1
# + edge/path -> (a+, b+) and (a-, b-)
# - edge/path -> (a-, b+) and (a+, b-)
source_name, source_sign = source
target_name, target_sign = target
try:
if int(source_sign) == int(target_sign):
return source_name, target_name, 0
else:
return source_name, target_name, 1
except ValueError:
logger.warning('Error translating signed nodes to signed edge using '
'(%s, %s)' % (source, target))
return None, None, None
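# Worked example of the sign conventions above (illustrative, derived from the
# code; the node names are hypothetical):
#
#   signed_nodes_to_signed_edge(('BRCA1', 0), ('TP53', 0))  ->  ('BRCA1', 'TP53', 0)
#   signed_nodes_to_signed_edge(('BRCA1', 0), ('TP53', 1))  ->  ('BRCA1', 'TP53', 1)
#   path_sign_to_signed_nodes('BRCA1', 'TP53', 0)           ->  (('BRCA1', 0), ('TP53', 0))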
def get_sorted_neighbors(
G: nx.DiGraph,
node: Node,
reverse: bool,
force_edges: Optional[List[Edge]] = None,
edge_filter: Optional[EdgeFilter] = None
) -> List[Node]:
"""Filter and sort neighbors of a node in descending order by belief
Parameters
----------
G
A networkx DiGraph
node
A valid node name or signed node name
reverse
Indicates direction of search. Neighbors are either successors
(downstream search) or predecessors (reverse search).
force_edges
A list of allowed edges. If provided, only allow neighbors that
can be reached by the allowed edges.
edge_filter
        If provided, must be a function that takes three arguments: a graph
        g and the nodes u, v of the edge from u to v. The function must
        return True if the edge is allowed, otherwise False.
Returns
-------
List[Node]
A list of nodes representing the filtered and sorted neighbors
"""
# Check for edge filtering
if force_edges or edge_filter:
neigh_edges = G.in_edges if reverse else G.out_edges
ix = 0 if reverse else 1
edges = set(neigh_edges(node))
if force_edges:
edges = edges.intersection(set(force_edges))
if edge_filter:
neighbors = (e[ix] for e in edges if edge_filter(G, *e))
else:
neighbors = (e[ix] for e in edges)
# No edge filtering applied
else:
neighbors = G.predecessors(node) if reverse else G.successors(node)
# Return neighbors sorted by the edge belief
if reverse:
return sorted(neighbors,
key=lambda n: G.edges[n, node].get('belief', 0),
reverse=True)
else:
return sorted(neighbors,
key=lambda n: G.edges[node, n].get('belief', 0),
reverse=True)
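# A minimal sketch of an edge filter matching the EdgeFilter signature used
# above (the helper name and the 0.9 cutoff are illustrative, not part of the
# original module):
def _example_belief_filter(g: nx.DiGraph, u: Node, v: Node) -> bool:
    """Allow only edges whose 'belief' attribute is at least 0.9."""
    return g.edges[u, v].get('belief', 0) >= 0.9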
def METHOD_NAME(g, edge_filter_func):
"""Get a subgraph of original graph filtered by a provided function."""
logger.info('Getting subgraph with %s function' % edge_filter_func)
view = nx.subgraph_view(
g, filter_edge=functools.partial(edge_filter_func, g))
# Copying to get a graph object instead of view
new_g = view.copy()
return new_g |
299,938 | surface temp | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pyowm.utils import formatting, measurables
class Soil:
"""
Soil data over a specific Polygon
:param reference_time: UTC UNIX time of soil data measurement
:type reference_time: int
:param surface_temp: soil surface temperature in Kelvin degrees
:type surface_temp: float
:param ten_cm_temp: soil temperature at 10 cm depth in Kelvin degrees
:type ten_cm_temp: float
:param moisture: soil moisture in m^3/m^3
:type moisture: float
:param polygon_id: ID of the polygon this soil data was measured upon
:type polygon_id: str
:returns: a `Soil` instance
:raises: `AssertionError` when any of the mandatory fields is `None` or has wrong type
"""
def __init__(self, reference_time, METHOD_NAME, ten_cm_temp, moisture, polygon_id=None):
assert reference_time is not None
assert isinstance(reference_time, int), 'reference time must be a UNIX int timestamp'
if reference_time < 0:
raise ValueError("reference_time must be greater than 0")
self._reference_time = reference_time
assert METHOD_NAME is not None
assert isinstance(METHOD_NAME, (float, int)), 'surface_temp must be a number'
self._surface_temp = METHOD_NAME
assert ten_cm_temp is not None
assert isinstance(ten_cm_temp, (float, int)), 'ten_cm_temp must be a number'
self._ten_cm_temp = ten_cm_temp
assert moisture is not None
assert isinstance(moisture, (float, int)), 'moisture must be a number'
if moisture < 0.:
raise ValueError("moisture must be greater than 0")
self.moisture = moisture
self.polygon_id = polygon_id
def reference_time(self, timeformat='unix'):
"""Returns the UTC time telling when the soil data was measured
:param timeformat: the format for the time value. May be:
'*unix*' (default) for UNIX time
'*iso*' for ISO8601-formatted string in the format ``YYYY-MM-DD HH:MM:SS+00``
            '*date*' for ``datetime.datetime`` object instance
:type timeformat: str
:returns: an int or a str
"""
return formatting.timeformat(self._reference_time, timeformat)
def METHOD_NAME(self, unit='kelvin'):
"""Returns the soil surface temperature
:param unit: the unit of measure for the temperature value. May be:
'*kelvin*' (default), '*celsius*' or '*fahrenheit*'
:type unit: str
:returns: a float
:raises: ValueError when unknown temperature units are provided
"""
if unit == 'kelvin':
return self._surface_temp
if unit == 'celsius':
return measurables.kelvin_to_celsius(self._surface_temp)
if unit == 'fahrenheit':
return measurables.kelvin_to_fahrenheit(self._surface_temp)
else:
raise ValueError('Wrong temperature unit')
def ten_cm_temp(self, unit='kelvin'):
"""Returns the soil temperature measured 10 cm below surface
:param unit: the unit of measure for the temperature value. May be:
'*kelvin*' (default), '*celsius*' or '*fahrenheit*'
:type unit: str
:returns: a float
:raises: ValueError when unknown temperature units are provided
"""
if unit == 'kelvin':
return self._ten_cm_temp
if unit == 'celsius':
return measurables.kelvin_to_celsius(self._ten_cm_temp)
if unit == 'fahrenheit':
return measurables.kelvin_to_fahrenheit(self._ten_cm_temp)
else:
raise ValueError('Wrong temperature unit')
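    # Usage sketch (illustrative values, not part of the original class):
    #
    #   soil = Soil(1601392100, 293.4, 291.8, 0.35, polygon_id='poly-1')
    #   soil.ten_cm_temp(unit='celsius')   # 291.8 K - 273.15 = ~18.65 degrees C
    #   soil.to_dict()['moisture']         # 0.35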
@classmethod
def from_dict(cls, the_dict):
assert isinstance(the_dict, dict)
reference_time = the_dict['reference_time']
METHOD_NAME = the_dict['surface_temp']
ten_cm_temp = the_dict['ten_cm_temp']
moisture = the_dict['moisture']
polygon_id = the_dict.get('polygon_id', None)
return Soil(reference_time, METHOD_NAME, ten_cm_temp, moisture, polygon_id)
def to_dict(self):
return {'reference_time': self._reference_time,
'surface_temp': self._surface_temp,
'ten_cm_temp': self._ten_cm_temp,
'moisture': self.moisture,
'polygon_id': self.polygon_id}
def __repr__(self):
return "<%s.%s - polygon_id=%s,reference time=%s,>" % (__name__, self.__class__.__name__,
self.polygon_id, self.reference_time('iso')) |
299,939 | test models artifact repo init with stage | from unittest import mock
from unittest.mock import Mock
import pytest
from mlflow import MlflowClient
from mlflow.entities.model_registry import ModelVersion
from mlflow.store.artifact.databricks_models_artifact_repo import DatabricksModelsArtifactRepository
from mlflow.store.artifact.models_artifact_repo import ModelsArtifactRepository
from mlflow.store.artifact.unity_catalog_models_artifact_repo import (
UnityCatalogModelsArtifactRepository,
)
from tests.store.artifact.constants import (
UC_MODELS_ARTIFACT_REPOSITORY,
WORKSPACE_MODELS_ARTIFACT_REPOSITORY,
)
@pytest.mark.parametrize(
"uri_with_profile",
[
"models://profile@databricks/MyModel/12",
"models://profile@databricks/MyModel/Staging",
"models://profile@databricks/MyModel/Production",
],
)
def test_models_artifact_repo_init_with_uri_containing_profile(uri_with_profile):
with mock.patch(WORKSPACE_MODELS_ARTIFACT_REPOSITORY, autospec=True) as mock_repo:
models_repo = ModelsArtifactRepository(uri_with_profile)
assert models_repo.artifact_uri == uri_with_profile
assert isinstance(models_repo.repo, DatabricksModelsArtifactRepository)
mock_repo.assert_called_once_with(uri_with_profile)
@pytest.mark.parametrize(
"uri_without_profile",
["models:/MyModel/12", "models:/MyModel/Staging", "models:/MyModel/Production"],
)
def test_models_artifact_repo_init_with_db_profile_inferred_from_context(uri_without_profile):
with mock.patch(WORKSPACE_MODELS_ARTIFACT_REPOSITORY, autospec=True) as mock_repo, mock.patch(
"mlflow.store.artifact.utils.models.mlflow.get_registry_uri",
return_value="databricks://getRegistryUriDefault",
):
models_repo = ModelsArtifactRepository(uri_without_profile)
assert models_repo.artifact_uri == uri_without_profile
assert isinstance(models_repo.repo, DatabricksModelsArtifactRepository)
mock_repo.assert_called_once_with(uri_without_profile)
def test_models_artifact_repo_init_with_uc_registry_db_profile_inferred_from_context():
model_uri = "models:/MyModel/12"
uc_registry_uri = "databricks-uc://getRegistryUriDefault"
with mock.patch(UC_MODELS_ARTIFACT_REPOSITORY, autospec=True) as mock_repo, mock.patch(
"mlflow.get_registry_uri", return_value=uc_registry_uri
):
models_repo = ModelsArtifactRepository(model_uri)
assert models_repo.artifact_uri == model_uri
assert isinstance(models_repo.repo, UnityCatalogModelsArtifactRepository)
mock_repo.assert_called_once_with(model_uri, registry_uri=uc_registry_uri)
def test_models_artifact_repo_init_with_version_uri_and_not_using_databricks_registry():
non_databricks_uri = "non_databricks_uri"
artifact_location = "s3://blah_bucket/"
with mock.patch.object(
MlflowClient, "get_model_version_download_uri", return_value=artifact_location
), mock.patch(
"mlflow.store.artifact.utils.models.mlflow.get_registry_uri",
return_value=non_databricks_uri,
), mock.patch(
"mlflow.store.artifact.artifact_repository_registry.get_artifact_repository",
return_value=None,
) as get_repo_mock:
model_uri = "models:/MyModel/12"
ModelsArtifactRepository(model_uri)
get_repo_mock.assert_called_once_with(artifact_location)
def METHOD_NAME():
model_uri = "models:/MyModel/Staging"
artifact_location = "s3://blah_bucket/"
model_version_detailed = ModelVersion(
"MyModel",
"10",
"2345671890",
"234567890",
"some description",
"UserID",
"Production",
"source",
"run12345",
)
get_latest_versions_patch = mock.patch.object(
MlflowClient, "get_latest_versions", return_value=[model_version_detailed]
)
get_model_version_download_uri_patch = mock.patch.object(
MlflowClient, "get_model_version_download_uri", return_value=artifact_location
)
with get_latest_versions_patch, get_model_version_download_uri_patch, mock.patch(
"mlflow.store.artifact.artifact_repository_registry.get_artifact_repository",
return_value=None,
) as get_repo_mock:
ModelsArtifactRepository(model_uri)
get_repo_mock.assert_called_once_with(artifact_location)
def test_models_artifact_repo_uses_repo_download_artifacts():
"""
``ModelsArtifactRepository`` should delegate `download_artifacts` to its
``self.repo.download_artifacts`` function.
"""
artifact_location = "s3://blah_bucket/"
with mock.patch.object(
MlflowClient, "get_model_version_download_uri", return_value=artifact_location
):
model_uri = "models:/MyModel/12"
models_repo = ModelsArtifactRepository(model_uri)
models_repo.repo = Mock()
models_repo.download_artifacts("artifact_path", "dst_path")
models_repo.repo.download_artifacts.assert_called_once()
def test_split_models_uri():
assert ModelsArtifactRepository.split_models_uri("models:/model/1") == ("models:/model/1", "")
assert ModelsArtifactRepository.split_models_uri("models:/model/1/path") == (
"models:/model/1",
"path",
)
assert ModelsArtifactRepository.split_models_uri("models:/model/1/path/to/artifact") == (
"models:/model/1",
"path/to/artifact",
) |
299,940 | test remove sequence | # lint-amnesty, pylint: disable=missing-module-docstring
from datetime import datetime, timezone
from unittest import TestCase
import pytest
from opaque_keys.edx.keys import CourseKey
import attr
from ...data import (
CourseOutlineData, CourseSectionData, CourseLearningSequenceData, VisibilityData, CourseVisibility
)
class TestCourseOutlineData(TestCase):
"""
Simple set of tests for data class validations.
"""
@classmethod
def setUpClass(cls):
"""
All our data classes are immutable, so we can set up a baseline course
outline and then make slightly modified versions for each particular
test as needed.
"""
super().setUpClass()
normal_visibility = VisibilityData( # lint-amnesty, pylint: disable=unused-variable
hide_from_toc=False,
visible_to_staff_only=False
)
cls.course_key = CourseKey.from_string("course-v1:OpenEdX+Learning+TestRun")
cls.course_outline = CourseOutlineData(
course_key=cls.course_key,
title="Exciting Test Course!",
published_at=datetime(2020, 5, 19, tzinfo=timezone.utc),
published_version="5ebece4b69dd593d82fe2014",
entrance_exam_id=None,
days_early_for_beta=None,
sections=generate_sections(cls.course_key, [3, 2]),
self_paced=False,
course_visibility=CourseVisibility.PRIVATE
)
def test_deprecated_course_key(self):
"""Old-Mongo style, "Org/Course/Run" keys are not supported."""
old_course_key = CourseKey.from_string("OpenEdX/TestCourse/TestRun")
with pytest.raises(ValueError):
attr.evolve(self.course_outline, course_key=old_course_key)
def test_sequence_building(self):
"""Make sure sequences were set correctly from sections data."""
for section in self.course_outline.sections:
for seq in section.sequences:
assert seq == self.course_outline.sequences[seq.usage_key]
assert sum(len(section.sequences) for section in self.course_outline.sections) ==\
len(self.course_outline.sequences)
def test_duplicate_sequence(self):
"""We don't support DAGs. Sequences can only be in one Section."""
# This section has Chapter 2's sequences in it
section_with_dupe_seq = attr.evolve(
self.course_outline.sections[1], title="Chapter 2 dupe",
)
with pytest.raises(ValueError):
attr.evolve(
self.course_outline,
sections=self.course_outline.sections + [section_with_dupe_seq]
)
def test_size(self):
"""Limit how large a CourseOutline is allowed to be."""
with pytest.raises(ValueError):
attr.evolve(
self.course_outline,
sections=generate_sections(self.course_key, [1001])
)
def METHOD_NAME(self):
"""Remove a single sequence from the CourseOutlineData (creates a copy)."""
seq_to_remove = self.course_outline.sections[0].sequences[0]
new_outline = self.course_outline.remove({seq_to_remove.usage_key})
assert self.course_outline != new_outline
assert seq_to_remove.usage_key in self.course_outline.sequences
assert seq_to_remove.usage_key not in new_outline.sequences
assert len(new_outline.sections[0].sequences) == len(self.course_outline.sections[0].sequences) - 1
for seq in new_outline.sections[0].sequences:
assert seq != seq_to_remove
def test_remove_section(self):
"""
Remove a whole Section from the CourseOutlineData (creates a copy).
Removing a Section also removes all Sequences in that Section.
"""
section_to_remove = self.course_outline.sections[0]
new_outline = self.course_outline.remove({section_to_remove.usage_key})
assert self.course_outline != new_outline
assert len(new_outline.sections) == len(self.course_outline.sections) - 1
assert section_to_remove != new_outline.sections[0]
for seq in section_to_remove.sequences:
assert seq.usage_key not in new_outline.sequences
def test_remove_nonexistant(self):
"""Removing something that's not already there is a no-op."""
seq_key_to_remove = self.course_key.make_usage_key('sequential', 'not_here')
new_outline = self.course_outline.remove({seq_key_to_remove})
assert new_outline == self.course_outline
def test_days_early_for_beta(self):
"""
Check that days_early_for_beta exists, can be set, and validates correctly.
"""
assert self.course_outline.days_early_for_beta is None
new_outline = attr.evolve(
self.course_outline,
days_early_for_beta=5
)
assert new_outline is not None
assert new_outline != self.course_outline
assert new_outline.days_early_for_beta == 5
with pytest.raises(ValueError) as error:
attr.evolve(self.course_outline, days_early_for_beta=-1)
assert error.match(
"Provided value -1 for days_early_for_beta is invalid. The value must be positive or zero. "
"A positive value will shift back the starting date for Beta users by that many days."
)
def test_empty_user_partition_groups(self):
"""
A user partition group entry with no groups is an error.
This would mean that a piece of content is associated with a partition
but nobody would ever be able to see it because it's not associated with
any group in the partition.
"""
sections = generate_sections(self.course_key, [1])
valid_section = attr.evolve(
sections[0],
user_partition_groups={
50: frozenset([1, 2, 3]),
51: frozenset([1]),
}
)
with self.assertRaises(ValueError):
attr.evolve(
valid_section,
user_partition_groups={
50: frozenset([1, 2, 3]),
51: frozenset(), # This is not allowed
}
)
def generate_sections(course_key, num_sequences):
"""
Generate a list of CourseSectionData.
`num_sequences` is a list that contains the length of each CourseSectionData
in order. So if you pass in [1, 3, 5], we would pass back a list of three
CourseSectionData, where the first one has 1 CourseLearningSequenceData as
it sequences, the second had 3 sequences, and the third had 5 sequences.
All sections and sequences have normal visibility.
"""
normal_visibility = VisibilityData(
hide_from_toc=False,
visible_to_staff_only=False
)
sections = []
for sec_num, seq_count in enumerate(num_sequences, 1):
sections.append(
CourseSectionData(
usage_key=course_key.make_usage_key('chapter', f'ch_{sec_num}'),
title=f"Chapter {sec_num}: 🔥",
visibility=normal_visibility,
sequences=[
CourseLearningSequenceData(
usage_key=course_key.make_usage_key(
'sequential', f'seq_{sec_num}_{seq_num}'
),
title=f"Seq {sec_num}.{seq_num}: 🔥",
visibility=normal_visibility,
)
for seq_num in range(seq_count)
]
)
)
return sections |
299,941 | construct data reader | import functools
import operator
import os
import os.path
import sys
import numpy as np
# Bamboo utilities
current_file = os.path.realpath(__file__)
current_dir = os.path.dirname(current_file)
sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))
import tools
# ==============================================
# Objects for Python data reader
# ==============================================
# Note: The Python data reader imports this file as a module and calls
# the functions below to ingest data.
# Data
# Note: The L1 norm is not differentiable at 0, so we make sure values
# are away from 0.
np.random.seed(2019102412)
_num_samples = 23
_sample_size = 17
_samples = np.random.normal(size=(_num_samples,_sample_size)).astype(np.float32)
# Sample access functions
def get_sample(index):
return _samples[index,:]
def num_samples():
return _num_samples
def sample_dims():
return (_sample_size,)
# ==============================================
# Setup LBANN experiment
# ==============================================
def setup_experiment(lbann, weekly):
"""Construct LBANN experiment.
Args:
lbann (module): Module for LBANN Python frontend
"""
mini_batch_size = num_samples() // 2
trainer = lbann.Trainer(mini_batch_size)
model = construct_model(lbann)
data_reader = METHOD_NAME(lbann)
optimizer = lbann.NoOptimizer()
return trainer, model, data_reader, optimizer, None # Don't request any specific number of nodes
def construct_model(lbann):
"""Construct LBANN model.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Input data
# Note: Sum with a weights layer so that gradient checking will
# verify that error signals are correct.
x_weights = lbann.Weights(optimizer=lbann.SGD(),
initializer=lbann.ConstantInitializer(value=0.0),
name='input_weights')
x = lbann.Sum(lbann.Reshape(lbann.Input(data_field='samples'),
dims=_sample_size),
lbann.WeightsLayer(weights=x_weights,
dims=_sample_size))
x_lbann = x
# Objects for LBANN model
obj = []
metrics = []
callbacks = []
# ------------------------------------------
# Data-parallel layout
# ------------------------------------------
# LBANN implementation
x = x_lbann
y = lbann.Sigmoid(x, data_layout='data_parallel')
z = lbann.L2Norm2(y)
obj.append(z)
metrics.append(lbann.Metric(z, name='data-parallel layout'))
# NumPy implementation
vals = []
for i in range(num_samples()):
x = get_sample(i).astype(np.float64)
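        # Numerically stable sigmoid: use 1/(1+exp(-x)) for x >= 0 and
        # exp(x)/(1+exp(x)) for x < 0 so that exp() never overflows.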
y = np.where(x >= 0,
1 / (1 + np.exp(-x)),
np.exp(x) / (1 + np.exp(x)))
z = tools.numpy_l2norm2(y)
vals.append(z)
val = np.mean(vals)
tol = 8 * val * np.finfo(np.float32).eps
callbacks.append(lbann.CallbackCheckMetric(
metric=metrics[-1].name,
lower_bound=val-tol,
upper_bound=val+tol,
error_on_failure=True,
execution_modes='test'))
# ------------------------------------------
# Model-parallel layout
# ------------------------------------------
# LBANN implementation
x = x_lbann
y = lbann.Sigmoid(x, data_layout='model_parallel')
z = lbann.L2Norm2(y)
obj.append(z)
metrics.append(lbann.Metric(z, name='model-parallel layout'))
# NumPy implementation
vals = []
for i in range(num_samples()):
x = get_sample(i).astype(np.float64)
y = np.where(x >= 0,
1 / (1 + np.exp(-x)),
np.exp(x) / (1 + np.exp(x)))
z = tools.numpy_l2norm2(y)
vals.append(z)
val = np.mean(vals)
tol = 8 * val * np.finfo(np.float32).eps
callbacks.append(lbann.CallbackCheckMetric(
metric=metrics[-1].name,
lower_bound=val-tol,
upper_bound=val+tol,
error_on_failure=True,
execution_modes='test'))
# ------------------------------------------
# Gradient checking
# ------------------------------------------
callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))
# ------------------------------------------
# Construct model
# ------------------------------------------
num_epochs = 0
return lbann.Model(num_epochs,
layers=lbann.traverse_layer_graph(x_lbann),
objective_function=obj,
metrics=metrics,
callbacks=callbacks)
def METHOD_NAME(lbann):
"""Construct Protobuf message for Python data reader.
The Python data reader will import the current Python file to
access the sample access functions.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Note: The training data reader should be removed when
# https://github.com/LLNL/lbann/issues/1098 is resolved.
message = lbann.reader_pb2.DataReader()
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'train'
)
])
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'test'
)
])
return message
# ==============================================
# Setup PyTest
# ==============================================
# Create test functions that can interact with PyTest
for _test_func in tools.create_tests(setup_experiment, __file__):
globals()[_test_func.__name__] = _test_func |
299,942 | cmake args | # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re
from spack.package import *
class Rocfft(CMakePackage):
"""Radeon Open Compute FFT library"""
homepage = "https://github.com/ROCmSoftwarePlatform/rocFFT/"
git = "https://github.com/ROCmSoftwarePlatform/rocFFT.git"
url = "https://github.com/ROCmSoftwarePlatform/rocfft/archive/rocm-5.5.0.tar.gz"
tags = ["rocm"]
maintainers("cgmb", "srekolam", "renjithravindrankannath", "haampie")
libraries = ["librocfft"]
version("5.5.1", sha256="57423a64f5cdb1c37ff0891b6c17b59f73198d46be42db4ae23781ef2c0cd49d")
version("5.5.0", sha256="9288152e66504b06082e4eed8cdb791b4f9ae2836b3defbeb4d2b54901b96485")
version("5.4.3", sha256="ed9664adc9825c237327497bc4b23f020d50be7645647f14a45f4d943dd506e7")
version("5.4.0", sha256="d35a67332f4425fba1824eed78cf98d5c9a17a422614ff3f4cba2461df952336")
version("5.3.3", sha256="678c18710578c1fb36a0009311bb79de7607c3468f9102cfba56a866ebb7ff78")
version("5.3.0", sha256="d655c5541c4aff4267e80e36d002fc3a55c2f84a0ae8631197c12af3bf03fa7d")
version("5.2.3", sha256="0cee37886f01f1afb3ae5dad1164c819573c13c6675bff4eb668de334adbff27")
version("5.2.1", sha256="6302349b6cc610a9a939377e2c7ffba946656a8d43f2e438ff0b3088f0f963ad")
version("5.2.0", sha256="ebba280b7879fb4bc529a68072b98d4e815201f90d24144d672094bc241743d4")
version("5.1.3", sha256="b4fcd03c1b07d465bb307ec33cc7fb50036dff688e497c5e52b2dec37f4cb618")
version("5.1.0", sha256="dc11c9061753ae43a9d5db9c4674aa113a8adaf50818b2701cbb940894147f68")
version(
"5.0.2",
sha256="30d4bd5fa85185ddafc69fa6d284edd8033c9d77d1e351fa328267242995eb0a",
deprecated=True,
)
version(
"5.0.0",
sha256="c16374dac2f85fbaf145511653e93f6db3151425ce39b282187745c716b67405",
deprecated=True,
)
version(
"4.5.2",
sha256="2724118ca00b9e97ac9578fe0b7e64a82d86c4fb0246d0da88d8ddd9c608b1e1",
deprecated=True,
)
version(
"4.5.0",
sha256="045c1cf1737db6e7ee332c274dacdb565f99c976ed4cc5626a116878dc80a48c",
deprecated=True,
)
version(
"4.3.1",
sha256="fcdc4d12b93d967b6f992b4045da98433eabf2ee0ba84fc6b6f81e380584fbc9",
deprecated=True,
)
version(
"4.3.0",
sha256="cb5b8f62330bc61b17a3a2fd1500068ee05d48cb51797901dd259dbc84610478",
deprecated=True,
)
version(
"4.2.0",
sha256="db29c9067f0cfa98bddd3574f6aa7200cfc790cc6da352d19e4696c3f3982163",
deprecated=True,
)
version(
"4.1.0",
sha256="df23fcb05aae72557461ae3687be7e3b8b78be4132daf1aa9dc07339f4eba0cc",
deprecated=True,
)
version(
"4.0.0",
sha256="d1d10d270f822e0bab64307313ef163ba449b058bf3352962bbb26d4f4db89d0",
deprecated=True,
)
version(
"3.10.0",
sha256="9f57226aac7d9a0515e14a5a5b08a85e727de72b3f9c2177daf56749ac2c76ae",
deprecated=True,
)
version(
"3.9.0",
sha256="9c9c0b7f09bab17250f5101d1605e7a61218eae828a3eb8fe048d607181294ce",
deprecated=True,
)
version(
"3.8.0",
sha256="ed23009796e2ee7c43dcc24527f2d6b1d7a73dceac06c30384460098d2fe1556",
deprecated=True,
)
version(
"3.7.0",
sha256="94462e4bd19c2c749fcf6903adbee66d4d3bd345c0246861ff8f40b9d08a6ead",
deprecated=True,
)
version(
"3.5.0",
sha256="629f02cfecb7de5ad2517b6a8aac6ed4de60d3a9c620413c4d9db46081ac2c88",
deprecated=True,
)
amdgpu_targets = ROCmPackage.amdgpu_targets
variant(
"amdgpu_target",
description="AMD GPU architecture",
values=auto_or_any_combination_of(*amdgpu_targets),
sticky=True,
)
variant(
"amdgpu_target_sram_ecc",
description="AMD GPU architecture",
values=auto_or_any_combination_of(*amdgpu_targets),
sticky=True,
)
depends_on("[email protected]:", type="build", when="@4.5.0:")
depends_on("[email protected]:", type="build")
depends_on("[email protected]:", type="build", when="@5.0.0:")
depends_on("[email protected]:", when="@5.0.0:")
depends_on("[email protected]:", type="test")
depends_on("[email protected]:", type="test")
depends_on("[email protected]: +program_options", type="test")
depends_on("llvm-amdgpu +openmp", type="test")
def check(self):
exe = join_path(self.build_directory, "clients", "staging", "rocfft-test")
self.run_test(exe, options="--gtest_filter=mix*:adhoc*")
for ver in [
"3.5.0",
"3.7.0",
"3.8.0",
"3.9.0",
"3.10.0",
"4.0.0",
"4.1.0",
"4.2.0",
"4.3.0",
"4.3.1",
"4.5.0",
"4.5.2",
"5.0.0",
"5.0.2",
"5.1.0",
"5.1.3",
"5.2.0",
"5.2.1",
"5.2.3",
"5.3.0",
"5.3.3",
"5.4.0",
"5.4.3",
"5.5.0",
"5.5.1",
]:
depends_on("hip@" + ver, when="@" + ver)
depends_on("rocm-cmake@%s:" % ver, type="build", when="@" + ver)
patch("0001-Improve-compilation-by-using-sqlite-recipe-for-rocfft.patch", when="@5.0.0:5.0.2")
# Patch to add spack build test support. No longer required from 5.2
patch("0002-Fix-clients-fftw3-include-dirs-rocm-4.2.patch", when="@4.2.0:4.3.1")
patch("0003-Fix-clients-fftw3-include-dirs-rocm-4.5.patch", when="@4.5.0:5.1")
# Patch to add install prefix header location for sqlite for 5.4
patch("0004-fix-missing-sqlite-include-paths.patch", when="@5.4.0:5.5")
def setup_build_environment(self, env):
env.set("CXX", self.spec["hip"].hipcc)
@classmethod
def determine_version(cls, lib):
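        # The patch level is encoded in the shared-library suffix; a (hypothetical)
        # name like "librocfft.so.0.1.50200" would be reported as version "5.2.0".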
match = re.search(r"lib\S*\.so\.\d+\.\d+\.(\d)(\d\d)(\d\d)", lib)
if match:
ver = "{0}.{1}.{2}".format(
int(match.group(1)), int(match.group(2)), int(match.group(3))
)
else:
ver = None
return ver
def METHOD_NAME(self):
args = [self.define("BUILD_CLIENTS_TESTS", self.run_tests)]
tgt = self.spec.variants["amdgpu_target"]
if "auto" not in tgt:
if "@:3.8.0" in self.spec:
args.append(
self.define(
"CMAKE_CXX_FLAGS", "--amdgpu-target={0}".format(",".join(tgt.value))
)
)
else:
args.append(self.define_from_variant("AMDGPU_TARGETS", "amdgpu_target"))
# From version 3.9 and above we have AMDGPU_TARGETS_SRAM_ECC
tgt_sram = self.spec.variants["amdgpu_target_sram_ecc"]
if "auto" not in tgt_sram and self.spec.satisfies("@3.9.0:4.0.0"):
args.append(
self.define_from_variant("AMDGPU_TARGETS_SRAM_ECC", "amdgpu_target_sram_ecc")
)
# See https://github.com/ROCmSoftwarePlatform/rocFFT/issues/322
if self.spec.satisfies("^[email protected]:3.21.2"):
args.append(self.define("__skip_rocmclang", "ON"))
if self.spec.satisfies("@5.0.0:"):
args.append(self.define("SQLITE_USE_SYSTEM_PACKAGE", "ON"))
if self.spec.satisfies("@5.2.0:"):
args.append(self.define("BUILD_FILE_REORG_BACKWARD_COMPATIBILITY", True))
if self.spec.satisfies("@5.3.0:"):
args.append("-DCMAKE_INSTALL_LIBDIR=lib")
return args |
299,943 | id for label | # SPDX-License-Identifier: EUPL-1.2
# Copyright (C) 2019 - 2020 Dimpact
from typing import Optional, Union
from django import forms
from django.contrib.gis.admin.widgets import OpenLayersWidget
from django.contrib.gis.gdal import AxisOrder, OGRGeometry, SpatialReference
from django.contrib.gis.geos import GEOSGeometry
from django.utils.translation import ugettext_lazy as _
from dateutil.relativedelta import relativedelta
from relativedeltafield.utils import format_relativedelta, parse_relativedelta
class BooleanRadio(forms.RadioSelect):
def __init__(self, attrs=None):
choices = ((True, _("Yes")), (False, _("No")))
super().__init__(attrs, choices)
def value_from_datadict(self, data, files, name):
value = data.get(name, False)
return {True: True, "True": True, "False": False, False: False}[value]
class SplitRelativeDeltaWidget(forms.Widget):
"""
Present durations as a split widget.
Given a duration and the ISO8601 duration format, provide an input for
    every component of the duration. Years, months and days are always
    presented, the remaining components only if they have a value set.
    .. note:: fractional durations are currently not supported, such as P0.5Y
"""
template_name = "admin/widgets/split_relative_delta.html"
def METHOD_NAME(self, id_):
# See the comment for RadioSelect.id_for_label()
if id_:
id_ += "_0"
return id_
def value_from_datadict(self, data, files, name) -> str:
# In case the value was directly injected into the form data, e.g. if validation
# happens on the backend, simply take that value
if name in data and isinstance(data[name], relativedelta):
duration = data[name]
else:
value_from_datadict = forms.NumberInput().value_from_datadict
years = value_from_datadict(data, files, f"{name}_years")
months = value_from_datadict(data, files, f"{name}_months")
days = value_from_datadict(data, files, f"{name}_days")
hours = value_from_datadict(data, files, f"{name}_hours")
minutes = value_from_datadict(data, files, f"{name}_minutes")
seconds = value_from_datadict(data, files, f"{name}_seconds")
microseconds = value_from_datadict(data, files, f"{name}_microseconds")
duration = relativedelta(
years=int(years or 0),
months=int(months or 0),
days=int(days or 0),
hours=int(hours or 0),
minutes=int(minutes or 0),
seconds=int(seconds or 0),
microseconds=int(microseconds or 0),
)
return format_relativedelta(duration)
def get_context(self, name, value: Union[relativedelta, str], attrs=None):
attrs = {} if attrs is None else attrs
context = super().get_context(name, value, attrs)
value = value or relativedelta()
final_attrs = self.build_attrs(attrs)
final_attrs.update({"min": 0})
if isinstance(value, str):
value = parse_relativedelta(value)
years_widget = self._build_subwidget_context(
name, value, final_attrs, "years", _("# jaren"), required=True
)
months_widget = self._build_subwidget_context(
name, value, final_attrs, "months", _("# maanden"), required=True
)
days_widget = self._build_subwidget_context(
name, value, final_attrs, "days", _("# dagen"), required=True
)
hours_widget = self._build_subwidget_context(
name, value, final_attrs, "hours", _("# uren")
)
minutes_widget = self._build_subwidget_context(
name, value, final_attrs, "minutes", _("# minuten")
)
seconds_widget = self._build_subwidget_context(
name, value, final_attrs, "seconds", _("# seconden")
)
microseconds_widget = self._build_subwidget_context(
name, value, final_attrs, "microseconds", _("# microseconden")
)
subwidgets = [
years_widget,
months_widget,
# weeks is skipped, because internally it's converted to days
days_widget,
hours_widget,
minutes_widget,
seconds_widget,
microseconds_widget,
]
context["widget"]["subwidgets"] = [
widget for widget in subwidgets if widget is not None
]
return context
def _get_subwidget_value(self, value: relativedelta, attr: str) -> Optional[int]:
if not value:
return None
return getattr(value, attr) or None
def _build_subwidget_context(
self,
name: str,
value: relativedelta,
final_attrs: dict,
attribute: str,
placeholder,
required: bool = False,
) -> Optional[forms.Widget]:
value = self._get_subwidget_value(value, attribute)
if value is None and not required:
return None
id_ = final_attrs.get("id")
attrs = {
**final_attrs,
"placeholder": placeholder,
"title": placeholder,
}
if id_:
attrs["id"] = f"{id_}_{attribute}"
widget_context = forms.NumberInput().get_context(
name=f"{name}_{attribute}", value=value, attrs=attrs,
)
return widget_context["widget"]
class AuthoritySpatialReference(SpatialReference):
def __init__(self, srs_input="", srs_type="user"):
super().__init__(srs_input, srs_type, axis_order=AxisOrder.AUTHORITY)
class AuthorityAxisOrderOLWidget(OpenLayersWidget):
"""
    Here is a long and painful explanation of why we need it. Buckle up.
    First, the `Zaak.zaakgeometry` field is geometric, not geographic. If it's a point, it has x and y coordinates.
    But how do we map them to lat and lon? What is the order - lat/lon or lon/lat?
    Well, there is no consensus on what the order should be.
    OpenZaak supports only the "EPSG:4326" coordinate system, and according to "EPSG:4326" the order should be lat/lon.
    GDAL<3.0 expects lon/lat order and treats all points as lon/lat.
    The good news is that GDAL>=3.0 can use the order defined in the CRS. And in Open Zaak we support GDAL >= 3.0.
    BUT django.contrib.gis.gdal uses the traditional axis order (lon/lat) by default, and the user can only set the
    SRID, not the axis order, when initializing geometry objects.
    OpenStreetMap uses the "EPSG:3857" coordinate system. So in the parent class "EPSG:4326" coordinates are
    transformed to "EPSG:3857" via the GDAL API with the traditional axis order, where 'x' is treated as 'lon'
    and 'y' is treated as 'lat'.
    In this class we transform coordinates with the "Authority" order, which for "EPSG:4326" is lat/lon.
    If "axis_order" is treated with more attention in future Django versions, this workaround should be removed.
    This workaround won't work on GDAL<3.0. Perhaps, in that case, we could use django-leaflet?
    GDAL related doc - https://gdal.org/tutorials/osr_api_tut.html#crs-and-axis-order
"""
data_srid = 4326
def get_context(self, name, value, attrs):
if value:
ogr = OGRGeometry(value.wkt, AuthoritySpatialReference(value.srid))
# ogr = value.ogr
ogr.transform(self.params["srid"])
value = GEOSGeometry(ogr._geos_ptr(), srid=ogr.srid)
return super().get_context(name, value, attrs)
def deserialize(self, value):
value = GEOSGeometry(value)
if value.srid and value.srid != self.data_srid:
value.transform(AuthoritySpatialReference(self.data_srid))
return value |
299,944 | test saving to disc index linestring | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import shutil
import pytest
from sedona.core.SpatialRDD import PointRDD, PolygonRDD, LineStringRDD
from sedona.core.enums import IndexType, GridType
from sedona.core.formatMapper.disc_utils import load_spatial_rdd_from_disc, \
load_spatial_index_rdd_from_disc, GeoType
from sedona.core.spatialOperator import JoinQuery
from tests.test_base import TestBase
from tests.tools import tests_resource
def remove_directory(path: str) -> bool:
try:
shutil.rmtree(path)
except Exception as e:
return False
return True
disc_location = os.path.join(tests_resource, "spatial_objects/temp")
class TestDiscUtils(TestBase):
def test_saving_to_disc_spatial_rdd_point(self):
from tests.properties.point_properties import input_location, offset, splitter, num_partitions
point_rdd = PointRDD(
self.sc, input_location, offset, splitter, True, num_partitions
)
point_rdd.rawJvmSpatialRDD.saveAsObjectFile(os.path.join(disc_location, "point"))
def test_saving_to_disc_spatial_rdd_polygon(self):
from tests.properties.polygon_properties import input_location, splitter, num_partitions
polygon_rdd = PolygonRDD(
self.sc,
input_location,
splitter,
True,
num_partitions
)
polygon_rdd.rawJvmSpatialRDD.saveAsObjectFile(os.path.join(disc_location, "polygon"))
def test_saving_to_disc_spatial_rdd_linestring(self):
from tests.properties.linestring_properties import input_location, splitter, num_partitions
linestring_rdd = LineStringRDD(
self.sc,
input_location,
splitter,
True,
num_partitions
)
linestring_rdd.rawJvmSpatialRDD.saveAsObjectFile(os.path.join(disc_location, "line_string"))
def METHOD_NAME(self):
from tests.properties.linestring_properties import input_location, splitter, num_partitions
linestring_rdd = LineStringRDD(
self.sc,
input_location,
splitter,
True,
num_partitions
)
linestring_rdd.buildIndex(IndexType.RTREE, False)
linestring_rdd.indexedRawRDD.saveAsObjectFile(os.path.join(disc_location, "line_string_index"))
def test_saving_to_disc_index_polygon(self):
from tests.properties.polygon_properties import input_location, splitter, num_partitions
polygon_rdd = PolygonRDD(
self.sc,
input_location,
splitter,
True,
num_partitions
)
polygon_rdd.buildIndex(IndexType.RTREE, False)
polygon_rdd.indexedRawRDD.saveAsObjectFile(os.path.join(disc_location, "polygon_index"))
def test_saving_to_disc_index_point(self):
from tests.properties.point_properties import input_location, offset, splitter, num_partitions
point_rdd = PointRDD(
self.sc, input_location, offset, splitter, True, num_partitions)
point_rdd.buildIndex(IndexType.RTREE, False)
point_rdd.indexedRawRDD.saveAsObjectFile(os.path.join(disc_location, "point_index"))
def test_loading_spatial_rdd_from_disc(self):
point_rdd = load_spatial_rdd_from_disc(
self.sc, os.path.join(disc_location, "point"), GeoType.POINT
)
point_index_rdd = load_spatial_index_rdd_from_disc(self.sc, os.path.join(disc_location, "point_index"))
point_rdd.indexedRawRDD = point_index_rdd
assert point_rdd.indexedRawRDD is not None
assert isinstance(point_rdd, PointRDD)
point_rdd.analyze()
print(point_rdd.boundaryEnvelope)
polygon_rdd = load_spatial_rdd_from_disc(
self.sc, os.path.join(disc_location, "polygon"), GeoType.POLYGON
)
polygon_index_rdd = load_spatial_index_rdd_from_disc(self.sc, os.path.join(disc_location, "polygon_index"))
polygon_rdd.indexedRawRDD = polygon_index_rdd
polygon_rdd.analyze()
print(polygon_rdd.boundaryEnvelope)
assert polygon_rdd.indexedRawRDD is not None
assert isinstance(polygon_rdd, PolygonRDD)
linestring_rdd = load_spatial_rdd_from_disc(
self.sc, os.path.join(disc_location, "line_string"), GeoType.LINESTRING
)
linestring_index_rdd = load_spatial_index_rdd_from_disc(self.sc, os.path.join(disc_location, "line_string_index"))
linestring_rdd.indexedRawRDD = linestring_index_rdd
assert linestring_rdd.indexedRawRDD is not None
assert isinstance(linestring_rdd, LineStringRDD)
linestring_rdd.analyze()
print(linestring_rdd.boundaryEnvelope)
linestring_rdd.spatialPartitioning(GridType.KDBTREE)
polygon_rdd.spatialPartitioning(linestring_rdd.getPartitioner())
polygon_rdd.buildIndex(IndexType.RTREE, True)
linestring_rdd.buildIndex(IndexType.RTREE, True)
result = JoinQuery.SpatialJoinQuery(
linestring_rdd, polygon_rdd, True, True).collect()
print(result)
remove_directory(disc_location) |
299,945 | test gen grid | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf import lib
from pyscf import gto
from pyscf import dft
from pyscf.dft import gen_grid
from pyscf.dft import radi
def setUpModule():
global h2o
h2o = gto.Mole()
h2o.verbose = 5
h2o.output = '/dev/null'
h2o.atom = [
["O" , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ]
h2o.basis = {"H": '6-31g',
"O": '6-31g',}
h2o.build()
def tearDownModule():
global h2o
h2o.stdout.close()
del h2o
class KnownValues(unittest.TestCase):
def METHOD_NAME(self):
grid = gen_grid.Grids(h2o)
grid.prune = None
grid.radi_method = radi.gauss_chebyshev
grid.becke_scheme = gen_grid.original_becke
grid.radii_adjust = radi.becke_atomic_radii_adjust
grid.atomic_radii = radi.BRAGG_RADII
grid.alignment = 0
grid.atom_grid = {"H": (10, 50), "O": (10, 50),}
grid.build(with_non0tab=False)
self.assertAlmostEqual(numpy.linalg.norm(grid.coords), 185.91245945279027, 9)
self.assertAlmostEqual(numpy.linalg.norm(grid.weights), 1720.1317185648893, 8)
grid.becke_scheme = gen_grid.stratmann
grid.build(with_non0tab=False)
self.assertAlmostEqual(numpy.linalg.norm(grid.weights), 1730.3692983091271, 8)
grid.atom_grid = {"O": (10, 50),}
grid.radii_adjust = None
grid.becke_scheme = gen_grid.stratmann
grid.kernel(with_non0tab=False)
self.assertAlmostEqual(numpy.linalg.norm(grid.weights), 2559.0064040257907, 8)
grid.atom_grid = (10, 11)
grid.becke_scheme = gen_grid.original_becke
grid.radii_adjust = None
grid.build(with_non0tab=False)
self.assertAlmostEqual(numpy.linalg.norm(grid.weights), 1712.3069450297105, 8)
def test_radi(self):
grid = gen_grid.Grids(h2o)
grid.prune = None
grid.radii_adjust = radi.becke_atomic_radii_adjust
grid.atomic_radii = radi.COVALENT_RADII
grid.radi_method = radi.mura_knowles
grid.atom_grid = {"H": (10, 50), "O": (10, 50),}
grid.build(with_non0tab=False)
self.assertAlmostEqual(numpy.linalg.norm(grid.weights), 1804.5437331817291, 9)
grid.radi_method = radi.delley
grid.build(with_non0tab=False)
self.assertAlmostEqual(numpy.linalg.norm(grid.weights), 1686.3482864673697, 9)
grid.radi_method = radi.becke
grid.build(with_non0tab=False)
self.assertAlmostEqual(lib.fp(grid.weights), 818061.875131255, 7)
def test_prune(self):
grid = gen_grid.Grids(h2o)
grid.prune = gen_grid.sg1_prune
grid.atom_grid = {"H": (10, 50), "O": (10, 50),}
grid.alignment = 0
grid.build(with_non0tab=False)
self.assertAlmostEqual(numpy.linalg.norm(grid.coords), 202.17732600266302, 9)
self.assertAlmostEqual(numpy.linalg.norm(grid.weights), 442.54536463517167, 9)
grid.prune = gen_grid.nwchem_prune
grid.build(with_non0tab=False)
self.assertAlmostEqual(numpy.linalg.norm(grid.coords), 149.55023044392638, 9)
self.assertAlmostEqual(numpy.linalg.norm(grid.weights), 586.36841824004455, 9)
z = 16
rad, dr = radi.gauss_chebyshev(50)
angs = gen_grid.sg1_prune(z, rad, 434, radii=radi.SG1RADII)
self.assertAlmostEqual(lib.fp(angs), -291.0794420982329, 9)
angs = gen_grid.nwchem_prune(z, rad, 434, radii=radi.BRAGG_RADII)
self.assertAlmostEqual(lib.fp(angs), -180.12023039394498, 9)
angs = gen_grid.nwchem_prune(z, rad, 26, radii=radi.BRAGG_RADII)
self.assertTrue(numpy.all(angs==26))
def test_gen_atomic_grids(self):
grid = gen_grid.Grids(h2o)
grid.prune = None
grid.atom_grid = {"H": (10, 58), "O": (10, 50),}
self.assertRaises(ValueError, grid.build)
def test_make_mask(self):
grid = gen_grid.Grids(h2o)
grid.atom_grid = {"H": (10, 110), "O": (10, 110),}
grid.cutoff = 1e-15
grid.build()
coords = grid.coords*10.
non0 = gen_grid.make_mask(h2o, coords)
self.assertEqual((non0>0).sum(), 123)
self.assertAlmostEqual(lib.fp(non0), -83.54934301013405, 9)
def test_overwriting_grids_attribute(self):
g = gen_grid.Grids(h2o).run()
self.assertEqual(g.weights.size, 34312)
g.atom_grid = {"H": (10, 110), "O": (10, 110),}
self.assertTrue(g.weights is None)
def test_arg_group_coords(self):
mol = h2o
g = gen_grid.Grids(mol)
atom_grids_tab = g.gen_atomic_grids(
mol, g.atom_grid, g.radi_method, g.level, g.prune)
coords, weights = g.get_partition(
mol, atom_grids_tab, g.radii_adjust, g.atomic_radii, g.becke_scheme)
atom_coords = mol.atom_coords()
boundary = [atom_coords.min(axis=0)-gen_grid.GROUP_BOUNDARY_PENALTY,
atom_coords.max(axis=0)+gen_grid.GROUP_BOUNDARY_PENALTY]
box_size = gen_grid.GROUP_BOX_SIZE
boxes = ((boundary[1] - boundary[0]) * (1./box_size)).round().astype(int)
box_size = (boundary[1] - boundary[0]) / boxes
x_splits = numpy.append(numpy.append(
-numpy.inf, numpy.arange(boxes[0]+1) * box_size[0] + boundary[0][0]), numpy.inf)
y_splits = numpy.append(numpy.append(
-numpy.inf, numpy.arange(boxes[1]+1) * box_size[1] + boundary[0][1]), numpy.inf)
z_splits = numpy.append(numpy.append(
-numpy.inf, numpy.arange(boxes[2]+1) * box_size[2] + boundary[0][2]), numpy.inf)
idx = []
for x0, x1 in zip(x_splits[:-1], x_splits[1:]):
for y0, y1 in zip(y_splits[:-1], y_splits[1:]):
for z0, z1 in zip(z_splits[:-1], z_splits[1:]):
mask = ((x0 <= coords[:,0]) & (coords[:,0] < x1) &
(y0 <= coords[:,1]) & (coords[:,1] < y1) &
(z0 <= coords[:,2]) & (coords[:,2] < z1))
idx.append(numpy.where(mask)[0])
ref = numpy.hstack(idx)
idx = gen_grid.arg_group_grids(mol, coords)
self.assertTrue(abs(ref - idx).max() == 0)
if __name__ == "__main__":
print("Test Grids")
unittest.main() |
299,946 | vec | # Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from scipy.signal import convolve2d
# note: this uses OpenCV's convention of naming the pattern after the 2x2 tile
# that starts in the second row and column of the sensor matrix
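# Illustrative sketch of that convention: for the RGGB pattern the physical sensor
# matrix begins
#   B G B G ...
#   G R G R ...
# so the 2x2 tile starting at the second row and column reads R G / G B, i.e. "RGGB".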
class BayerPattern:
BGGR = 0
GBRG = 1
GRBG = 2
RGGB = 3
bayer_patterns = [BayerPattern.BGGR, BayerPattern.GBRG, BayerPattern.GRBG, BayerPattern.RGGB]
def blue_position(pattern):
assert 0 <= pattern <= 3
return pattern // 2, pattern % 2
def blue_position2pattern(blue_position):
y, x = blue_position
assert 0 <= x <= 1 and 0 <= y <= 1
return 2 * y + x
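# blue_position() gives the location of the blue sample within the 2x2 tile named by
# the pattern, e.g. blue_position(BayerPattern.RGGB) == (1, 1), and
# blue_position2pattern() inverts it, so blue_position2pattern(blue_position(p)) == p
# holds for every pattern in bayer_patterns.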
def rgb_bayer_masks(img_shape, pattern):
h, w = img_shape
assert h % 2 == 0 and w % 2 == 0, f"h: {h}, w: {w}"
assert 0 <= pattern <= 3
def sensor_matrix_00_is_green(pattern):
return pattern in (BayerPattern.GRBG, BayerPattern.GBRG)
def red_is_in_the_first_row(pattern):
return pattern in (BayerPattern.BGGR, BayerPattern.GBRG)
def METHOD_NAME(n, mod=2):
return np.arange(0, n, dtype=np.uint8) % mod
if sensor_matrix_00_is_green(pattern):
top_right_mask = np.outer(1 - METHOD_NAME(h), METHOD_NAME(w))
bottom_left_mask = np.outer(METHOD_NAME(h), 1 - METHOD_NAME(w))
green = 1 - top_right_mask - bottom_left_mask
if red_is_in_the_first_row(pattern):
return top_right_mask, green, bottom_left_mask
return bottom_left_mask, green, top_right_mask
else:
top_left_mask = np.outer(1 - METHOD_NAME(h), 1 - METHOD_NAME(w))
bottom_right_mask = np.outer(METHOD_NAME(h), METHOD_NAME(w))
green = 1 - top_left_mask - bottom_right_mask
if red_is_in_the_first_row(pattern):
return top_left_mask, green, bottom_right_mask
return bottom_right_mask, green, top_left_mask
def rgb2bayer(img, pattern):
h, w, c = img.shape
assert c == 3
h = h // 2 * 2
w = w // 2 * 2
r, g, b = rgb_bayer_masks((h, w), pattern)
return img[:h, :w, 0] * r + img[:h, :w, 1] * g + img[:h, :w, 2] * b
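# Usage sketch (illustrative shapes only):
#   rgb = np.random.randint(0, 256, size=(4, 6, 3), dtype=np.uint8)
#   raw = rgb2bayer(rgb, BayerPattern.BGGR)   # -> uint8 mosaic of shape (4, 6)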
def rgb2bayer_seq(seq, patterns):
f, h, w, c = seq.shape
assert f == len(patterns)
assert c == 3
h = h // 2 * 2
w = w // 2 * 2
bayer_masks = {pattern: rgb_bayer_masks((h, w), pattern) for pattern in bayer_patterns}
seq_masks = [bayer_masks[pattern] for pattern in patterns]
reds, greens, blues = [np.stack(channel) for channel in zip(*seq_masks)]
return seq[:, :h, :w, 0] * reds + seq[:, :h, :w, 1] * greens + seq[:, :h, :w, 2] * blues
def conv2d_border101(img, filt):
r, s = filt.shape
assert r % 2 == 1 and s % 2 == 1
padded = np.pad(img, ((r // 2, r // 2), (s // 2, s // 2)), "reflect")
return convolve2d(padded, filt, mode="valid")
def conv2d_border101_seq(seq, filt):
r, s = filt.shape
assert r % 2 == 1 and s % 2 == 1
padded = np.pad(seq, ((0, 0), (r // 2, r // 2), (s // 2, s // 2)), "reflect")
debayered_frames = [convolve2d(frame, filt, mode="valid") for frame in padded]
return np.stack(debayered_frames)
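# Both conv helpers emulate the reflect-101 border that the "border101" name refers to:
# numpy's "reflect" mode mirrors without repeating the edge sample, e.g.
#   np.pad([1, 2, 3], 1, "reflect") -> [2, 1, 2, 3, 2]
# and the subsequent "valid" convolution keeps the input's spatial size.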
def debayer_bilinear_npp_masks(img, masks):
"""
Computes the "bilinear with chroma correction for green channel" as
defined by the NPP.
"""
in_dtype = img.dtype
ndim = len(img.shape)
assert ndim in (2, 3)
is_seq = ndim == 3
conv = conv2d_border101 if not is_seq else conv2d_border101_seq
red_mask, green_mask, blue_mask = masks
red_signal = img * red_mask
green_signal = img * green_mask
blue_signal = img * blue_mask
    # When inferring the red color for a blue or green base pixel, there are either
    # four red base neighbours at the four corners or two red base neighbours along
    # the x or y axis. The blue color case is analogous.
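    # The single [[1, 2, 1], [2, 4, 2], [1, 2, 1]] kernel below covers all cases:
    # a red base pixel keeps its value via the centre weight 4, a green base pixel
    # sees two red neighbours with weight 2 each, and a blue base pixel sees four
    # diagonal red neighbours with weight 1 each, so every case sums to 4 and the
    # division by 4 yields the average.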
rb_filter = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]], dtype=np.int32)
green_x_filter = np.array([[1, 0, 1]], dtype=np.int32)
green_y_filter = green_x_filter.transpose()
green_filter = np.array([[0, 1, 0], [1, 4, 1], [0, 1, 0]], dtype=np.int32)
red = conv(red_signal, rb_filter) // 4
blue = conv(blue_signal, rb_filter) // 4
green_bilinear = conv(green_signal, green_filter) // 4
green_x = conv(green_signal, green_x_filter) // 2
green_y = conv(green_signal, green_y_filter) // 2
def green_with_chroma_correlation(color_signal):
# For red and blue based positions, there are always four
# green neighbours (y - 1, x), (y + 1, x), (y, x - 1), (y, x + 1).
# NPP does not simply average 4 of them to get green intensity.
# Instead, it averages only two in either y or x axis as explained in
# https://docs.nvidia.com/cuda/npp/group__image__color__debayer.html
# I.e. the axis is chosen by looking at:
        # * abs(color(y, x) - avg(color(y - 2, x), color(y + 2, x))) and
        # * abs(color(y, x) - avg(color(y, x - 2), color(y, x + 2)))
# and choosing the axis where the difference is smaller.
# In other words, if we are inferring green color for blue(red)-based
# position we check in which axis the blue(red) intensity changes less
# and pick that axis.
diff_filter_x = np.array([[1, 0, 0, 0, 1]], dtype=np.int32)
diff_filter_y = diff_filter_x.transpose()
        # First compute the average, then the difference. Doing it with a single
        # conv yields different results (and as this serves as a mask,
        # it results in substantial differences in the end).
x_avg = conv(color_signal, diff_filter_x) // 2
y_avg = conv(color_signal, diff_filter_y) // 2
diff_x = np.abs(color_signal - x_avg)
diff_y = np.abs(color_signal - y_avg)
return diff_x < diff_y, diff_x > diff_y
pick_x_red_based, pick_y_red_based = green_with_chroma_correlation(red_signal)
pick_x_blue_based, pick_y_blue_based = green_with_chroma_correlation(blue_signal)
pick_x = pick_x_red_based + pick_x_blue_based
pick_y = pick_y_red_based + pick_y_blue_based
green = pick_x * green_x + pick_y * green_y + (1 - pick_x - pick_y) * green_bilinear
return np.stack([red, green, blue], axis=ndim).astype(in_dtype)
def debayer_bilinear_npp_pattern(img, pattern):
h, w = img.shape
masks = rgb_bayer_masks((h, w), pattern)
return debayer_bilinear_npp_masks(img, masks)
def debayer_bilinear_npp_pattern_seq(seq, patterns):
f, h, w = seq.shape
assert f == len(patterns)
bayer_masks = {pattern: rgb_bayer_masks((h, w), pattern) for pattern in bayer_patterns}
seq_masks = [bayer_masks[pattern] for pattern in patterns]
reds, greens, blues = [np.stack(channel) for channel in zip(*seq_masks)]
return debayer_bilinear_npp_masks(seq, (reds, greens, blues)) |
299,947 | test try remove all membership attr values | # --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2016-2017 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
import pytest
from lib389.tests.cli import topology as default_topology
from lib389.cli_base import LogCapture, FakeArgs
from lib389.plugins import ReferentialIntegrityPlugin
from lib389.cli_conf.plugins import referint as referint_cli
@pytest.fixture(scope="module")
def topology(request):
topology = default_topology(request)
plugin = ReferentialIntegrityPlugin(topology.standalone)
if not plugin.exists():
plugin.create()
# we need to restart the server after enabling the plugin
plugin.enable()
topology.standalone.restart()
topology.logcap.flush()
return topology
def test_set_update_delay(topology):
args = FakeArgs()
args.value = 60
referint_cli.manage_update_delay(topology.standalone, None, topology.logcap.log, args)
assert topology.logcap.contains('referint-update-delay set to "60"')
topology.logcap.flush()
args.value = None
referint_cli.manage_update_delay(topology.standalone, None, topology.logcap.log, args)
assert topology.logcap.contains("referint-update-delay: 60")
topology.logcap.flush()
args.value = 0
referint_cli.manage_update_delay(topology.standalone, None, topology.logcap.log, args)
assert topology.logcap.contains('referint-update-delay set to "0"')
topology.logcap.flush()
args.value = None
referint_cli.manage_update_delay(topology.standalone, None, topology.logcap.log, args)
assert topology.logcap.contains("referint-update-delay: 0")
topology.logcap.flush()
def test_add_membership_attr(topology):
args = FakeArgs()
args.value = "member2"
referint_cli.add_membership_attr(topology.standalone, None, topology.logcap.log, args)
assert topology.logcap.contains("successfully added membership attribute")
topology.logcap.flush()
referint_cli.display_membership_attr(topology.standalone, None, topology.logcap.log, args)
assert topology.logcap.contains(": member2")
topology.logcap.flush()
def test_add_membership_attr_with_value_that_already_exists(topology):
plugin = ReferentialIntegrityPlugin(topology.standalone)
# setup test
if not "uniqueMember" in plugin.get_membership_attr():
plugin.add_membership_attr("uniqueMember")
args = FakeArgs()
args.value = "uniqueMember"
referint_cli.add_membership_attr(topology.standalone, None, topology.logcap.log, args)
assert topology.logcap.contains("already exists")
topology.logcap.flush()
def test_remove_membership_attr_with_value_that_exists(topology):
plugin = ReferentialIntegrityPlugin(topology.standalone)
# setup test
if not "uniqueMember" in plugin.get_membership_attr():
plugin.add_membership_attr("uniqueMember")
args = FakeArgs()
args.value = "uniqueMember"
referint_cli.remove_membership_attr(topology.standalone, None, topology.logcap.log, args)
assert topology.logcap.contains("successfully removed membership attribute")
topology.logcap.flush()
referint_cli.display_membership_attr(topology.standalone, None, topology.logcap.log, args)
assert not topology.logcap.contains(": uniqueMember")
topology.logcap.flush()
def test_remove_membership_attr_with_value_that_doesnt_exist(topology):
args = FakeArgs()
args.value = "whatever"
referint_cli.remove_membership_attr(topology.standalone, None, topology.logcap.log, args)
assert topology.logcap.contains('No value "{0}" found'.format(args.value))
topology.logcap.flush()
def METHOD_NAME(topology):
plugin = ReferentialIntegrityPlugin(topology.standalone)
    # setup test
membership_values = plugin.get_membership_attr()
assert len(membership_values) > 0
for val in membership_values[:-1]:
plugin.remove_membership_attr(val)
args = FakeArgs()
args.value = membership_values[-1]
referint_cli.remove_membership_attr(topology.standalone, None, topology.logcap.log, args)
assert topology.logcap.contains("Error: Failed to delete. At least one value for membership attribute should exist.")
topology.logcap.flush() |
299,948 | test propagation credentials endpoint get stolen | import json
from http import HTTPStatus
from typing import Sequence
from urllib.parse import urljoin
import pytest
from tests.common import StubDIContainer
from tests.data_for_tests.propagation_credentials import LM_HASH, NT_HASH, PASSWORD_1, PASSWORD_2
from tests.monkey_island import InMemoryCredentialsRepository
from common.credentials import Credentials, LMHash, NTHash, Password
from monkey_island.cc.repositories import ICredentialsRepository
from monkey_island.cc.resources import PropagationCredentials
from monkey_island.cc.resources.propagation_credentials import (
_configured_collection,
_stolen_collection,
)
ALL_CREDENTIALS_URL = PropagationCredentials.urls[0]
CONFIGURED_CREDENTIALS_URL = urljoin(ALL_CREDENTIALS_URL + "/", _configured_collection)
STOLEN_CREDENTIALS_URL = urljoin(ALL_CREDENTIALS_URL + "/", _stolen_collection)
CREDENTIALS_1 = Credentials(identity=None, secret=Password(password=PASSWORD_1))
CREDENTIALS_2 = Credentials(identity=None, secret=LMHash(lm_hash=LM_HASH))
CREDENTIALS_3 = Credentials(identity=None, secret=NTHash(nt_hash=NT_HASH))
CREDENTIALS_4 = Credentials(identity=None, secret=Password(password=PASSWORD_2))
@pytest.fixture
def credentials_repository():
return InMemoryCredentialsRepository()
@pytest.fixture
def flask_client(build_flask_client, credentials_repository):
container = StubDIContainer()
container.register_instance(ICredentialsRepository, credentials_repository)
with build_flask_client(container) as flask_client:
yield flask_client
def test_propagation_credentials_endpoint_get(flask_client, credentials_repository):
credentials_repository.save_configured_credentials([CREDENTIALS_1, CREDENTIALS_2])
credentials_repository.save_stolen_credentials([CREDENTIALS_3, CREDENTIALS_4])
resp = flask_client.get(ALL_CREDENTIALS_URL)
actual_propagation_credentials = [Credentials(**creds) for creds in resp.json]
assert resp.status_code == HTTPStatus.OK
assert len(actual_propagation_credentials) == 4
assert CREDENTIALS_1 in actual_propagation_credentials
assert CREDENTIALS_2 in actual_propagation_credentials
assert CREDENTIALS_3 in actual_propagation_credentials
assert CREDENTIALS_4 in actual_propagation_credentials
def pre_populate_repository(
url: str, credentials_repository: ICredentialsRepository, credentials: Sequence[Credentials]
):
if "configured" in url:
credentials_repository.save_configured_credentials(credentials)
else:
credentials_repository.save_stolen_credentials(credentials)
@pytest.mark.parametrize("url", [CONFIGURED_CREDENTIALS_URL, STOLEN_CREDENTIALS_URL])
def METHOD_NAME(flask_client, credentials_repository, url):
pre_populate_repository(url, credentials_repository, [CREDENTIALS_1, CREDENTIALS_2])
resp = flask_client.get(url)
actual_propagation_credentials = [Credentials(**creds) for creds in resp.json]
assert resp.status_code == HTTPStatus.OK
assert len(actual_propagation_credentials) == 2
assert actual_propagation_credentials[0].secret.password == PASSWORD_1
assert actual_propagation_credentials[1].secret.lm_hash == LM_HASH
def test_configured_propagation_credentials_endpoint_put(flask_client, credentials_repository):
pre_populate_repository(
CONFIGURED_CREDENTIALS_URL,
credentials_repository,
[CREDENTIALS_1, CREDENTIALS_2],
)
resp = flask_client.put(CONFIGURED_CREDENTIALS_URL, json=[])
assert resp.status_code == HTTPStatus.NO_CONTENT
resp = flask_client.get(CONFIGURED_CREDENTIALS_URL)
assert len(json.loads(resp.text)) == 0
def test_stolen_propagation_credentials_endpoint__put_not_allowed(flask_client):
resp = flask_client.put(STOLEN_CREDENTIALS_URL, json=[])
assert resp.status_code == HTTPStatus.METHOD_NOT_ALLOWED
def test_all_propagation_credentials_endpoint__put_not_allowed(flask_client):
resp = flask_client.put(ALL_CREDENTIALS_URL, json=[])
assert resp.status_code == HTTPStatus.METHOD_NOT_ALLOWED
NON_EXISTENT_COLLECTION_URL = urljoin(ALL_CREDENTIALS_URL + "/", "bogus-credentials")
def test_propagation_credentials_endpoint__get_not_found(flask_client):
resp = flask_client.get(NON_EXISTENT_COLLECTION_URL)
assert resp.status_code == HTTPStatus.NOT_FOUND
def test_propagation_credentials_endpoint__put_not_found(flask_client):
resp = flask_client.put(NON_EXISTENT_COLLECTION_URL, json=[])
assert resp.status_code == HTTPStatus.NOT_FOUND |
299,949 | run | import datetime
import logging
import localflavor
from paying_for_college.models.disclosures import (
DEFAULT_EXCLUSIONS,
HIGHEST_DEGREES,
School,
)
STATES = sorted(
[tup[0] for tup in localflavor.us.us_states.CONTIGUOUS_STATES]
+ [tup[0] for tup in localflavor.us.us_states.NON_CONTIGUOUS_STATES]
+ ["PR"]
)
DEGREE_COHORTS = {k: [] for k in HIGHEST_DEGREES.keys()}
logger = logging.getLogger(__name__)
def get_grad_level(school):
"""Consider degrees higher than graduate level '4' as graduate degrees."""
if int(school.degrees_highest) > 4:
return "4"
else:
return school.degrees_highest
def build_base_cohorts():
"""
Pre-build the base highest-degree cohorts.
DEFAULT_EXCLUSIONS are the primary keys for the home offices of schools
or school systems, plus our fake demo school, 999999.
"""
global DEGREE_COHORTS
base_query = (
School.objects.filter(operating=True, state__in=STATES)
.exclude(pk__in=DEFAULT_EXCLUSIONS)
.exclude(degrees_highest="")
)
for key in DEGREE_COHORTS:
DEGREE_COHORTS[key] += [
school for school in base_query if get_grad_level(school) == key
]
return base_query
def calculate_percentile_rank(array, score):
"""Get a school score's percentile rank from an array of cohort scores."""
true_false_array = [value <= score for value in array]
if len(true_false_array) == 0:
return
raw_rank = float(sum(true_false_array)) / len(true_false_array)
return int(round(raw_rank * 100))
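# Worked example (illustrative): calculate_percentile_rank([10, 20, 30, 40], 30)
# marks [True, True, True, False], so raw_rank is 3 / 4 and the returned rank is 75.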
def rank_by_metric(school, cohort, metric):
"""Return a school's percentile rank among a cohort by 3 metrics."""
values = [
getattr(s, metric) for s in cohort if getattr(s, metric) is not None
]
payload = {"cohort_count": len(values)}
array = [float(val) for val in values]
target_value = float(getattr(school, metric))
payload.update(
{"percentile_rank": calculate_percentile_rank(array, target_value)}
)
return payload
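# rank_by_metric returns a dict of the form
# {"cohort_count": <number of cohort schools with a non-null value for the metric>,
#  "percentile_rank": <integer 0-100, or None when the cohort has no values>}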
def METHOD_NAME(single_school=None):
"""Get percentile rankings for schools by degree, control, and state."""
count = 0
starter = datetime.datetime.now()
base_query = build_base_cohorts()
if single_school:
base_query = base_query.filter(pk=single_school)
for school in base_query:
by_degree = {}
by_state = {}
by_control = {}
count += 1
if count % 500 == 0: # pragma: no cover
logger.info("{} schools processed".format(count))
# degree_cohort is the default, national base cohort
# base query weeds out schools without state or degrees_highest values
degree_cohort = DEGREE_COHORTS.get(get_grad_level(school))
state_cohort = [
s
for s in degree_cohort
if s and s.state and s.state == school.state
]
# For school control, we want cohorts only for public and private;
# We do not want a special cohort of for-profit schools
if not school.control:
control_cohort = None
elif school.control == "Public":
control_cohort = [
s for s in degree_cohort if s.control == school.control
]
else:
control_cohort = [
s for s in degree_cohort if s.control != "Public"
]
for metric in ["grad_rate", "repay_3yr", "median_total_debt"]:
if getattr(school, metric) is None:
by_state.update({metric: None})
by_control.update({metric: None})
by_degree.update({metric: None})
else:
if state_cohort:
by_state.update(
{metric: rank_by_metric(school, state_cohort, metric)}
)
if control_cohort:
by_control.update(
{
metric: rank_by_metric(
school, control_cohort, metric
)
}
)
if degree_cohort:
by_degree.update(
{metric: rank_by_metric(school, degree_cohort, metric)}
)
school.cohort_ranking_by_state = by_state
school.cohort_ranking_by_control = by_control
school.cohort_ranking_by_highest_degree = by_degree
school.save()
logger.info(
"\nCohort script took {} to process {} schools".format(
datetime.datetime.now() - starter, count
)
) |
299,950 | registrator | """Registrators that allow pluggable data to logic transforms."""
__all__ = ["registrator"]
from typing import Callable, MutableMapping, Optional, Sequence, Text, Union
def name_variations(*args):
"""Standard name variations when registering functions with MUSE."""
def camelCase(name):
comps = name.split("_")
return comps[0] + "".join(x.title() for x in comps[1:])
def CamelCase(name): # noqa
return "".join(x.title() for x in name.split("_"))
def kebab_case(name):
return name.replace("_", "-")
def nospacecase(name):
return name.replace("_", "")
    # keep the names ordered because the first one is the most likely variation.
names = [a for a in args if a is not None]
names += (
[camelCase(n) for n in names]
+ [CamelCase(n) for n in names]
+ [kebab_case(n) for n in names]
+ [nospacecase(n) for n in names]
)
ordered = []
for n in names:
if n not in ordered:
ordered.append(n)
return ordered
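# Example output (illustrative): name_variations("foo_bar") returns
# ['foo_bar', 'fooBar', 'FooBar', 'foo-bar', 'foobar'], keeping the original name first.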
def METHOD_NAME(
decorator: Callable = None,
registry: MutableMapping = None,
logname: Optional[Text] = None,
loglevel: Optional[Text] = "Debug",
) -> Callable:
"""A decorator to create a decorator that registers functions with MUSE.
This is a decorator that takes another decorator as an argument. Hence it
returns a decorator. It simplifies and standardizes creating decorators to
register functions with muse.
The registrator expects as non-optional keyword argument a registry where
the resulting decorator will register functions.
Furthermore, the final function (the one passed to the decorator passed to
this function) will emit a standardized log-call.
Example:
        At its simplest, creating a registrator and registering a function happens by
        first declaring a registry.
>>> REGISTRY = {}
In general, it will be a variable owned directly by a module, hence the
all-caps. Creating the registrator then follows:
>>> from muse.registration import registrator
>>> @registrator(registry=REGISTRY, logname='my stuff',
... loglevel='Info')
... def register_mystuff(function):
... return function
This registrator does nothing more than register the function. A more
interesting example is given below. Then a function can be registered:
>>> @register_mystuff(name='yoyo')
... def my_registered_function(a, b):
... return a + b
The argument 'yoyo' is optional. It adds aliases for the function in the
registry. In any case, functions are registered with default aliases
corresponding to standard name variations, e.g. CamelCase, camelCase,
and kebab-case, as illustrated below:
>>> REGISTRY['my_registered_function'] is my_registered_function
True
>>> REGISTRY['my-registered-function'] is my_registered_function
True
>>> REGISTRY['yoyo'] is my_registered_function
True
A more interesting case would involve the registrator automatically
adding functionality to the input function. For instance, the inputs
could be manipulated and the result of the function could be
automatically transformed to a string:
>>> from muse.registration import registrator
>>> @registrator(registry=REGISTRY)
... def register_mystuff(function):
... from functools import wraps
...
... @wraps(function)
... def decorated(a, b) -> str:
... result = function(2 * a, 3 * b)
... return str(result)
...
... return decorated
>>> @register_mystuff
... def other(a, b):
... return a + b
>>> isinstance(REGISTRY['other'](-3, 2), str)
True
>>> REGISTRY['other'](-3, 2) == "0"
True
"""
from functools import wraps
    # allows specifying the registered name as a keyword argument
if decorator is None:
return lambda x: METHOD_NAME(
x, loglevel=loglevel, logname=logname, registry=registry
)
if registry is None:
raise Exception("registry keyword must be given and cannot be None")
if logname is None:
logname = decorator.__name__.replace("register_", "")
@wraps(decorator)
def register(
function=None,
name: Optional[Union[Text, Sequence[Text]]] = None,
vary_name: bool = True,
overwrite: bool = False,
):
from inspect import isclass, signature
from itertools import chain
from logging import getLogger
        # allows specifying the registered name as a keyword argument
if function is None:
return lambda x: register(
x, name=name, vary_name=vary_name, overwrite=overwrite
)
if name is None:
names = [function.__name__]
elif isinstance(name, Text):
names = [name, function.__name__]
else:
names = list(name) + [function.__name__]
# all registered filters will use the same logger, at least for the
# default logging done in the decorated function
logger = getLogger(function.__module__)
msg = "Computing {}: {}".format(logname, names[0])
assert decorator is not None
if "name" in signature(decorator).parameters:
inner_decorated = decorator(function, names[0])
else:
inner_decorated = decorator(function)
if not isclass(function):
@wraps(function)
def decorated(*args, **kwargs):
if loglevel is not None and hasattr(logger, loglevel):
getattr(logger, loglevel)(msg)
result = inner_decorated(*args, **kwargs)
return result
else:
decorated = function
# There's just one name for the decorator
assert registry is not None
if not vary_name:
if function.__name__ in registry and not overwrite:
msg = f"A {logname} with the name {function.__name__} already exists"
getLogger(__name__).warning(msg)
return
registry[function.__name__] = decorated
else:
for n in chain(name_variations(function.__name__, *names)):
if n in registry and not overwrite:
msg = f"A {logname} with the name {n} already exists"
getLogger(__name__).warning(msg)
return
registry[n] = decorated
return decorated
return register |
299,951 | name | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionByWorkspaceResult',
'AwaitableGetPrivateEndpointConnectionByWorkspaceResult',
'get_private_endpoint_connection_by_workspace',
'get_private_endpoint_connection_by_workspace_output',
]
@pulumi.output_type
class GetPrivateEndpointConnectionByWorkspaceResult:
"""
The Private Endpoint Connection resource.
"""
def __init__(__self__, id=None, METHOD_NAME=None, private_endpoint=None, private_link_service_connection_state=None, provisioning_state=None, system_data=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", METHOD_NAME)
if private_endpoint and not isinstance(private_endpoint, dict):
raise TypeError("Expected argument 'private_endpoint' to be a dict")
pulumi.set(__self__, "private_endpoint", private_endpoint)
if private_link_service_connection_state and not isinstance(private_link_service_connection_state, dict):
raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(METHOD_NAME="privateEndpoint")
def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
"""
The resource of private end point.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(METHOD_NAME="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(METHOD_NAME="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the private endpoint connection resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(METHOD_NAME="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionByWorkspaceResult(GetPrivateEndpointConnectionByWorkspaceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPrivateEndpointConnectionByWorkspaceResult(
id=self.id,
METHOD_NAME=self.METHOD_NAME,
private_endpoint=self.private_endpoint,
private_link_service_connection_state=self.private_link_service_connection_state,
provisioning_state=self.provisioning_state,
system_data=self.system_data,
type=self.type)
def get_private_endpoint_connection_by_workspace(private_endpoint_connection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionByWorkspaceResult:
"""
Get a private endpoint connection.
:param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace
"""
__args__ = dict()
__args__['privateEndpointConnectionName'] = private_endpoint_connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:desktopvirtualization/v20230707preview:getPrivateEndpointConnectionByWorkspace', __args__, opts=opts, typ=GetPrivateEndpointConnectionByWorkspaceResult).value
return AwaitableGetPrivateEndpointConnectionByWorkspaceResult(
id=pulumi.get(__ret__, 'id'),
METHOD_NAME=pulumi.get(__ret__, 'name'),
private_endpoint=pulumi.get(__ret__, 'private_endpoint'),
private_link_service_connection_state=pulumi.get(__ret__, 'private_link_service_connection_state'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_private_endpoint_connection_by_workspace)
def get_private_endpoint_connection_by_workspace_output(private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionByWorkspaceResult]:
"""
Get a private endpoint connection.
:param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace
"""
... |
299,952 | benchmark native unbatch | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.experimental.unbatch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class UnbatchBenchmark(test.Benchmark):
"""Benchmarks for `tf.data.experimental.unbatch()`."""
def METHOD_NAME(self):
batch_sizes = [1, 2, 5, 10, 20, 50]
elems_per_trial = 10000
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors("element").repeat(None)
batch_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
dataset = dataset.batch(batch_size_placeholder)
dataset = dataset.apply(batching.unbatch())
dataset = dataset.skip(elems_per_trial)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator = dataset_ops.make_initializable_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
for batch_size in batch_sizes:
deltas = []
for _ in range(5):
sess.run(
iterator.initializer,
feed_dict={batch_size_placeholder: batch_size})
start = time.time()
sess.run(next_element.op)
end = time.time()
deltas.append((end - start) / elems_per_trial)
median_wall_time = np.median(deltas)
self.report_benchmark(
iters=10000,
wall_time=median_wall_time,
name="native_batch_size_%d" %
batch_size)
# Include a benchmark of the previous `unbatch()` implementation that uses
# a composition of more primitive ops. Eventually we'd hope to generate code
# that is as good in both cases.
def benchmark_old_unbatch_implementation(self):
batch_sizes = [1, 2, 5, 10, 20, 50]
elems_per_trial = 10000
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors("element").repeat(None)
batch_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
dataset = dataset.batch(batch_size_placeholder)
dataset = dataset.flat_map(dataset_ops.Dataset.from_tensor_slices)
dataset = dataset.skip(elems_per_trial)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator = dataset_ops.make_initializable_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
for batch_size in batch_sizes:
deltas = []
for _ in range(5):
sess.run(
iterator.initializer,
feed_dict={batch_size_placeholder: batch_size})
start = time.time()
sess.run(next_element.op)
end = time.time()
deltas.append((end - start) / elems_per_trial)
median_wall_time = np.median(deltas)
self.report_benchmark(
iters=10000,
wall_time=median_wall_time,
name="unfused_batch_size_%d" %
batch_size)
if __name__ == "__main__":
test.main() |
299,953 | test can subscribe to startup hooks | # Copyright (c) 2009 Aldo Cortesi
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Anshuman Bhaduri
# Copyright (c) 2012 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
from multiprocessing import Value
import pytest
import libqtile.log_utils
import libqtile.utils
from libqtile import hook
from libqtile.resources import default_config
from test.conftest import BareConfig
# TODO: more tests required.
# 1. Check all hooks that can be fired
class Call:
def __init__(self, val):
self.val = val
def __call__(self, val):
self.val = val
class NoArgCall(Call):
def __call__(self):
self.val += 1
@pytest.fixture
def hook_fixture():
libqtile.log_utils.init_log()
yield
hook.clear()
def test_cannot_fire_unknown_event():
with pytest.raises(libqtile.utils.QtileError):
hook.fire("unknown")
@pytest.mark.usefixtures("hook_fixture")
def test_hook_calls_subscriber():
test = Call(0)
hook.subscribe.group_window_add(test)
hook.fire("group_window_add", 8)
assert test.val == 8
@pytest.mark.usefixtures("hook_fixture")
def test_hook_calls_subscriber_async():
val = 0
async def co(new_val):
nonlocal val
val = new_val
hook.subscribe.group_window_add(co)
hook.fire("group_window_add", 8)
assert val == 8
@pytest.mark.usefixtures("hook_fixture")
def test_hook_calls_subscriber_async_co():
val = 0
async def co(new_val):
nonlocal val
val = new_val
hook.subscribe.group_window_add(co(8))
hook.fire("group_window_add")
assert val == 8
@pytest.mark.usefixtures("hook_fixture")
def test_hook_calls_subscriber_async_in_existing_loop():
async def t():
val = 0
async def co(new_val):
nonlocal val
val = new_val
hook.subscribe.group_window_add(co(8))
hook.fire("group_window_add")
await asyncio.sleep(0)
assert val == 8
asyncio.run(t())
@pytest.mark.usefixtures("hook_fixture")
def test_subscribers_can_be_added_removed():
test = Call(0)
hook.subscribe.group_window_add(test)
assert hook.subscriptions
hook.clear()
assert not hook.subscriptions
@pytest.mark.usefixtures("hook_fixture")
def test_can_unsubscribe_from_hook():
test = Call(0)
hook.subscribe.group_window_add(test)
hook.fire("group_window_add", 3)
assert test.val == 3
hook.unsubscribe.group_window_add(test)
hook.fire("group_window_add", 4)
assert test.val == 3
def METHOD_NAME(manager_nospawn):
config = BareConfig
for attr in dir(default_config):
if not hasattr(config, attr):
setattr(config, attr, getattr(default_config, attr))
manager = manager_nospawn
manager.startup_once_calls = Value("i", 0)
manager.startup_calls = Value("i", 0)
manager.startup_complete_calls = Value("i", 0)
def inc_startup_once_calls():
manager.startup_once_calls.value += 1
def inc_startup_calls():
manager.startup_calls.value += 1
def inc_startup_complete_calls():
manager.startup_complete_calls.value += 1
hook.subscribe.startup_once(inc_startup_once_calls)
hook.subscribe.startup(inc_startup_calls)
hook.subscribe.startup_complete(inc_startup_complete_calls)
manager.start(config)
assert manager.startup_once_calls.value == 1
assert manager.startup_calls.value == 1
assert manager.startup_complete_calls.value == 1
# Restart and check that startup_once doesn't fire again
manager.terminate()
manager.start(config, no_spawn=True)
assert manager.startup_once_calls.value == 1
assert manager.startup_calls.value == 2
assert manager.startup_complete_calls.value == 2
@pytest.mark.usefixtures("hook_fixture")
def test_can_update_by_selection_change(manager):
test = Call(0)
hook.subscribe.selection_change(test)
hook.fire("selection_change", "hello")
assert test.val == "hello"
@pytest.mark.usefixtures("hook_fixture")
def test_can_call_by_selection_notify(manager):
test = Call(0)
hook.subscribe.selection_notify(test)
hook.fire("selection_notify", "hello")
assert test.val == "hello"
@pytest.mark.usefixtures("hook_fixture")
def test_resume_hook(manager):
test = NoArgCall(0)
hook.subscribe.resume(test)
hook.fire("resume")
assert test.val == 1 |
299,954 | version | # Copyright 2023 Avaiga Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import uuid
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Set, Union
from taipy.config.common._template_handler import _TemplateHandler as _tpl
from taipy.config.common._validate_id import _validate_id
from taipy.config.common.scope import Scope
from .._entity._entity import _Entity
from .._entity._labeled import _Labeled
from .._entity._properties import _Properties
from .._entity._reload import _Reloader, _self_reload, _self_setter
from .._version._version_manager_factory import _VersionManagerFactory
from ..data._data_manager_factory import _DataManagerFactory
from ..data.data_node import DataNode
from ..exceptions.exceptions import NonExistingDataNode
from .task_id import TaskId
if TYPE_CHECKING:
from ..job.job import Job
class Task(_Entity, _Labeled):
"""Hold a user function that will be executed, its parameters and the results.
A `Task` brings together the user code as function, the inputs and the outputs as data nodes
(instances of the `DataNode^` class).
Attributes:
config_id (str): The identifier of the `TaskConfig^`.
properties (dict[str, Any]): A dictionary of additional properties.
        function (callable): The Python function to execute. The _function_ must take as parameters the
            data referenced by the input data nodes and return the data referenced by the output data nodes.
input (Union[DataNode^, List[DataNode^]]): The list of inputs.
output (Union[DataNode^, List[DataNode^]]): The list of outputs.
id (str): The unique identifier of the task.
owner_id (str): The identifier of the owner (sequence_id, scenario_id, cycle_id) or None.
parent_ids (Optional[Set[str]]): The set of identifiers of the parent sequences.
version (str): The string indicates the application version of the task to instantiate. If not provided, the
latest version is used.
skippable (bool): If True, indicates that the task can be skipped if no change has been made on inputs. The
default value is _False_.
"""
_ID_PREFIX = "TASK"
__ID_SEPARATOR = "_"
_MANAGER_NAME = "task"
def __init__(
self,
config_id: str,
properties: Dict[str, Any],
function,
input: Optional[Iterable[DataNode]] = None,
output: Optional[Iterable[DataNode]] = None,
id: Optional[TaskId] = None,
owner_id: Optional[str] = None,
parent_ids: Optional[Set[str]] = None,
METHOD_NAME: Optional[str] = None,
skippable: bool = False,
):
self.config_id = _validate_id(config_id)
self.id = id or TaskId(self.__ID_SEPARATOR.join([self._ID_PREFIX, self.config_id, str(uuid.uuid4())]))
self.owner_id = owner_id
self._parent_ids = parent_ids or set()
self.__input = {dn.config_id: dn for dn in input or []}
self.__output = {dn.config_id: dn for dn in output or []}
self._function = function
self._version = METHOD_NAME or _VersionManagerFactory._build_manager()._get_latest_version()
self._skippable = skippable
self._properties = _Properties(self, **properties)
def __hash__(self):
return hash(self.id)
def __eq__(self, other):
return self.id == other.id
def __getstate__(self):
return vars(self)
def __setstate__(self, state):
vars(self).update(state)
def __getattr__(self, attribute_name):
protected_attribute_name = _validate_id(attribute_name)
if protected_attribute_name in self._properties:
return _tpl._replace_templates(self._properties[protected_attribute_name])
if protected_attribute_name in self.input:
return self.input[protected_attribute_name]
if protected_attribute_name in self.output:
return self.output[protected_attribute_name]
raise AttributeError(f"{attribute_name} is not an attribute of task {self.id}")
@property
def properties(self):
self._properties = _Reloader()._reload(self._MANAGER_NAME, self)._properties
return self._properties
def get_parents(self):
"""Get parents of the task."""
from ... import core as tp
return tp.get_parents(self)
@property # type: ignore
@_self_reload(_MANAGER_NAME)
def parent_ids(self):
return self._parent_ids
@property
def input(self) -> Dict[str, DataNode]:
return self.__input
@property
def output(self) -> Dict[str, DataNode]:
return self.__output
@property
def data_nodes(self) -> Dict[str, DataNode]:
return {**self.input, **self.output}
@property # type: ignore
@_self_reload(_MANAGER_NAME)
def function(self):
return self._function
@function.setter # type: ignore
@_self_setter(_MANAGER_NAME)
def function(self, val):
self._function = val
@property # type: ignore
@_self_reload(_MANAGER_NAME)
def skippable(self):
return self._skippable
@skippable.setter # type: ignore
@_self_setter(_MANAGER_NAME)
def skippable(self, val):
self._skippable = val
@property
def scope(self) -> Scope:
"""Retrieve the lowest scope of the task based on its data nodes.
Returns:
The lowest scope present in input and output data nodes or GLOBAL if there are
either no input or no output.
"""
data_nodes = list(self.__input.values()) + list(self.__output.values())
scope = Scope(min(dn.scope for dn in data_nodes)) if len(data_nodes) != 0 else Scope.GLOBAL
return scope
@property
def METHOD_NAME(self):
return self._version
def submit(
self,
callbacks: Optional[List[Callable]] = None,
force: bool = False,
wait: bool = False,
timeout: Optional[Union[float, int]] = None,
) -> "Job": # noqa
"""Submit the task for execution.
Parameters:
callbacks (List[Callable]): The list of callable functions to be called on status
change.
force (bool): Force execution even if the data nodes are in cache.
wait (bool): Wait for the orchestrated job created from the task submission to be finished in asynchronous
mode.
timeout (Union[float, int]): The maximum number of seconds to wait for the job to be finished before
returning.
Returns:
The created `Job^`.
"""
from ._task_manager_factory import _TaskManagerFactory
return _TaskManagerFactory._build_manager()._submit(self, callbacks, force, wait, timeout)
def get_label(self) -> str:
"""Returns the task simple label prefixed by its owner label.
Returns:
The label of the task as a string.
"""
return self._get_label()
def get_simple_label(self) -> str:
"""Returns the task simple label.
Returns:
The simple label of the task as a string.
"""
return self._get_simple_label() |
299,955 | test basic map operations | from javatests import Dict2JavaTest
import unittest, test.test_support
# Test the java.util.Map interface of org.python.core.PyDictionary.
# This tests the functionality of being able to pass a dictionaries
# created in Jython to a java method, and the ability to manipulate
# the dictionary object once in Java code. The Java Dict2JavaTest is
# used to run some tests in Java code since they cannot be done on
# the Jython side.
class JythonMapInJavaTest(unittest.TestCase):
def checkcontains(self, keys):
for k in keys:
self.failUnless(k in self.testdict)
self.failUnless(self.testmap.containsKey(k))
def checkdoesntcontain(self, keys):
for k in keys:
self.failIf(k in self.testdict)
self.failIf(self.testmap.containsKey(k))
def checkvalues(self, *keyvalues):
for k, v in keyvalues:
self.assertEquals(v, self.testdict[k])
def checksize(self, correctsize):
self.assertEquals(self.testmap.size(), len(self.testdict))
self.assertEquals(self.testmap.size(), correctsize)
def maketestdict(self, base):
self.testdict = base
self.testmap = Dict2JavaTest(self.testdict)
def METHOD_NAME(self):
self.maketestdict({"a":"x", "b":"y", "c":"z", "d": None, None: "foo"})
# Make sure we see it on the java side
self.assertEquals(len(self.testdict), self.testmap.size())
self.checkcontains('abcd')
# Add {"e":"1", "f":null, "g":"2"} using the Map.putAll method
oldlen = len(self.testdict)
self.failUnless(self.testmap.test_putAll_efg())
self.checksize(oldlen + 3)
self.checkvalues(('e', '1'), ('f', None), ('g', '2'))
# test Map.get method, get "g" and "d" test will throw an exception if fail
self.failUnless(self.testmap.test_get_gd())
# remove elements with keys "a" and "c" with the Map.remove method
oldlen = len(self.testdict)
self.failUnless(self.testmap.test_remove_ac())
self.checksize(oldlen - 2)
self.checkdoesntcontain('ac')
# test Map.put method, adds {"h":null} and {"i": Integer(3)} and {"g": "3"}
# "g" replaces a previous value of "2"
oldlen = len(self.testdict)
self.failUnless(self.testmap.test_put_hig())
self.checksize(oldlen + 2)
self.checkvalues(('h', None), ('i', 3), ('g', '3'))
self.failUnless(self.testmap.test_java_mapentry())
def test_entryset(self):
self.maketestdict({"h":"x", "b":"y", "g":"z", "e": None, None: "foo", "d":7})
set = self.testmap.entrySet()
self.checksize(set.size())
# Make sure the set is consistent with the self.testdictionary
for entry in set:
self.failUnless(self.testdict.has_key(entry.getKey()))
self.assertEquals(self.testdict[entry.getKey()], entry.getValue())
self.failUnless(set.contains(entry))
# make sure changes in the set are reflected in the self.testdictionary
for entry in set:
if entry.getKey() == "h":
hentry = entry
if entry.getKey() == "e":
eentry = entry
# Make sure nulls and non Map.Entry object do not match anything in the set
self.failUnless(self.testmap.test_entry_set_nulls())
self.failUnless(set.remove(eentry))
self.failIf(set.contains(eentry))
self.failIf("e" in self.testdict)
self.failUnless(set.remove(hentry))
self.failIf(set.contains(hentry))
self.failIf("h" in self.testdict)
self.checksize(set.size())
oldlen = set.size()
self.failIf(set.remove(eentry))
self.checksize(oldlen)
# test Set.removeAll method
oldlen = len(self.testdict)
elist = [ entry for entry in set if entry.key in ["b", "g", "d", None]]
self.assertEqual(len(elist), 4)
self.failUnless(set.removeAll(elist))
self.checkdoesntcontain('bdg')
# can't check for None in self.testmap, so do it just for testdict
self.failIf(None in self.testdict)
self.checksize(oldlen - 4)
itr = set.iterator()
while (itr.hasNext()):
val = itr.next()
itr.remove()
self.failUnless(set.isEmpty())
self.checksize(0)
def test_keyset(self):
self.maketestdict({})
self.testmap.put("foo", "bar")
self.testmap.put("num", 5)
self.testmap.put(None, 4.3)
self.testmap.put(34, None)
keyset = self.testmap.keySet()
self.checksize(4)
self.failUnless(keyset.remove(None))
self.checksize(3)
self.failIf(keyset.contains(None))
self.failUnless(keyset.remove(34))
self.checksize(2)
self.failIf(keyset.contains(34))
itr = keyset.iterator()
while itr.hasNext():
key = itr.next()
if key == "num":
itr.remove()
self.checksize(1)
def test_values(self):
self.maketestdict({})
self.testmap.put("foo", "bar")
self.testmap.put("num", "bar")
self.testmap.put(None, 3.2)
self.testmap.put(34, None)
values = self.testmap.values()
self.assertEquals(values.size(), len(self.testdict))
self.checksize(4)
self.failUnless(values.remove(None))
self.checksize(3)
self.assertEquals(values.size(), len(self.testdict))
itr = values.iterator()
while itr.hasNext():
val = itr.next()
if val == "bar":
itr.remove()
self.checksize(1)
self.assertEquals(values.size(), len(self.testdict))
values.clear()
self.failUnless(values.isEmpty())
self.checksize(0)
def test_main():
test.test_support.run_unittest(JythonMapInJavaTest)
if __name__ == '__main__':
test_main() |
299,956 | test structure parse csv with error | import os
from httpx import AsyncClient
from tests.conftest import test_dir
async def test_parse_csv(
test_client: AsyncClient,
csv_beneficiary_filepath: str,
get_manager_jwt_93: str,
):
with open(csv_beneficiary_filepath, "rb") as file:
response = await test_client.post(
"/v1/convert-file/beneficiaries",
files={"upload_file": ("filename", file, "text/csv")},
headers={"Authorization": "Bearer " + get_manager_jwt_93},
)
assert response.status_code == 200
assert response.json()[0]["data"]["Identifiant dans le SI*"] == "1234"
assert response.json()[0]["data"]["Prénom*"] == "Charlotte"
async def test_parse_csv_with_all_date_formats(
test_client: AsyncClient,
csv_beneficiary_with_all_date_formats_filepath: str,
get_manager_jwt_93: str,
):
with open(csv_beneficiary_with_all_date_formats_filepath, "rb") as file:
response = await test_client.post(
"/v1/convert-file/beneficiaries",
files={"upload_file": ("filename", file, "text/csv")},
headers={"Authorization": "Bearer " + get_manager_jwt_93},
)
assert response.json()[0]["data"]["Identifiant dans le SI*"] == "1234"
assert response.json()[0]["data"]["Prénom*"] == "Charlotte"
assert response.json()[0]["data"]["Date de naissance*"] == "1998-05-25"
assert response.json()[1]["data"]["Date de naissance*"] == "1997-04-22"
async def test_parse_csv_errors(
test_client: AsyncClient,
get_manager_jwt_93: str,
):
path = os.path.join(test_dir, "fixtures", "import_beneficiaires_buggy.csv")
with open(path, "rb") as file:
response = await test_client.post(
"/v1/convert-file/beneficiaries",
files={"upload_file": ("filename", file, "text/csv")},
headers={"Authorization": "Bearer " + get_manager_jwt_93},
)
assert response.json()[0]["errors"][0]["key"] == "Date de naissance*"
assert (
response.json()[0]["errors"][0]["error"] == "none is not an allowed value"
)
assert response.json()[1]["errors"][0]["key"] == "Date de naissance*"
assert (
response.json()[1]["errors"][0]["error"]
== "Value is not a known date format. Valid format: YYYY-MM-DD."
)
async def test_structure_parse_csv(
test_client: AsyncClient,
csv_structure_filepath: str,
get_manager_jwt_93: str,
):
with open(csv_structure_filepath, "rb") as file:
response = await test_client.post(
"/v1/convert-file/structures",
files={"upload_file": ("filename", file, "text/csv")},
headers={"Authorization": "Bearer " + get_manager_jwt_93},
)
assert response.status_code == 200
async def METHOD_NAME(
test_client: AsyncClient,
csv_structure_buggy_filepath: str,
get_manager_jwt_93: str,
):
with open(csv_structure_buggy_filepath, "rb") as file:
response = await test_client.post(
"/v1/convert-file/structures",
files={"upload_file": ("filename", file, "text/csv")},
headers={"Authorization": "Bearer " + get_manager_jwt_93},
)
data = response.json()
structure_with_errors = [
structure for structure in data if structure["valid"] is False
]
assert len(structure_with_errors) == 3
assert structure_with_errors[0]["errors"][0]["key"] == "Nom"
assert (
structure_with_errors[0]["errors"][0]["error"]
== "none is not an allowed value"
)
assert len(structure_with_errors[1]["errors"]) == 4
assert structure_with_errors[1]["errors"][0]["key"] == "Site web"
assert (
structure_with_errors[1]["errors"][0]["error"]
== "invalid or missing URL scheme"
)
assert structure_with_errors[1]["errors"][1]["key"] == "Courriel"
assert (
structure_with_errors[1]["errors"][1]["error"]
== "value is not a valid email address"
)
assert structure_with_errors[1]["errors"][2]["key"] == "Siret"
assert (
structure_with_errors[1]["errors"][2]["error"]
== "value is not a valid siret"
)
assert structure_with_errors[1]["errors"][3]["key"] == "Courriel responsable"
assert (
structure_with_errors[1]["errors"][3]["error"]
== "value is not a valid email address"
)
assert len(structure_with_errors[2]["errors"]) == 3
assert structure_with_errors[2]["errors"][0]["key"] == "Site web"
assert (
structure_with_errors[2]["errors"][0]["error"]
== "invalid or missing URL scheme"
)
assert structure_with_errors[2]["errors"][1]["key"] == "Courriel responsable"
assert (
structure_with_errors[2]["errors"][1]["error"]
== "value is not a valid email address"
)
assert structure_with_errors[2]["errors"][2]["key"] == "Téléphones responsable"
assert (
structure_with_errors[2]["errors"][2]["error"]
== "value is not a valid phone number"
)
async def test_structure_parse_csv_with_missing_column_should_not_fail(
test_client: AsyncClient,
csv_structure_missing_key_filepath: str,
get_manager_jwt_93: str,
):
with open(csv_structure_missing_key_filepath, "rb") as file:
response = await test_client.post(
"/v1/convert-file/structures",
files={"upload_file": ("filename", file, "text/csv")},
headers={"Authorization": "Bearer " + get_manager_jwt_93},
)
data = response.json()
structure_with_errors = [
structure for structure in data if structure["valid"] is False
]
assert len(structure_with_errors) == 0 |
299,957 | get url | """
Basic scraper worker - should be inherited by workers to scrape specific types of content
"""
import collections
import requests
import random
import json
import abc
from pathlib import Path
from backend.lib.worker import BasicWorker
from common.config_manager import config
class BasicHTTPScraper(BasicWorker, metaclass=abc.ABCMeta):
"""
    Abstract HTTP scraper class
    The job queue is continually checked for jobs of this scraper's type. If any are found,
    the URL for that job is scraped and the result is parsed (as-is by default, or e.g. as
    JSON in the BasicJSONScraper subclass below). The parsed data is then passed to a
    processor method for further handling.
"""
log_level = "warning"
_logger_method = None
category = "Collector"
def __init__(self, job, logger=None, manager=None, modules=None):
"""
Set up database connection - we need one to store the thread data
"""
super().__init__(logger=logger, manager=manager, job=job, modules=modules)
self.prefix = self.type.split("-")[0]
# Names were updated to be more consistent with the rest of the codebase, but we still need to support the old database
# TODO: update database.sql names and create migrate script, then remove this
self.prefix = {
"fourchan": "4chan",
"eightkun": "8kun",
"eightchan": "8chan",
}[self.prefix]
if not hasattr(logger, self.log_level):
self.log_level = "warning"
self._logger_method = getattr(logger, self.log_level)
def work(self):
"""
Scrape something
This requests data according to the job's parameter - either from a
local file or from a URL. The job is then either finished or released
depending on whether that was successful, and the data is processed
further if available.
"""
if "file" in self.job.details:
# if the file is available locally, use that file
id = self.job.details["file"]
local_path = Path(self.job.details["file"])
if not local_path.exists():
self.job.finish()
self.log.error("Scraper was told to use source file %s, but file does not exist, cancelling job." % self.job.details["file"])
return
with local_path.open() as source:
datafields = {
"status_code": 200,
"content": source.read()
}
data = collections.namedtuple("object", datafields.keys())(*datafields.values())
else:
# if not, see what URL we need to request data from
url = self.METHOD_NAME()
try:
# see if any proxies were configured that would work for this URL
protocol = url.split(":")[0]
if protocol in config.get('SCRAPE_PROXIES', []) and config.get('SCRAPE_PROXIES')[protocol]:
proxies = {protocol: random.choice(config.get('SCRAPE_PROXIES')[protocol])}
else:
proxies = None
# do the request!
data = requests.get(url, timeout=config.get('SCRAPE_TIMEOUT', 60), proxies=proxies, headers={"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1 Safari/605.1.15"})
except (requests.exceptions.RequestException, ConnectionRefusedError) as e:
if self.job.data["attempts"] > 2:
self.job.finish()
self.log.error("Could not finish request for %s (%s), cancelling job" % (url, e))
else:
self.job.release(delay=random.randint(45,60))
self.log.info("Could not finish request for %s (%s), releasing job" % (url, e))
return
if "board" in self.job.details:
id = self.job.details["board"] + "/" + self.job.data["remote_id"]
else:
id = self.job.data["remote_id"]
if data.status_code == 404:
# this should be handled differently from an actually erroneous response
# because it may indicate that the resource has been deleted
self.not_found()
else:
parsed_data = self.parse(data.content)
if parsed_data is None:
if self.job.data["attempts"] < 2:
self.log.info("Data for %s %s could not be parsed, retrying later" % (self.type, id))
self.job.release(delay=random.choice(range(15, 45))) # try again later
else:
self._logger_method("Data for %s %s could not be parsed after %i attempts, aborting" % (
self.type, id, self.job.data["attempts"]))
self.job.finish()
return
# finally, pass it on
self.process(parsed_data)
self.after_process()
def after_process(self):
"""
After processing, declare job finished
"""
self.job.finish()
def not_found(self):
"""
Called if the job could not be completed because the request returned
a 404 response. This does not necessarily indicate failure.
"""
self.job.finish()
def parse(self, data):
"""
Parse incoming data
Can be overridden to, e.g., parse JSON data
:param data: Body of HTTP request
:return: Parsed data
"""
return data
@abc.abstractmethod
def process(self, data):
"""
Process scraped data
:param data: Parsed JSON data
"""
pass
@abc.abstractmethod
def METHOD_NAME(self):
"""
Get URL to scrape
:return string: URL to scrape
"""
pass
class BasicJSONScraper(BasicHTTPScraper, metaclass=abc.ABCMeta):
"""
Scraper for JSON-based data
"""
def parse(self, data):
"""
Parse data as JSON
:param str data: Incoming JSON-encoded data
:return: Decoded JSON object
"""
try:
return json.loads(data)
except json.JSONDecodeError:
return None |
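# A minimal usage sketch: a concrete scraper built on the classes above. The class name,
# job type string and URL are illustrative assumptions, not part of the original module;
# the abstract URL getter keeps the METHOD_NAME placeholder used throughout this file.
class ExampleJSONScraper(BasicJSONScraper):
    type = "example-scraper"  # assumed job type handled by this worker

    def METHOD_NAME(self):
        # Build the URL to scrape for the current job (hypothetical endpoint)
        return "https://example.com/api/items/%s.json" % self.job.data["remote_id"]

    def process(self, data):
        # Handle the decoded JSON; here we only log how many fields were received
        self.log.info("Scraped %i fields for job %s" % (len(data), self.job.data["remote_id"]))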
299,958 | test sync single blob to blob | import json
import os
import shutil
import time
import urllib
from collections import namedtuple
import utility as util
import unittest
# Temporary tests (mostly copy-pasted from blob tests) to guarantee simple sync scenarios still work
# TODO Replace with better tests in the future
class Blob_Sync_User_Scenario(unittest.TestCase):
def test_sync_single_blob_with_local(self):
# create file of size 1KB.
filename = "test_1kb_blob_sync.txt"
file_path = util.create_test_file(filename, 1024)
blob_path = util.get_resource_sas(filename)
# Upload 1KB file using azcopy.
src = file_path
dest = blob_path
result = util.Command("cp").add_arguments(src).add_arguments(dest). \
add_flags("log-level", "info").execute_azcopy_copy_command()
self.assertTrue(result)
# Verifying the uploaded blob.
# the resource local path should be the first argument for the azcopy validator.
# the resource sas should be the second argument for azcopy validator.
resource_url = util.get_resource_sas(filename)
result = util.Command("testBlob").add_arguments(file_path).add_arguments(resource_url).execute_azcopy_verify()
self.assertTrue(result)
# Sync 1KB file to local using azcopy.
src = blob_path
dest = file_path
result = util.Command("sync").add_arguments(src).add_arguments(dest). \
add_flags("log-level", "info").execute_azcopy_copy_command()
self.assertTrue(result)
# Sync 1KB file to blob using azcopy.
# reset local file lmt first
util.create_test_file(filename, 1024)
src = file_path
dest = blob_path
result = util.Command("sync").add_arguments(src).add_arguments(dest). \
add_flags("log-level", "info").execute_azcopy_copy_command()
self.assertTrue(result)
def test_sync_entire_directory_with_local(self):
dir_name = "dir_sync_test"
dir_path = util.create_test_n_files(1024, 10, dir_name)
# create sub-directory inside directory
sub_dir_name = os.path.join(dir_name, "sub_dir_sync_test")
util.create_test_n_files(1024, 10, sub_dir_name)
# upload the directory with 20 files
# upload the directory
# execute azcopy command
result = util.Command("copy").add_arguments(dir_path).add_arguments(util.test_container_url). \
add_flags("recursive", "true").add_flags("log-level", "info").execute_azcopy_copy_command()
self.assertTrue(result)
# execute the validator.
vdir_sas = util.get_resource_sas(dir_name)
result = util.Command("testBlob").add_arguments(dir_path).add_arguments(vdir_sas). \
add_flags("is-object-dir", "true").execute_azcopy_verify()
self.assertTrue(result)
# sync to local
src = vdir_sas
dst = dir_path + "/"
result = util.Command("sync").add_arguments(src).add_arguments(dst).add_flags("log-level", "info")\
.execute_azcopy_copy_command()
self.assertTrue(result)
# sync back to blob after recreating the files
util.create_test_n_files(1024, 10, sub_dir_name)
src = dir_path
dst = vdir_sas
result = util.Command("sync").add_arguments(src).add_arguments(dst).add_flags("log-level", "info") \
.execute_azcopy_copy_command()
self.assertTrue(result)
def METHOD_NAME(self):
content_file_name = "test_1kb_blob_sync.txt"
content_file_path = util.create_test_file(content_file_name, 1024)
# create source and destination blobs of size 1KB.
# make sure to create the destination first so that it has an older lmt
src_blob_path = util.get_resource_sas("test_1kb_blob_sync_src.txt")
dst_blob_path = util.get_resource_sas("test_1kb_blob_sync_dst.txt")
result = util.Command("cp").add_arguments(content_file_path).add_arguments(dst_blob_path). \
add_flags("log-level", "info").execute_azcopy_copy_command()
self.assertTrue(result)
result = util.Command("cp").add_arguments(content_file_path).add_arguments(src_blob_path). \
add_flags("log-level", "info").execute_azcopy_copy_command()
self.assertTrue(result)
# verifying the uploaded blobs.
# the resource local path should be the first argument for the azcopy validator.
# the resource sas should be the second argument for azcopy validator.
result = util.Command("testBlob").add_arguments(content_file_path).add_arguments(src_blob_path).execute_azcopy_verify()
self.assertTrue(result)
result = util.Command("testBlob").add_arguments(content_file_path).add_arguments(dst_blob_path).execute_azcopy_verify()
self.assertTrue(result)
# perform the single blob sync using azcopy.
result = util.Command("sync").add_arguments(src_blob_path).add_arguments(dst_blob_path). \
add_flags("log-level", "info").execute_azcopy_copy_command()
self.assertTrue(result)
def test_sync_entire_vdir_to_vdir(self):
content_dir_name = "dir_sync_test"
content_dir_path = util.create_test_n_files(1024, 10, content_dir_name)
src_vdir_path = util.get_resource_sas("srcdir")
dst_vdir_path = util.get_resource_sas("dstdir/")
# create sub-directory inside directory
sub_dir_name = os.path.join(content_dir_name, "sub_dir_sync_test")
util.create_test_n_files(1024, 10, sub_dir_name)
# upload the directory with 20 files
# upload the directory
# execute azcopy command
result = util.Command("copy").add_arguments(content_dir_path).add_arguments(src_vdir_path). \
add_flags("recursive", "true").add_flags("log-level", "info").execute_azcopy_copy_command()
self.assertTrue(result)
# execute the validator.
result = util.Command("testBlob").add_arguments(content_dir_path).add_arguments(src_vdir_path). \
add_flags("is-object-dir", "true").execute_azcopy_verify()
self.assertTrue(result)
# sync to destination
result = util.Command("sync").add_arguments(src_vdir_path).add_arguments(dst_vdir_path)\
.add_flags("log-level", "info").execute_azcopy_copy_command()
self.assertTrue(result) |
299,959 | package id | import pathlib
import os
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.microsoft import check_min_vs, is_msvc
from conan.tools.apple import is_apple_os
from conan.tools.files import apply_conandata_patches, get, copy, rm
from conan.tools.build import check_min_cppstd
from conan.tools.scm import Version
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.env import VirtualBuildEnv
required_conan_version = ">=1.53.0"
class PackageConan(ConanFile):
name = "openassetio"
description = "An open-source interoperability standard for tools and content management systems used in media production."
license = "Apache-2.0"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/OpenAssetIO/OpenAssetIO"
topics = ("asset-pipeline", "vfx", "cg", "assetmanager", "vfx-pipeline")
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"with_python": [True, False],
}
default_options = {
"shared": False,
"with_python": True,
}
short_paths = True
@property
def _min_cppstd(self):
return 17
@property
def _compilers_minimum_version(self):
return {
"gcc": "9",
"clang": "12",
"apple-clang": "12",
}
def configure(self):
if self.options.with_python:
if is_msvc(self):
# Required to create import .lib for building extension module.
self.options["cpython"].shared = True
def layout(self):
cmake_layout(self, src_folder="src")
def requirements(self):
self.requires("tomlplusplus/3.2.0")
if self.options.with_python:
# TODO: cpython requires ncurses/6.2 but no pre-built package exists.
self.requires("ncurses/6.3")
self.requires("cpython/3.9.7")
self.requires("pybind11/2.10.1")
def validate(self):
if is_apple_os(self):
raise ConanInvalidConfiguration(
f"{self.ref} does not support MacOS at this time"
)
if self.settings.compiler.cppstd:
check_min_cppstd(self, self._min_cppstd)
if is_msvc(self) and not self.dependencies["cpython"].options.shared:
raise ConanInvalidConfiguration(f"{self.ref} requires cpython:shared=True when using MSVC compiler")
check_min_vs(self, 191)
if not is_msvc(self):
minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
if minimum_version and Version(self.settings.compiler.version) < minimum_version:
raise ConanInvalidConfiguration(
f"{self.ref} requires C++{self._min_cppstd}, which your compiler does not support."
)
def build_requirements(self):
self.tool_requires("cmake/3.25.3")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.variables["OPENASSETIO_ENABLE_TESTS"] = not self.conf.get("tools.build:skip_test", default=True, check_type=bool)
tc.variables["OPENASSETIO_GLIBCXX_USE_CXX11_ABI"] = self.settings.get_safe("compiler.libcxx") == "libstdc++11"
tc.variables["OPENASSETIO_ENABLE_PYTHON"] = self.options.with_python
if self.options.with_python:
tc.variables["Python_EXECUTABLE"] = self._python_exe
if is_msvc(self):
tc.variables["Python_LIBRARY"] = self._python_windows_lib
tc.generate()
tc = CMakeDeps(self)
tc.generate()
tc = VirtualBuildEnv(self)
tc.generate()
@property
def _python_exe(self):
# TODO: update to V2 once cpython is updated
return pathlib.Path(self.deps_user_info["cpython"].python).as_posix()
@property
def _python_windows_lib(self):
pth = pathlib.Path(
self.dependencies["cpython"].package_folder,
self.dependencies["cpython"].cpp_info.components["embed"].libdirs[0],
self.dependencies["cpython"].cpp_info.components["embed"].libs[0])
pth = pth.with_suffix(".lib")
return pth.as_posix()
def build(self):
apply_conandata_patches(self)
cmake = CMake(self)
cmake.configure()
cmake.build()
def METHOD_NAME(self):
if self.options.with_python:
self.info.requires["cpython"].minor_mode()
def package(self):
copy(self, pattern="LICENSE", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
cmake = CMake(self)
cmake.install()
rm(self, "OpenAssetIOConfig*.cmake", os.path.join(self.package_folder, "lib", "cmake", "OpenAssetIO"))
rm(self, "OpenAssetIOTargets*.cmake", os.path.join(self.package_folder, "lib", "cmake", "OpenAssetIO"))
rm(self, "*.pdb", os.path.join(self.package_folder, "lib"))
rm(self, "*.pdb", os.path.join(self.package_folder, "bin"))
def package_info(self):
self.cpp_info.libs = []
self.cpp_info.set_property("cmake_file_name", "OpenAssetIO")
self.cpp_info.set_property("cmake_target_name", "OpenAssetIO::OpenAssetIO")
self.cpp_info.set_property("cmake_build_modules", [os.path.join("lib", "cmake", "OpenAssetIO", "OpenAssetIOVariables.cmake")])
self.cpp_info.builddirs = [os.path.join("lib", "cmake")]
self.cpp_info.components["openassetio-core"].set_property("cmake_target_name", "OpenAssetIO::openassetio-core")
self.cpp_info.components["openassetio-core"].libs = ["openassetio"]
if self.options.with_python:
self.cpp_info.components["openassetio-python-bridge"].set_property("cmake_target_name", "OpenAssetIO::openassetio-python-bridge")
self.cpp_info.components["openassetio-python-bridge"].requires = ["openassetio-core"]
self.cpp_info.components["openassetio-python-bridge"].libs = ["openassetio-python"]
# TODO: to remove in conan v2 once cmake_find_package_* generators removed
self.cpp_info.names["cmake_find_package"] = "OpenAssetIO"
self.cpp_info.names["cmake_find_package_multi"] = "OpenAssetIO" |
299,960 | clear vertexlabels | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas_ghpython
from compas.artists import VolMeshArtist
from .artist import GHArtist
class VolMeshArtist(GHArtist, VolMeshArtist):
"""Artist for drawing volmesh data structures.
Parameters
----------
volmesh : :class:`~compas.datastructures.VolMesh`
A COMPAS volmesh.
**kwargs : dict, optional
Additional keyword arguments.
See :class:`~compas_ghpython.artists.GHArtist` and :class:`~compas.artists.VolMeshArtist` for more info.
"""
def __init__(self, volmesh, **kwargs):
super(VolMeshArtist, self).__init__(volmesh=volmesh, **kwargs)
def draw(self, cells=None, color=None):
"""Draw a selection of cells.
Parameters
----------
cells : list[int], optional
A list of cells to draw.
The default is None, in which case all cells are drawn.
color : :class:`~compas.colors.Color` | dict[int, :class:`~compas.colors.Color`], optional
The color of the cells.
The default color is :attr:`VolMeshArtist.default_cellcolor`.
Returns
-------
list[:rhino:`Rhino.Geometry.Mesh`]
            The meshes created for the cells.
Every cell is drawn as an individual mesh.
"""
return self.draw_cells(cells=cells, color=color)
def draw_vertices(self, vertices=None, color=None):
"""Draw a selection of vertices.
Parameters
----------
vertices : list
A list of vertices to draw.
Default is None, in which case all vertices are drawn.
color : :class:`~compas.colors.Color` | dict[int, :class:`~compas.colors.Color`]
The color specification for the vertices.
The default color of the vertices is :attr:`VolMeshArtist.default_vertexcolor`.
Returns
-------
list[:rhino:`Rhino.Geometry.Point3d`]
"""
self.vertex_color = color
vertices = vertices or self.vertices
vertex_xyz = self.vertex_xyz
points = []
for vertex in vertices:
points.append(
{
"pos": vertex_xyz[vertex],
"name": "{}.vertex.{}".format(self.volmesh.name, vertex),
"color": self.vertex_color[vertex].rgb255,
}
)
return compas_ghpython.draw_points(points)
def draw_edges(self, edges=None, color=None):
"""Draw a selection of edges.
Parameters
----------
edges : list[tuple[int, int]], optional
A list of edges to draw.
The default is None, in which case all edges are drawn.
color : :class:`~compas.colors.Color` | dict[tuple[int, int], :class:`~compas.colors.Color`], optional
The color specification for the edges.
The default color is :attr:`VolMeshArtist.default_edgecolor`.
Returns
-------
list[:rhino:`Rhino.Geometry.Line`]
"""
self.edge_color = color
edges = edges or self.edges
vertex_xyz = self.vertex_xyz
lines = []
for edge in edges:
u, v = edge
lines.append(
{
"start": vertex_xyz[u],
"end": vertex_xyz[v],
"color": self.edge_color[edge].rgb255,
"name": "{}.edge.{}-{}".format(self.volmesh.name, u, v),
}
)
return compas_ghpython.draw_lines(lines)
def draw_faces(self, faces=None, color=None, join_faces=False):
"""Draw a selection of faces.
Parameters
----------
faces : list[list[int]], optional
A list of faces to draw.
The default is None, in which case all faces are drawn.
color : :class:`~compas.colors.Color` | dict[int, :class:`~compas.colors.Color`], optional
The color specification for the faces.
The default color is :attr:`VolMeshArtist.default_facecolor`.
join_faces : bool, optional
If True, join the faces into one mesh.
Returns
-------
list[:rhino:`Rhino.Geometry.Mesh`]
"""
self.face_color = color
faces = faces or self.faces
vertex_xyz = self.vertex_xyz
facets = []
for face in faces:
facets.append(
{
"points": [vertex_xyz[vertex] for vertex in self.volmesh.halfface_vertices(face)],
"name": "{}.face.{}".format(self.volmesh.name, face),
"color": self.face_color[face].rgb255,
}
)
return compas_ghpython.draw_faces(facets)
def draw_cells(self, cells=None, color=None):
"""Draw a selection of cells.
Parameters
----------
cells : list[int], optional
A list of cells to draw.
The default is None, in which case all cells are drawn.
color : :class:`~compas.colors.Color` | dict[int, :class:`~compas.colors.Color`], optional
The color of the cells.
The default color is :attr:`VolMeshArtist.default_cellcolor`.
Returns
-------
list[:rhino:`Rhino.Geometry.Mesh`]
"""
self.cell_color = color
cells = cells or self.cells
vertex_xyz = self.vertex_xyz
meshes = []
for cell in cells:
vertices = self.volmesh.cell_vertices(cell)
faces = self.volmesh.cell_faces(cell)
vertex_index = dict((vertex, index) for index, vertex in enumerate(vertices))
vertices = [vertex_xyz[vertex] for vertex in vertices]
faces = [[vertex_index[vertex] for vertex in self.volmesh.halfface_vertices(face)] for face in faces]
mesh = compas_ghpython.draw_mesh(vertices, faces, color=self.cell_color[cell].rgb255)
meshes.append(mesh)
return meshes
def clear_vertices(self):
"""GH Artists are state-less. Therefore, clear does not have any effect.
Returns
-------
None
"""
pass
def clear_edges(self):
"""GH Artists are state-less. Therefore, clear does not have any effect.
Returns
-------
None
"""
pass
def clear_faces(self):
"""GH Artists are state-less. Therefore, clear does not have any effect.
Returns
-------
None
"""
pass
def clear_cells(self):
"""GH Artists are state-less. Therefore, clear does not have any effect.
Returns
-------
None
"""
pass
def METHOD_NAME(self):
"""GH Artists are state-less. Therefore, clear does not have any effect.
Returns
-------
None
"""
pass
def clear_edgelabels(self):
"""GH Artists are state-less. Therefore, clear does not have any effect.
Returns
-------
None
"""
pass
def clear_facelabels(self):
"""GH Artists are state-less. Therefore, clear does not have any effect.
Returns
-------
None
"""
pass
def clear_celllabels(self):
"""GH Artists are state-less. Therefore, clear does not have any effect.
Returns
-------
None
"""
pass |
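# A minimal usage sketch, assuming `volmesh` is an existing compas.datastructures.VolMesh
# available inside a Grasshopper Python component; the function name is illustrative.
def example_draw_volmesh(volmesh):
    artist = VolMeshArtist(volmesh)
    points = artist.draw_vertices()   # Rhino points for all vertices
    lines = artist.draw_edges()       # Rhino lines for all edges
    meshes = artist.draw()            # one Rhino mesh per cell
    return points, lines, meshes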
299,961 | exists | # Copyright 2023 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IO Abstraction Layer.
The sole purpose of this abstraction layer is to avoid requiring tensorflow
as an open-source dependency solely for its tensorflow.io.gfile functions.
"""
import contextlib
from enum import Enum
import glob as glob_module
import importlib
import os
import shutil
from absl import logging
from . import errors
# Global Modes and selective import of tensorflow.io gfile.
class BackendMode(Enum):
DEFAULT = 0
TF = 1
io_mode = None
gfile = None
if importlib.util.find_spec("tensorflow"):
from tensorflow.io import gfile # type: ignore
io_mode = BackendMode.TF
else:
logging.warning(
"Tensorflow library not found, tensorflow.io.gfile "
"operations will use native shim calls. "
"GCS paths (i.e. 'gs://...') cannot be accessed."
)
io_mode = BackendMode.DEFAULT
# Constants and Exceptions
if io_mode == BackendMode.TF:
from tensorflow import errors as tf_errors # type: ignore
NotFoundError = tf_errors.NotFoundError
else:
NotFoundError = FileNotFoundError
# Overrides for testing.
@contextlib.contextmanager
def override_mode(override: BackendMode):
# pylint: disable=g-doc-return-or-yield
"""Returns a context manager that changes backend IO mode.
Args:
override: BackendMode enum value to set IO mode inside context.
"""
# pylint: enable=g-doc-return-or-yield
global io_mode
io_mode_prev = io_mode
io_mode = override
try:
yield
finally:
io_mode = io_mode_prev
def set_mode(override: BackendMode):
"""Sets global io mode.
Args:
override: BackendMode enum value to set for IO mode.
"""
global io_mode
io_mode = override
# tensorflow.io.gfile API shim functions.
def GFile(name, mode): # pylint: disable=invalid-name
if io_mode == BackendMode.DEFAULT:
if "b" in mode:
return open(name, mode) # pylint: disable=unspecified-encoding
else:
return open(name, mode, encoding="utf-8")
elif io_mode == BackendMode.TF:
return gfile.GFile(name, mode)
else:
raise ValueError("Unknown IO Backend Mode.")
def listdir(path):
if io_mode == BackendMode.DEFAULT:
return os.listdir(path=path)
elif io_mode == BackendMode.TF:
return gfile.listdir(path=path)
else:
raise ValueError("Unknown IO Backend Mode.")
def isdir(path):
if io_mode == BackendMode.DEFAULT:
return os.path.isdir(path)
elif io_mode == BackendMode.TF:
return gfile.isdir(path)
else:
raise ValueError("Unknown IO Backend Mode.")
def copy(src, dst, overwrite=False):
if io_mode == BackendMode.DEFAULT:
if os.path.METHOD_NAME(dst) and not overwrite:
raise errors.AlreadyExistsError(dst)
shutil.copy(src, dst)
return
elif io_mode == BackendMode.TF:
return gfile.copy(src, dst, overwrite=overwrite)
else:
raise ValueError("Unknown IO Backend Mode.")
def rename(src, dst, overwrite=False):
if io_mode == BackendMode.DEFAULT:
if os.path.METHOD_NAME(dst) and not overwrite:
raise errors.AlreadyExistsError(dst)
return os.rename(src, dst)
elif io_mode == BackendMode.TF:
return gfile.rename(src, dst, overwrite=overwrite)
else:
raise ValueError("Unknown IO Backend Mode.")
def METHOD_NAME(path):
if io_mode == BackendMode.DEFAULT:
return os.path.METHOD_NAME(path)
elif io_mode == BackendMode.TF:
return gfile.METHOD_NAME(path)
else:
raise ValueError("Unknown IO Backend Mode.")
def makedirs(path):
if io_mode == BackendMode.DEFAULT:
return os.makedirs(path, exist_ok=True)
elif io_mode == BackendMode.TF:
return gfile.makedirs(path)
else:
raise ValueError("Unknown IO Backend Mode.")
def glob(pattern):
if io_mode == BackendMode.DEFAULT:
return [
path.rstrip("/") for path in glob_module.glob(pattern, recursive=False)
]
elif io_mode == BackendMode.TF:
return gfile.glob(pattern)
else:
raise ValueError("Unknown IO Backend Mode.")
def remove(path):
"""Remove the file at path. Might fail if used on a directory path."""
if io_mode == BackendMode.DEFAULT:
return os.remove(path)
elif io_mode == BackendMode.TF:
return gfile.remove(path)
else:
raise ValueError("Unknown IO Backend Mode.")
def rmtree(path):
"""Remove a directory and recursively all contents inside. Might fail if used on a file path."""
if io_mode == BackendMode.DEFAULT:
return shutil.rmtree(path)
elif io_mode == BackendMode.TF:
return gfile.rmtree(path)
else:
raise ValueError("Unknown IO Backend Mode.")
def getsize(path):
"""Return the size, in bytes, of path."""
if io_mode == BackendMode.DEFAULT:
return os.path.getsize(path)
elif io_mode == BackendMode.TF:
return gfile.stat(path).length
else:
raise ValueError("Unknown IO Backend Mode.") |
299,962 | pisa story | # -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import logging
from reportlab.lib import pdfencrypt
from reportlab.platypus.flowables import Spacer
from reportlab.platypus.frames import Frame
from xhtml2pdf.builders.signs import PDFSignature
from xhtml2pdf.builders.watermarks import WaterMarks
from xhtml2pdf.context import pisaContext
from xhtml2pdf.default import DEFAULT_CSS
from xhtml2pdf.parser import pisaParser
from xhtml2pdf.util import pypdf, getBox
from xhtml2pdf.files import pisaTempFile, cleanFiles
from xhtml2pdf.xhtml2pdf_reportlab import PmlBaseDoc, PmlPageTemplate
from html import escape as html_escape
log = logging.getLogger("xhtml2pdf")
def pisaErrorDocument(dest, c):
out = pisaTempFile(capacity=c.capacity)
out.write("<p style='background-color:red;'><strong>%d error(s) occured:</strong><p>" % c.err)
for mode, line, msg, _ in c.log:
if mode == "error":
out.write("<pre>%s in line %d: %s</pre>" %
(mode, line, html_escape(msg)))
out.write("<p><strong>%d warning(s) occured:</strong><p>" % c.warn)
for mode, line, msg, _ in c.log:
if mode == "warning":
out.write("<p>%s in line %d: %s</p>" %
(mode, line, html_escape(msg)))
return pisaDocument(out.getvalue(), dest, raise_exception=False)
def METHOD_NAME(src, path=None, link_callback=None, debug=0, default_css=None,
xhtml=False, encoding=None, context=None, xml_output=None,
**kw):
# Prepare Context
if not context:
context = pisaContext(path, debug=debug)
context.pathCallback = link_callback
# Use a default set of CSS definitions to get an expected output
if default_css is None:
default_css = DEFAULT_CSS
# Parse and fill the story
pisaParser(src, context, default_css, xhtml, encoding, xml_output)
# Avoid empty documents
if not context.story:
context.story = [Spacer(1, 1)]
if context.indexing_story:
context.story.append(context.indexing_story)
# Remove anchors if they do not exist (because of a bug in Reportlab)
for frag, anchor in context.anchorFrag:
if anchor not in context.anchorName:
frag.link = None
return context
def get_encrypt_instance(data):
if data is None:
return
if isinstance(data, str):
return pdfencrypt.StandardEncryption(data)
return data
def pisaDocument(src, dest=None, dest_bytes=False, path=None, link_callback=None, debug=0,
default_css=None, xhtml=False, encoding=None, xml_output=None,
raise_exception=True, capacity=100 * 1024, context_meta=None,
encrypt=None, signature=None,
**kw):
log.debug("pisaDocument options:\n src = %r\n dest = %r\n path = %r\n link_callback = %r\n xhtml = %r\n context_meta = %r",
src,
dest,
path,
link_callback,
xhtml,
context_meta)
# Prepare simple context
context = pisaContext(path, debug=debug, capacity=capacity)
if context_meta is not None:
context.meta.update(context_meta)
context.pathCallback = link_callback
# Build story
context = METHOD_NAME(src, path, link_callback, debug, default_css, xhtml,
encoding, context=context, xml_output=xml_output)
# Buffer PDF into memory
out = io.BytesIO()
doc = PmlBaseDoc(
out,
pagesize=context.pageSize,
author=context.meta["author"].strip(),
subject=context.meta["subject"].strip(),
keywords=[x.strip() for x in
context.meta["keywords"].strip().split(",") if x],
title=context.meta["title"].strip(),
showBoundary=0,
encrypt=get_encrypt_instance(encrypt),
allowSplitting=1)
# Prepare templates and their frames
if "body" in context.templateList:
body = context.templateList["body"]
del context.templateList["body"]
else:
x, y, w, h = getBox("1cm 1cm -1cm -1cm", context.pageSize)
body = PmlPageTemplate(
id="body",
frames=[
Frame(x, y, w, h,
id="body",
leftPadding=0,
rightPadding=0,
bottomPadding=0,
topPadding=0)],
pagesize=context.pageSize)
doc.addPageTemplates([body] + list(context.templateList.values()))
# Use multibuild e.g. if a TOC has to be created
if context.multiBuild:
doc.multiBuild(context.story)
else:
doc.build(context.story)
# Add watermarks
output=io.BytesIO()
output, has_bg=WaterMarks.process_doc(context, out, output)
if not has_bg:
output=out
if signature:
signoutput = io.BytesIO()
do_ok=PDFSignature.sign(output,signoutput, signature)
if do_ok:
output=signoutput
# Get the resulting PDF and write it to the file object
# passed from the caller
if dest is None:
# No output file was passed - Let's use a pisaTempFile
dest = io.BytesIO()
context.dest = dest
data = output.getvalue()
context.dest.write(data) # TODO: context.dest is a tempfile as well...
cleanFiles()
if dest_bytes:
return data
return context
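# A minimal usage sketch of pisaDocument(): convert an HTML string to a PDF file. The
# HTML snippet and output filename are illustrative assumptions.
def example_convert(html="<h1>Hello</h1><p>xhtml2pdf example</p>", filename="example.pdf"):
    with open(filename, "wb") as result_file:
        context = pisaDocument(src=html, dest=result_file)
    # context.err counts conversion errors; zero means success
    return not context.err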
|
299,963 | get name | # A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2023 NV Access Limited, Leonard de Ruijter
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
"""Module for native UIA implementations of SysListView32, e.g. in Windows Forms."""
from typing import Dict, List, Optional, Type
from comtypes import COMError
import config
from logHandler import log
from config.configFlags import ReportTableHeaders
import UIAHandler
from .. import NVDAObject
from ..behaviors import RowWithFakeNavigation
from . import ListItem, UIA
def findExtraOverlayClasses(obj: NVDAObject, clsList: List[Type[NVDAObject]]) -> None:
UIAControlType = obj.UIAElement.cachedControlType
if UIAControlType == UIAHandler.UIA.UIA_ListControlTypeId:
clsList.insert(0, SysListViewList)
elif UIAControlType == UIAHandler.UIA.UIA_ListItemControlTypeId and isinstance(obj.parent, SysListViewList):
clsList.insert(0, SysListViewItem)
if obj.parent._getUIACacheablePropertyValue(UIAHandler.UIA.UIA_IsTablePatternAvailablePropertyId):
clsList.insert(0, RowWithFakeNavigation)
class SysListViewList(UIA):
...
class SysListViewItem(ListItem):
def METHOD_NAME(self) -> str:
parent = self.parent
if not isinstance(parent, SysListViewList) or self.childCount <= 1:
return super().name
childrenCacheRequest = UIAHandler.handler.baseCacheRequest.clone()
childrenCacheRequest.addProperty(UIAHandler.UIA.UIA_NamePropertyId)
childrenCacheRequest.addProperty(UIAHandler.UIA.UIA_TableItemColumnHeaderItemsPropertyId)
childrenCacheRequest.TreeScope = UIAHandler.TreeScope_Children
cachedChildren = self.UIAElement.buildUpdatedCache(childrenCacheRequest).getCachedChildren()
if not cachedChildren:
# There are no children
return super().name
textList = []
for index in range(cachedChildren.length):
e = cachedChildren.getElement(index)
name = e.cachedName
columnHeaderTextList = []
if (
name
and config.conf['documentFormatting']['reportTableHeaders'] in (
ReportTableHeaders.ROWS_AND_COLUMNS,
ReportTableHeaders.COLUMNS,
)
and index > 0
):
try:
columnHeaderItems = e.getCachedPropertyValueEx(
UIAHandler.UIA.UIA_TableItemColumnHeaderItemsPropertyId,
False
)
except COMError:
log.debugWarning("Couldn't fetch column header items", exc_info=True)
columnHeaderItems = None
else:
columnHeaderItems = None
if columnHeaderItems:
columnHeaderItems = columnHeaderItems.QueryInterface(UIAHandler.IUIAutomationElementArray)
for innerIndex in range(columnHeaderItems.length):
columnHeaderItem = columnHeaderItems.getElement(innerIndex)
columnHeaderTextList.append(columnHeaderItem.currentName)
columnHeaderText = " ".join(columnHeaderTextList)
if columnHeaderText:
text = f"{columnHeaderText} {name}"
else:
text = name
textList.append(text)
return "; ".join(textList)
def _get_indexInParent(self) -> Optional[int]:
parent = self.parent
if not isinstance(parent, SysListViewList) or self.childCount == 0:
return super().indexInParent
childCacheRequest = UIAHandler.handler.baseCacheRequest.clone()
childCacheRequest.addProperty(UIAHandler.UIA.UIA_GridItemRowPropertyId)
element = UIAHandler.handler.baseTreeWalker.GetFirstChildElementBuildCache(
self.UIAElement,
childCacheRequest
)
val = element.getCachedPropertyValueEx(
UIAHandler.UIA.UIA_GridItemRowPropertyId,
True
)
if val == UIAHandler.handler.reservedNotSupportedValue:
return super().indexInParent
return val
def _get_positionInfo(self) -> Dict[str, int]:
info = super().positionInfo or {}
itemIndex = 0
try:
itemIndex = self.indexInParent + 1
except (COMError, NotImplementedError):
pass
if itemIndex > 0:
info['indexInGroup'] = itemIndex
itemCount = 0
try:
itemCount = self.parent.rowCount
except (COMError, NotImplementedError):
pass
if itemCount > 0:
info['similarItemsInGroup'] = itemCount
return info |
299,964 | initialize loc | # Copyright (c) 2022 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gym
from gym import spaces
import numpy as np
import cv2
class BouncingSquares(gym.Env):
"""An environment which contains two bouncing squares. Either square bounces
when it hits the image border or the other square.
The environment has a dummy action NOOP which doesn't affect either square.
"""
def __init__(self,
N: int,
pixels_per_node: int = 1,
noise_level: float = 0.,
render_size: int = 640,
color: bool = False):
"""
Args:
N: the length of the map
pixels_per_node: when generating an image input, how many pixels are
drawn at each location.
            noise_level: If >0, Gaussian noise with std ``noise_level`` will be
                added to the generated images.
            render_size: the side length (in pixels) of the image produced by ``render()``.
            color: whether the squares are colorful or grayscale.
"""
super().__init__()
size = N * pixels_per_node
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(size, size, 3 if color else 1),
dtype=np.uint8)
self.action_space = spaces.Discrete(2)
self._N = N
self._colors = ([(0, 255, 0), (0, 0, 255), (120, 120, 120),
(255, 255, 0), (0, 255, 255), (255, 0, 255),
(120, 0, 120), (0, 120, 120)] if color else None)
self._pixels_per_node = pixels_per_node
self._noise_level = noise_level
self._render_size = render_size
self.reset()
self.metadata.update({'render.modes': ["rgb_array"]})
def _get_square_boundary(self, loc, size):
"""inclusive"""
return (loc[0], loc[0] + size[0] - 1), (loc[1], loc[1] + size[1] - 1)
def reset(self):
def METHOD_NAME(size):
return np.array((np.random.randint(self._N - size[0]),
np.random.randint(self._N - size[1])))
def _initialize_speed():
return np.array((np.random.randint(-3, 4), np.random.randint(
-3, 4)))
def _initialize_size():
sizes = [4, 5]
return np.array((self._N // np.random.choice(sizes),
self._N // np.random.choice(sizes)))
self._size1 = _initialize_size()
self._size2 = _initialize_size()
self._square1 = METHOD_NAME(self._size1)
while True:
self._square2 = METHOD_NAME(self._size2)
if not self._collision():
break
if self._colors:
self._color1 = self._colors[np.random.randint(len(self._colors))]
self._color2 = self._colors[np.random.randint(len(self._colors))]
else:
self._color1 = self._color2 = None
self._speed1 = _initialize_speed()
self._speed2 = _initialize_speed()
return self._obs()
def _obs(self):
img = np.zeros((self._N, self._N, 3 if self._colors else 1),
dtype=np.uint8)
def _paint_square(sq, img, size, color):
(i0, i1), (j0, j1) = self._get_square_boundary(sq, size)
patch = img[i0:i1 + 1, j0:j1 + 1]
patch[:, :] = color
if self._noise_level > 0:
noise = (np.clip(np.random.randn(*patch.shape), 0, None) *
self._noise_level * 255)
patch += noise.astype(np.uint8)
_paint_square(self._square1, img, self._size1, self._color1 or 255)
_paint_square(self._square2, img, self._size2, self._color2 or 255)
img = cv2.resize(
img,
dsize=(self._N * self._pixels_per_node, ) * 2,
interpolation=cv2.INTER_NEAREST)
return img
def _collision(self):
(i0, i1), (j0, j1) = self._get_square_boundary(self._square1,
self._size1)
(y0, y1), (x0, x1) = self._get_square_boundary(self._square2,
self._size2)
if (i0 > y1 or y0 > i1) or (j0 > x1 or x0 > j1):
return False
return True
def step(self, action):
def _boundary_collision(sq, sp, s, osq):
if sq[0] < 0 or sq[0] + s[0] - 1 >= self._N:
return osq, sp * np.array([-1, 1])
if sq[1] < 0 or sq[1] + s[1] - 1 >= self._N:
return osq, sp * np.array([1, -1])
return sq, sp
# assuming sq1 and sq2 are 'safe' locations
sq1, sq2 = self._square1, self._square2
self._square1, self._speed1 = _boundary_collision(
self._square1 + self._speed1, self._speed1, self._size1, sq1)
self._square2, self._speed2 = _boundary_collision(
self._square2 + self._speed2, self._speed2, self._size2, sq2)
if self._collision():
self._speed1, self._speed2 = self._speed2, self._speed1
self._square1 = sq1
self._square2 = sq2
return self._obs(), 0., False, {}
def render(self, mode="human"):
obs = self._obs()
obs = cv2.resize(
obs,
dsize=(self._render_size, self._render_size),
interpolation=cv2.INTER_NEAREST)
if mode == "rgb_array":
return obs
else:
cv2.imshow("BouncingSquares", obs)
cv2.waitKey(500) |
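# A minimal usage sketch: roll the environment forward for a few frames. The grid size,
# pixel scale and step count are arbitrary example values.
if __name__ == "__main__":
    env = BouncingSquares(N=32, pixels_per_node=4, color=True)
    obs = env.reset()
    for _ in range(10):
        # the action is a dummy NOOP, so any sampled action works
        obs, reward, done, info = env.step(env.action_space.sample())
    print(obs.shape, reward, done)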
299,965 | test exists fail | # -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
# -----------------------------------------------------------------------------
import os.path
import shutil
import subprocess
import tempfile
import pytest
from intake.util_tests import ex
TEST_CATALOG_YAML = os.path.join(os.path.dirname(__file__), "catalog1.yml")
def test_list():
cmd = [ex, "-m", "intake.cli.client", "list", TEST_CATALOG_YAML]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
out, _ = process.communicate()
out = out.decode("utf-8")
assert len(out.strip().split("\n")) == 3
assert "entry1" in out
assert "entry1_part" in out
assert "use_example1" in out
def test_full_list():
cmd = [ex, "-m", "intake.cli.client", "list", "--full", TEST_CATALOG_YAML]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
out, _ = process.communicate()
out = out.decode("utf-8")
assert "[entry1]" in out
assert "[entry1_part]" in out
assert "[use_example1]" in out
def test_describe():
cmd = [ex, "-m", "intake.cli.client", "describe", TEST_CATALOG_YAML, "entry1"]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
out, _ = process.communicate()
expected = """\
[entry1] args={'urlpath': '{{ CATALOG_DIR }}/entry1_*.csv'}
[entry1] container=dataframe
[entry1] description=entry1 full
[entry1] direct_access=forbid
[entry1] driver=['csv']
[entry1] metadata={'foo': 'bar', 'bar': [1, 2, 3]}
[entry1] name=entry1
[entry1] plugin=['csv']
[entry1] user_parameters=[]
"""
assert out == expected
def test_exists_pass():
cmd = [ex, "-m", "intake.cli.client", "exists", TEST_CATALOG_YAML, "entry1"]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
out, _ = process.communicate()
assert out == "True\n"
def METHOD_NAME():
cmd = [ex, "-m", "intake.cli.client", "exists", TEST_CATALOG_YAML, "entry2"]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
out, _ = process.communicate()
assert out == "False\n"
def test_discover():
cmd = [ex, "-m", "intake.cli.client", "discover", TEST_CATALOG_YAML, "entry1"]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
out, _ = process.communicate()
assert "'dtype':" in out
assert "'metadata':" in out
assert "'npartitions':" in out
assert "'shape':" in out
def test_get_pass():
cmd = [ex, "-m", "intake.cli.client", "get", TEST_CATALOG_YAML, "entry1"]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
out, _ = process.communicate()
assert "Charlie1 25.0 3" in out
assert "Eve2 25.0 3" in out
def test_get_fail():
cmd = [ex, "-m", "intake.cli.client", "get", TEST_CATALOG_YAML, "entry2"]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
_, err = process.communicate()
assert "KeyError('entry2'" in err
@pytest.fixture
def temp_current_working_directory():
prev_cwd = os.getcwd()
dirname = tempfile.mkdtemp()
os.chdir(dirname)
yield dirname
os.chdir(prev_cwd)
shutil.rmtree(dirname)
def test_example(temp_current_working_directory):
cmd = [ex, "-m", "intake.cli.client", "example"]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
stdout, stderr = process.communicate()
assert process.returncode == 0
assert os.path.exists("us_states.yml")
assert os.path.exists("states_1.csv")
assert os.path.exists("states_2.csv")
# should fail second time due to existing files
cmd = [ex, "-m", "intake.cli.client", "example"]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
_, err = process.communicate()
assert process.returncode > 0 |
299,966 | initial design | import scadnano as sc
def create_design():
m13_rotation = 6702
m13_variant = sc.M13Variant.p7560
# print(sc.m13(m13_rotation, m13_variant))
design = METHOD_NAME()
add_nicks(design)
add_crossovers(design)
scaffold = next(s for s in design.strands if
s.first_domain.helix == 5 and not s.first_domain.forward)
scaffold.set_scaffold()
design.assign_m13_to_scaffold(rotation=m13_rotation, variant=m13_variant)
return design
def METHOD_NAME():
max_offset = 1295
helices = [
# below uses cadnano honeycomb coordinates
# https://github.com/UC-Davis-molecular-computing/scadnano-python-package/blob/master/misc/cadnano-format-specs/v2.txt
sc.Helix(grid_position=(1, 1), max_offset=max_offset),
sc.Helix(grid_position=(0, 1), max_offset=max_offset),
sc.Helix(grid_position=(0, 2), max_offset=max_offset),
sc.Helix(grid_position=(1, 2), max_offset=max_offset),
sc.Helix(grid_position=(2, 2), max_offset=max_offset),
sc.Helix(grid_position=(2, 1), max_offset=max_offset),
# below uses original mistaken convention from mistaken cadnano specs
# sc.Helix(grid_position=(1, 0, 0), max_offset=max_offset),
# sc.Helix(grid_position=(0, 0, 0), max_offset=max_offset),
# sc.Helix(grid_position=(0, 1, 0), max_offset=max_offset),
# sc.Helix(grid_position=(1, 1, 0), max_offset=max_offset),
# sc.Helix(grid_position=(2, 1, 0), max_offset=max_offset),
# sc.Helix(grid_position=(2, 0, 0), max_offset=max_offset),
# # below uses odd-q coordinates:
# sc.Helix(grid_position=(1, -1, 0), max_offset=max_offset),
# sc.Helix(grid_position=(0, 0, 0), max_offset=max_offset),
# sc.Helix(grid_position=(0, 1, 0), max_offset=max_offset),
# sc.Helix(grid_position=(1, 1, 0), max_offset=max_offset),
# sc.Helix(grid_position=(2, 1, 0), max_offset=max_offset),
# sc.Helix(grid_position=(2, 0, 0), max_offset=max_offset),
# below uses even-q coordinates:
# sc.Helix(grid_position=(1, 0, 0), max_offset=max_offset),
# sc.Helix(grid_position=(0, 0, 0), max_offset=max_offset),
# sc.Helix(grid_position=(0, 1, 0), max_offset=max_offset),
# sc.Helix(grid_position=(1, 2, 0), max_offset=max_offset),
# sc.Helix(grid_position=(2, 1, 0), max_offset=max_offset),
# sc.Helix(grid_position=(2, 0, 0), max_offset=max_offset),
# below uses odd-r coordinates:
# sc.Helix(grid_position=(1, 0, 0), max_offset=max_offset),
# sc.Helix(grid_position=(0, 1, 0), max_offset=max_offset),
# sc.Helix(grid_position=(1, 2, 0), max_offset=max_offset),
# sc.Helix(grid_position=(2, 2, 0), max_offset=max_offset),
# sc.Helix(grid_position=(2, 1, 0), max_offset=max_offset),
# sc.Helix(grid_position=(2, 0, 0), max_offset=max_offset),
]
scafs = [
sc.Strand([sc.Domain(helix=0, forward=True, start=16, end=1276)]),
sc.Strand([sc.Domain(helix=1, forward=False, start=16, end=1276)]),
sc.Strand([sc.Domain(helix=2, forward=True, start=12, end=1272)]),
sc.Strand([sc.Domain(helix=3, forward=False, start=12, end=1272)]),
sc.Strand([sc.Domain(helix=4, forward=True, start=19, end=1279)]),
sc.Strand([sc.Domain(helix=5, forward=False, start=19, end=1279)]),
]
staps = [
sc.Strand([sc.Domain(helix=0, forward=False, start=42, end=1246)]),
sc.Strand([sc.Domain(helix=1, forward=True, start=42, end=1246)]),
sc.Strand([sc.Domain(helix=2, forward=False, start=42, end=1246)]),
sc.Strand([sc.Domain(helix=3, forward=True, start=42, end=1246)]),
sc.Strand([sc.Domain(helix=4, forward=False, start=42, end=1246)]),
sc.Strand([sc.Domain(helix=5, forward=True, start=42, end=1246)]),
]
strands = scafs + staps
return sc.Design(helices=helices, strands=strands, grid=sc.honeycomb)
def add_nicks(design: sc.Design):
design.add_nick(helix=5, offset=399, forward=False) # scaffold
for offset in range(56, 1246, 42):
design.add_nick(helix=0, offset=offset, forward=False)
design.add_nick(helix=3, offset=offset, forward=True)
for offset in range(70, 1246, 42):
design.add_nick(helix=1, offset=offset, forward=True)
design.add_nick(helix=4, offset=offset, forward=False)
for offset in range(84, 1246, 42):
design.add_nick(helix=2, offset=offset, forward=False)
design.add_nick(helix=5, offset=offset, forward=True)
def add_crossovers(design: sc.Design):
# staples interior
for offset in range(84, 1246, 42):
design.add_full_crossover(helix=0, helix2=1, offset=offset, forward=False)
design.add_full_crossover(helix=3, helix2=4, offset=offset, forward=True)
for offset in range(56, 1246, 42):
design.add_full_crossover(helix=1, helix2=2, offset=offset, forward=True)
design.add_full_crossover(helix=4, helix2=5, offset=offset, forward=False)
for offset in range(70, 1246, 42):
design.add_full_crossover(helix=2, helix2=3, offset=offset, forward=False)
design.add_full_crossover(helix=5, helix2=0, offset=offset, forward=True)
for offset in range(49, 1245, 42): # extra crossovers 5 - 0 for some reason
design.add_full_crossover(helix=5, helix2=0, offset=offset, forward=True)
# staples edges
design.add_half_crossover(helix=0, helix2=1, offset=42, forward=False)
design.add_half_crossover(helix=3, helix2=4, offset=42, forward=True)
design.add_half_crossover(helix=0, helix2=5, offset=1245, forward=False)
design.add_half_crossover(helix=2, helix2=3, offset=1245, forward=False)
# scaffold interior
crossovers = []
for offset in range(58, 1250, 42):
crossovers.append(sc.Crossover(helix=0, helix2=1, offset=offset, forward=True))
for offset in range(30, 1250, 42):
crossovers.append(sc.Crossover(helix=1, helix2=2, offset=offset, forward=False))
for offset in range(54, 1250, 42):
crossovers.append(sc.Crossover(helix=2, helix2=3, offset=offset, forward=True))
for offset in range(26, 1250, 42):
crossovers.append(sc.Crossover(helix=3, helix2=4, offset=offset, forward=False))
# scaffold edges
crossovers.append(sc.Crossover(helix=0, helix2=1, offset=16, forward=True, half=True))
crossovers.append(sc.Crossover(helix=2, helix2=3, offset=12, forward=True, half=True))
crossovers.append(sc.Crossover(helix=4, helix2=5, offset=19, forward=True, half=True))
crossovers.append(sc.Crossover(helix=0, helix2=1, offset=1275, forward=True, half=True))
crossovers.append(sc.Crossover(helix=2, helix2=3, offset=1271, forward=True, half=True))
crossovers.append(sc.Crossover(helix=4, helix2=5, offset=1278, forward=True, half=True))
design.add_crossovers(crossovers)
if __name__ == '__main__':
design = create_design()
design.write_scadnano_file(directory='output_designs') |
299,967 | clear | import sqlite3
from diskcache import DjangoCache
from django.core.cache.backends.base import DEFAULT_TIMEOUT
class CustomDjangoCache(DjangoCache):
"""
    Inherits from DjangoCache to better manage the error handling of the
    diskcache package by wrapping methods that perform database operations
    under the hood in try/except, matching the version of diskcache used in kolibri:
https://github.com/grantjenks/python-diskcache/blob/v4.1.0/diskcache/djangocache.py
"""
def add(
self,
key,
value,
timeout=DEFAULT_TIMEOUT,
version=None,
read=False,
tag=None,
retry=True,
):
try:
return super(CustomDjangoCache, self).add(
key, value, timeout, version, read, tag, retry
)
except sqlite3.OperationalError:
return False
def has_key(self, key, version=None):
"""Returns True if the key is in the cache and has not expired.
:param key: key for item
:param int version: key version number (default None, cache parameter)
:return: True if key is found
"""
try:
return super(CustomDjangoCache, self).has_key( # noqa: W601
key, version=version
)
except sqlite3.OperationalError:
return False
def get(
self,
key,
default=None,
version=None,
read=False,
expire_time=False,
tag=False,
retry=False,
):
try:
return super(CustomDjangoCache, self).get(
key, default, version, read, expire_time, tag, retry
)
except sqlite3.OperationalError:
return None
def set(
self,
key,
value,
timeout=DEFAULT_TIMEOUT,
version=None,
read=False,
tag=None,
retry=True,
):
try:
return super(CustomDjangoCache, self).set(
key, value, timeout, version, read, tag, retry
)
except sqlite3.OperationalError:
return False
def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None, retry=True):
try:
return super(CustomDjangoCache, self).touch(key, timeout, version, retry)
except sqlite3.OperationalError:
return False
def pop(
self, key, default=None, version=None, expire_time=False, tag=False, retry=True
):
try:
return super(CustomDjangoCache, self).pop(
key, default, version, expire_time, tag, retry
)
except sqlite3.OperationalError:
return None
def delete(self, key, version=None, retry=True):
try:
super(CustomDjangoCache, self).delete(key, version, retry)
except sqlite3.OperationalError:
pass
def incr(self, key, delta=1, version=None, default=None, retry=True):
try:
return super(CustomDjangoCache, self).incr(
key, delta, version, default, retry
)
except sqlite3.OperationalError:
return None
def decr(self, key, delta=1, version=None, default=None, retry=True):
try:
return super(CustomDjangoCache, self).decr(
key, delta, version, default, retry
)
except sqlite3.OperationalError:
return None
def expire(self, retry=False):
try:
return super(CustomDjangoCache, self).expire(retry)
except sqlite3.OperationalError:
return 0
def stats(self, enable=True, reset=False):
try:
return super(CustomDjangoCache, self).stats(enable, reset)
except sqlite3.OperationalError:
return 0, 0
def create_tag_index(self):
try:
super(CustomDjangoCache, self).create_tag_index()
except sqlite3.OperationalError:
pass
def drop_tag_index(self):
try:
super(CustomDjangoCache, self).drop_tag_index()
except sqlite3.OperationalError:
pass
def evict(self, tag):
try:
return super(CustomDjangoCache, self).evict(tag)
except sqlite3.OperationalError:
return 0
def cull(self):
try:
return super(CustomDjangoCache, self).cull()
except sqlite3.OperationalError:
return 0
def METHOD_NAME(self):
try:
return super(CustomDjangoCache, self).METHOD_NAME()
except sqlite3.OperationalError:
return 0
def close(self, **kwargs):
try:
super(CustomDjangoCache, self).close(**kwargs)
except sqlite3.OperationalError:
pass |
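# A hedged configuration sketch: how a Django settings module could point at this
# backend. The dotted import path and cache directory are illustrative assumptions,
# not Kolibri's actual configuration.
CACHES = {
    "default": {
        "BACKEND": "myproject.cache.CustomDjangoCache",  # assumed import path
        "LOCATION": "/tmp/myproject_cache",  # assumed on-disk cache directory
        "TIMEOUT": 300,
    }
}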
299,968 | member | from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Exists, OuterRef
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from mptt.models import MPTTModel, TreeForeignKey
from rdmo.core.models import Model
from rdmo.domain.models import Attribute
from rdmo.questions.models import Catalog, Question
from rdmo.tasks.models import Task
from rdmo.views.models import View
from ..managers import ProjectManager
class Project(MPTTModel, Model):
objects = ProjectManager()
parent = TreeForeignKey(
'self', null=True, blank=True,
on_delete=models.DO_NOTHING, related_name='children', db_index=True,
verbose_name=_('Parent project'),
help_text=_('The parent project of this project.')
)
user = models.ManyToManyField(
settings.AUTH_USER_MODEL, through='Membership', related_name='projects',
verbose_name=_('User'),
help_text=_('The list of users for this project.')
)
site = models.ForeignKey(
Site, on_delete=models.SET_NULL, null=True,
verbose_name=_('Site'),
help_text=_('The site this project belongs to (in a multi site setup).')
)
title = models.CharField(
max_length=256,
verbose_name=_('Title'),
help_text=_('The title for this project.')
)
description = models.TextField(
blank=True,
verbose_name=_('Description'),
help_text=_('A description for this project (optional).')
)
catalog = models.ForeignKey(
Catalog, related_name='projects', on_delete=models.SET_NULL, null=True,
verbose_name=_('Catalog'),
help_text=_('The catalog which will be used for this project.')
)
tasks = models.ManyToManyField(
Task, blank=True, through='Issue',
verbose_name=_('Tasks'),
help_text=_('The tasks that will be used for this project.')
)
views = models.ManyToManyField(
View, blank=True,
verbose_name=_('Views'),
help_text=_('The views that will be used for this project.')
)
class Meta:
ordering = ('tree_id', 'level', 'title')
verbose_name = _('Project')
verbose_name_plural = _('Projects')
class MPTTMeta:
order_insertion_by = ('title', )
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('project', kwargs={'pk': self.pk})
def clean(self):
if self.id and self.parent in self.get_descendants(include_self=True):
raise ValidationError({
'parent': [_('A project may not be moved to be a child of itself or one of its descendants.')]
})
@property
def progress(self):
# create a queryset for the attributes of the catalog for this project
        # the subquery is used to select only attributes that have a non-optional question in the catalog
questions = Question.objects.filter(attribute_id=OuterRef('pk'), questionset__section__catalog_id=self.catalog.id) \
.exclude(is_optional=True)
attributes = Attribute.objects.annotate(active=Exists(questions)).filter(active=True).distinct()
# query the total number of attributes from the qs above
total = attributes.count()
# query all current values with attributes from the qs above, but where the text, option, or file field is set,
# and count only one value per attribute
values = self.values.filter(snapshot=None) \
.filter(attribute__in=attributes) \
.exclude((models.Q(text='') | models.Q(text=None)) & models.Q(option=None) &
(models.Q(file='') | models.Q(file=None))) \
.distinct().values('attribute').count()
try:
ratio = values / total
except ZeroDivisionError:
ratio = 0
return {
'total': total,
'values': values,
'ratio': ratio
}
@property
def catalog_uri(self):
if self.catalog is not None:
return self.catalog.uri
@cached_property
def METHOD_NAME(self):
return self.user.all()
@cached_property
def owners_str(self):
return ', '.join(['' if x is None else str(x) for x in self.user.filter(membership__role='owner')])
@cached_property
def owners(self):
return self.user.filter(memberships__role='owner')
@cached_property
def managers(self):
return self.user.filter(memberships__role='manager')
@cached_property
def authors(self):
return self.user.filter(memberships__role='author')
@cached_property
def guests(self):
return self.user.filter(memberships__role='guest')
@property
def file_size(self):
queryset = self.values.filter(snapshot=None).exclude(models.Q(file='') | models.Q(file=None))
return sum([value.file.size for value in queryset])
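# Illustrative sketch, not part of the original model: the progress ratio above
# is just answered attributes over total catalog attributes; the numbers used
# here are assumptions (e.g. 12 answered values out of 30 attributes -> 0.4).
def _example_progress_ratio(values=12, total=30):
    try:
        return values / total
    except ZeroDivisionError:
        return 0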
@receiver(pre_delete, sender=Project)
def reparent_children(sender, instance, **kwargs):
for child in instance.get_children():
child.move_to(instance.parent, 'last-child')
child.save() |
299,969 | safe timestamp | import os
import re
from datetime import datetime, timezone, timedelta
def parse_timestamp(timestamp, tzinfo=timezone.utc):
"""Parse a ISO 8601 timestamp string.
For naive/unaware dt, assume it is in tzinfo timezone (default: UTC).
"""
dt = datetime.fromisoformat(timestamp)
if dt.tzinfo is None:
dt = dt.replace(tzinfo=tzinfo)
return dt
def parse_local_timestamp(timestamp, tzinfo=None):
"""Parse a ISO 8601 timestamp string.
For naive/unaware dt, assume it is in local timezone.
Convert to tzinfo timezone (the default None means: local timezone).
"""
dt = datetime.fromisoformat(timestamp)
if dt.tzinfo is None:
dt = dt.astimezone(tz=tzinfo)
return dt
def timestamp(s):
"""Convert a --timestamp=s argument to a datetime object"""
try:
# is it pointing to a file / directory?
ts = safe_s(os.stat(s).st_mtime)
return datetime.fromtimestamp(ts, tz=timezone.utc)
except OSError:
# didn't work, try parsing as an ISO timestamp. If no TZ is given, we assume local timezone.
return parse_local_timestamp(s)
# Not too rarely, we get crappy timestamps from the fs that overflow some computations.
# As they are crap anyway (valid filesystem timestamps always refer to the past up to
# the present, but never to the future), nothing is lost if we just clamp them to the
# maximum value we can support.
# As long as people are using borg on 32bit platforms to access borg archives, we must
# keep this value True. But we can expect that we can stop supporting 32bit platforms
# well before coming close to the year 2038, so this will never be a practical problem.
SUPPORT_32BIT_PLATFORMS = True # set this to False before y2038.
if SUPPORT_32BIT_PLATFORMS:
# second timestamps will fit into a signed int32 (platform time_t limit).
# nanosecond timestamps thus will naturally fit into a signed int64.
# subtract last 48h to avoid any issues that could be caused by tz calculations.
# this is in the year 2038, so it is also less than y9999 (which is a datetime internal limit).
# msgpack can pack up to uint64.
MAX_S = 2**31 - 1 - 48 * 3600
MAX_NS = MAX_S * 1000000000
else:
# nanosecond timestamps will fit into a signed int64.
# subtract last 48h to avoid any issues that could be caused by tz calculations.
# this is in the year 2262, so it is also less than y9999 (which is a datetime internal limit).
# round down to 1e9 multiple, so MAX_NS corresponds precisely to an integer MAX_S.
# msgpack can pack up to uint64.
MAX_NS = (2**63 - 1 - 48 * 3600 * 1000000000) // 1000000000 * 1000000000
MAX_S = MAX_NS // 1000000000
def safe_s(ts):
if 0 <= ts <= MAX_S:
return ts
elif ts < 0:
return 0
else:
return MAX_S
def safe_ns(ts):
if 0 <= ts <= MAX_NS:
return ts
elif ts < 0:
return 0
else:
return MAX_NS
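# Illustrative sketch, not part of the original module: out-of-range timestamps
# are clamped rather than rejected -- negative values become 0 and far-future
# values become MAX_S / MAX_NS.
def _clamp_examples():
    return safe_s(-1), safe_s(MAX_S + 1), safe_ns(MAX_NS + 1)  # -> (0, MAX_S, MAX_NS)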
def METHOD_NAME(item_timestamp_ns):
t_ns = safe_ns(item_timestamp_ns)
return datetime.fromtimestamp(t_ns / 1e9, timezone.utc) # return tz-aware utc datetime obj
def format_time(ts: datetime, format_spec=""):
"""
Convert *ts* to a human-friendly format with textual weekday.
"""
return ts.strftime("%a, %Y-%m-%d %H:%M:%S %z" if format_spec == "" else format_spec)
def format_timedelta(td):
"""Format timedelta in a human friendly format"""
ts = td.total_seconds()
s = ts % 60
m = int(ts / 60) % 60
h = int(ts / 3600) % 24
txt = "%.2f seconds" % s
if m:
txt = "%d minutes %s" % (m, txt)
if h:
txt = "%d hours %s" % (h, txt)
if td.days:
txt = "%d days %s" % (td.days, txt)
return txt
def calculate_relative_offset(format_string, from_ts, earlier=False):
"""
Calculates an offset based on a relative marker, e.g. 7d (7 days) or 8m (8 months).
earlier: whether offset should be calculated to an earlier time.
"""
if from_ts is None:
from_ts = archive_ts_now()
if format_string is not None:
offset_regex = re.compile(r"(?P<offset>\d+)(?P<unit>[md])")
match = offset_regex.search(format_string)
if match:
unit = match.group("unit")
offset = int(match.group("offset"))
offset *= -1 if earlier else 1
if unit == "d":
return from_ts + timedelta(days=offset)
elif unit == "m":
return offset_n_months(from_ts, offset)
raise ValueError(f"Invalid relative ts offset format: {format_string}")
def offset_n_months(from_ts, n_months):
def get_month_and_year_from_total(total_completed_months):
month = (total_completed_months % 12) + 1
year = total_completed_months // 12
return month, year
# Calculate target month and year by getting completed total_months until target_month
total_months = (from_ts.year * 12) + from_ts.month + n_months - 1
target_month, target_year = get_month_and_year_from_total(total_months)
# calculate the max days of the target month by subtracting a day from the next month
following_month, year_of_following_month = get_month_and_year_from_total(total_months + 1)
max_days_in_month = (datetime(year_of_following_month, following_month, 1) - timedelta(1)).day
return datetime(day=min(from_ts.day, max_days_in_month), month=target_month, year=target_year).replace(
tzinfo=from_ts.tzinfo
)
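# Illustrative sketch, not part of the original module: offset_n_months clamps
# to the last valid day of the target month, so Jan 31 + 1 month gives Feb 28.
def _offset_n_months_example():
    return offset_n_months(datetime(2023, 1, 31, tzinfo=timezone.utc), 1)  # 2023-02-28 UTC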
class OutputTimestamp:
def __init__(self, ts: datetime):
self.ts = ts
def __format__(self, format_spec):
# we want to output a timestamp in the user's local timezone
return format_time(self.ts.astimezone(), format_spec=format_spec)
def __str__(self):
return f"{self}"
def isoformat(self):
# we want to output a timestamp in the user's local timezone
return self.ts.astimezone().isoformat(timespec="microseconds")
to_json = isoformat
def archive_ts_now():
"""return tz-aware datetime obj for current time for usage as archive timestamp"""
return datetime.now(timezone.utc) # utc time / utc timezone |
299,970 | write sst strings | ###############################################################################
#
# SharedStrings - A class for writing the Excel XLSX sharedStrings file.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright 2013-2023, John McNamara, [email protected]
#
# Standard packages.
import re
# Package imports.
from . import xmlwriter
from .utility import preserve_whitespace
# Compile performance critical regular expressions.
re_control_chars_1 = re.compile("(_x[0-9a-fA-F]{4}_)")
re_control_chars_2 = re.compile(r"([\x00-\x08\x0b-\x1f])")
class SharedStrings(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX sharedStrings file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self):
"""
Constructor.
"""
super(SharedStrings, self).__init__()
self.string_table = None
###########################################################################
#
# Private API.
#
###########################################################################
def _assemble_xml_file(self):
# Assemble and write the XML file.
# Write the XML declaration.
self._xml_declaration()
# Write the sst element.
self._write_sst()
# Write the sst strings.
self.METHOD_NAME()
# Close the sst tag.
self._xml_end_tag("sst")
# Close the file.
self._xml_close()
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_sst(self):
# Write the <sst> element.
xmlns = "http://schemas.openxmlformats.org/spreadsheetml/2006/main"
attributes = [
("xmlns", xmlns),
("count", self.string_table.count),
("uniqueCount", self.string_table.unique_count),
]
self._xml_start_tag("sst", attributes)
def METHOD_NAME(self):
# Write the sst string elements.
for string in self.string_table.string_array:
self._write_si(string)
def _write_si(self, string):
# Write the <si> element.
attributes = []
# Excel escapes control characters with _xHHHH_ and also escapes any
# literal strings of that type by encoding the leading underscore.
# So "\0" -> _x0000_ and "_x0000_" -> _x005F_x0000_.
# The following substitutions deal with those cases.
# Escape the escape.
string = re_control_chars_1.sub(r"_x005F\1", string)
# Convert control character to the _xHHHH_ escape.
string = re_control_chars_2.sub(
lambda match: "_x%04X_" % ord(match.group(1)), string
)
# Escapes non characters in strings.
string = string.replace("\uFFFE", "_xFFFE_")
string = string.replace("\uFFFF", "_xFFFF_")
# Add attribute to preserve leading or trailing whitespace.
if preserve_whitespace(string):
attributes.append(("xml:space", "preserve"))
# Write any rich strings without further tags.
if string.startswith("<r>") and string.endswith("</r>"):
self._xml_rich_si_element(string)
else:
self._xml_si_element(string, attributes)
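# Illustrative sketch, not part of the original module: how the two escape
# substitutions in _write_si() interact. The sample string is an assumption.
def _escape_example():
    sample = "_x0000_ and \x01"
    # the literal "_x0000_" gets its leading underscore escaped first ...
    escaped = re_control_chars_1.sub(r"_x005F\1", sample)
    # ... then the raw control character 0x01 becomes an _xHHHH_ escape
    escaped = re_control_chars_2.sub(lambda m: "_x%04X_" % ord(m.group(1)), escaped)
    return escaped  # '_x005F_x0000_ and _x0001_'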
# A metadata class to store Excel strings between worksheets.
class SharedStringTable(object):
"""
A class to track Excel shared strings between worksheets.
"""
def __init__(self):
self.count = 0
self.unique_count = 0
self.string_table = {}
self.string_array = []
def _get_shared_string_index(self, string):
""" " Get the index of the string in the Shared String table."""
if string not in self.string_table:
# String isn't already stored in the table so add it.
index = self.unique_count
self.string_table[string] = index
self.count += 1
self.unique_count += 1
return index
else:
# String exists in the table.
index = self.string_table[string]
self.count += 1
return index
def _get_shared_string(self, index):
""" " Get a shared string from the index."""
return self.string_array[index]
def _sort_string_data(self):
""" " Sort the shared string data and convert from dict to list."""
self.string_array = sorted(self.string_table, key=self.string_table.__getitem__)
self.string_table = {} |
299,971 | get sync identity provider | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSyncIdentityProviderResult',
'AwaitableGetSyncIdentityProviderResult',
'get_sync_identity_provider',
'get_sync_identity_provider_output',
]
@pulumi.output_type
class GetSyncIdentityProviderResult:
"""
SyncIdentityProvider represents a SyncIdentityProvider
"""
def __init__(__self__, id=None, name=None, resources=None, system_data=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resources and not isinstance(resources, str):
raise TypeError("Expected argument 'resources' to be a str")
pulumi.set(__self__, "resources", resources)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def resources(self) -> Optional[str]:
return pulumi.get(self, "resources")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetSyncIdentityProviderResult(GetSyncIdentityProviderResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSyncIdentityProviderResult(
id=self.id,
name=self.name,
resources=self.resources,
system_data=self.system_data,
type=self.type)
def METHOD_NAME(child_resource_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSyncIdentityProviderResult:
"""
The operation returns properties of a SyncIdentityProvider.
:param str child_resource_name: The name of the SyncIdentityProvider resource.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str resource_name: The name of the OpenShift cluster resource.
"""
__args__ = dict()
__args__['childResourceName'] = child_resource_name
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:redhatopenshift/v20230701preview:getSyncIdentityProvider', __args__, opts=opts, typ=GetSyncIdentityProviderResult).value
return AwaitableGetSyncIdentityProviderResult(
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
resources=pulumi.get(__ret__, 'resources'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(METHOD_NAME)
def get_sync_identity_provider_output(child_resource_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSyncIdentityProviderResult]:
"""
The operation returns properties of a SyncIdentityProvider.
:param str child_resource_name: The name of the SyncIdentityProvider resource.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str resource_name: The name of the OpenShift cluster resource.
"""
... |
299,972 | test warm coinbase gas usage | """
abstract: Tests [EIP-3651: Warm COINBASE](https://eips.ethereum.org/EIPS/eip-3651)
Tests for [EIP-3651: Warm COINBASE](https://eips.ethereum.org/EIPS/eip-3651).
note: Tests ported from:
- [ethereum/tests/pull/1082](https://github.com/ethereum/tests/pull/1082).
"""
import pytest
from ethereum_test_forks import Shanghai, is_fork
from ethereum_test_tools import (
Account,
CodeGasMeasure,
Environment,
TestAddress,
Transaction,
to_address,
)
from ethereum_test_tools.vm.opcode import Opcodes as Op
REFERENCE_SPEC_GIT_PATH = "EIPS/eip-3651.md"
REFERENCE_SPEC_VERSION = "d94c694c6f12291bb6626669c3e8587eef3adff1"
# Amount of gas required to make a call to a warm account.
# Calling a cold account with this amount of gas results in an exception.
GAS_REQUIRED_CALL_WARM_ACCOUNT = 100
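# Illustrative sketch, not part of the original test: how the "extra gas"
# constants in the parametrization below are derived. The per-opcode costs
# (COINBASE=2, PUSH1=3, DUP1=3, POP=2) are assumed Yellow Paper values.
_COINBASE_GAS, _PUSH1_GAS, _DUP1_GAS, _POP_GAS = 2, 3, 3, 2
# CALL/CALLCODE argument setup: COINBASE + 4*PUSH1 + 2*DUP1 + POP
assert _COINBASE_GAS + 4 * _PUSH1_GAS + 2 * _DUP1_GAS + _POP_GAS == 22
# DELEGATECALL/STATICCALL argument setup: COINBASE + 3*PUSH1 + 2*DUP1 + POP
assert _COINBASE_GAS + 3 * _PUSH1_GAS + 2 * _DUP1_GAS + _POP_GAS == 19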
@pytest.mark.valid_from("Shanghai")
@pytest.mark.parametrize(
"use_sufficient_gas",
[True, False],
ids=["sufficient_gas", "insufficient_gas"],
)
@pytest.mark.parametrize(
"opcode,contract_under_test_code,call_gas_exact",
[
(
"call",
Op.POP(Op.CALL(0, Op.COINBASE, 0, 0, 0, 0, 0)),
# Extra gas: COINBASE + 4*PUSH1 + 2*DUP1 + POP
GAS_REQUIRED_CALL_WARM_ACCOUNT + 22,
),
(
"callcode",
Op.POP(Op.CALLCODE(0, Op.COINBASE, 0, 0, 0, 0, 0)),
# Extra gas: COINBASE + 4*PUSH1 + 2*DUP1 + POP
GAS_REQUIRED_CALL_WARM_ACCOUNT + 22,
),
(
"delegatecall",
Op.POP(Op.DELEGATECALL(0, Op.COINBASE, 0, 0, 0, 0)),
# Extra: COINBASE + 3*PUSH1 + 2*DUP1 + POP
GAS_REQUIRED_CALL_WARM_ACCOUNT + 19,
),
(
"staticcall",
Op.POP(Op.STATICCALL(0, Op.COINBASE, 0, 0, 0, 0)),
# Extra: COINBASE + 3*PUSH1 + 2*DUP1 + POP
GAS_REQUIRED_CALL_WARM_ACCOUNT + 19,
),
],
ids=["CALL", "CALLCODE", "DELEGATECALL", "STATICCALL"],
)
def test_warm_coinbase_call_out_of_gas(
state_test,
fork,
opcode,
contract_under_test_code,
call_gas_exact,
use_sufficient_gas,
):
"""
Test that the coinbase is warm by accessing the COINBASE with each
of the following opcodes:
- CALL
- CALLCODE
- DELEGATECALL
- STATICCALL
"""
env = Environment(
coinbase="0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
difficulty=0x20000,
gas_limit=10000000000,
number=1,
timestamp=1000,
)
caller_address = "0xcccccccccccccccccccccccccccccccccccccccc"
contract_under_test_address = 0x100
if not use_sufficient_gas:
call_gas_exact -= 1
caller_code = Op.SSTORE(
0,
Op.CALL(call_gas_exact, contract_under_test_address, 0, 0, 0, 0, 0),
)
pre = {
TestAddress: Account(balance=1000000000000000000000),
caller_address: Account(code=caller_code),
to_address(contract_under_test_address): Account(code=contract_under_test_code),
}
tx = Transaction(
ty=0x0,
chain_id=0x01,
nonce=0,
to=caller_address,
gas_limit=100000000,
gas_price=10,
)
post = {}
if use_sufficient_gas and is_fork(fork=fork, which=Shanghai):
post[caller_address] = Account(
storage={
# On shanghai and beyond, calls with only 100 gas to
# coinbase will succeed.
0: 1,
}
)
else:
post[caller_address] = Account(
storage={
# Before shanghai, calls with only 100 gas to
# coinbase will fail.
0: 0,
}
)
state_test(
env=env,
pre=pre,
post=post,
txs=[tx],
tag="opcode_" + opcode,
)
# List of opcodes that are affected by EIP-3651
gas_measured_opcodes = [
(
"EXTCODESIZE",
CodeGasMeasure(
code=Op.EXTCODESIZE(Op.COINBASE),
overhead_cost=2,
extra_stack_items=1,
),
),
(
"EXTCODECOPY",
CodeGasMeasure(
code=Op.EXTCODECOPY(Op.COINBASE, 0, 0, 0),
overhead_cost=2 + 3 + 3 + 3,
),
),
(
"EXTCODEHASH",
CodeGasMeasure(
code=Op.EXTCODEHASH(Op.COINBASE),
overhead_cost=2,
extra_stack_items=1,
),
),
(
"BALANCE",
CodeGasMeasure(
code=Op.BALANCE(Op.COINBASE),
overhead_cost=2,
extra_stack_items=1,
),
),
(
"CALL",
CodeGasMeasure(
code=Op.CALL(0xFF, Op.COINBASE, 0, 0, 0, 0, 0),
overhead_cost=3 + 2 + 3 + 3 + 3 + 3 + 3,
extra_stack_items=1,
),
),
(
"CALLCODE",
CodeGasMeasure(
code=Op.CALLCODE(0xFF, Op.COINBASE, 0, 0, 0, 0, 0),
overhead_cost=3 + 2 + 3 + 3 + 3 + 3 + 3,
extra_stack_items=1,
),
),
(
"DELEGATECALL",
CodeGasMeasure(
code=Op.DELEGATECALL(0xFF, Op.COINBASE, 0, 0, 0, 0),
overhead_cost=3 + 2 + 3 + 3 + 3 + 3,
extra_stack_items=1,
),
),
(
"STATICCALL",
CodeGasMeasure(
code=Op.STATICCALL(0xFF, Op.COINBASE, 0, 0, 0, 0),
overhead_cost=3 + 2 + 3 + 3 + 3 + 3,
extra_stack_items=1,
),
),
]
@pytest.mark.valid_from("Merge") # these tests fill for fork >= Berlin
@pytest.mark.parametrize(
"opcode,code_gas_measure",
gas_measured_opcodes,
ids=[i[0] for i in gas_measured_opcodes],
)
def METHOD_NAME(state_test, fork, opcode, code_gas_measure):
"""
Test the gas usage of opcodes affected by assuming a warm coinbase:
- EXTCODESIZE
- EXTCODECOPY
- EXTCODEHASH
- BALANCE
- CALL
- CALLCODE
- DELEGATECALL
- STATICCALL
"""
env = Environment(
coinbase="0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
difficulty=0x20000,
gas_limit=10000000000,
number=1,
timestamp=1000,
)
measure_address = to_address(0x100)
pre = {
TestAddress: Account(balance=1000000000000000000000),
measure_address: Account(code=code_gas_measure, balance=1000000000000000000000),
}
if is_fork(fork, Shanghai):
expected_gas = GAS_REQUIRED_CALL_WARM_ACCOUNT # Warm account access cost after EIP-3651
else:
expected_gas = 2600 # Cold account access cost before EIP-3651
post = {
measure_address: Account(
storage={
0x00: expected_gas,
}
)
}
tx = Transaction(
ty=0x0,
chain_id=0x01,
nonce=0,
to=measure_address,
gas_limit=100000000,
gas_price=10,
)
state_test(
env=env,
pre=pre,
post=post,
txs=[tx],
tag="opcode_" + opcode.lower(),
) |
299,973 | items | # -*- coding: utf-8 -*-
# Copyright 2023 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Extractors for https://vipergirls.to/"""
from .common import Extractor, Message
from .. import text, util, exception
from ..cache import cache
from xml.etree import ElementTree
BASE_PATTERN = r"(?:https?://)?(?:www\.)?vipergirls\.to"
class VipergirlsExtractor(Extractor):
"""Base class for vipergirls extractors"""
category = "vipergirls"
root = "https://vipergirls.to"
request_interval = 0.5
request_interval_min = 0.2
cookies_domain = ".vipergirls.to"
cookies_names = ("vg_userid", "vg_password")
def _init(self):
self.session.headers["Referer"] = self.root + "/"
def METHOD_NAME(self):
self.login()
for post in self.posts():
data = post.attrib
data["thread_id"] = self.thread_id
yield Message.Directory, data
for image in post:
yield Message.Queue, image.attrib["main_url"], data
def login(self):
if self.cookies_check(self.cookies_names):
return
username, password = self._get_auth_info()
if username:
self.cookies_update(self._login_impl(username, password))
@cache(maxage=90*24*3600, keyarg=1)
def _login_impl(self, username, password):
self.log.info("Logging in as %s", username)
url = "{}/login.php?do=login".format(self.root)
data = {
"vb_login_username": username,
"vb_login_password": password,
"do" : "login",
"cookieuser" : "1",
}
response = self.request(url, method="POST", data=data)
if not response.cookies.get("vg_password"):
raise exception.AuthenticationError()
return {cookie.name: cookie.value
for cookie in response.cookies}
class VipergirlsThreadExtractor(VipergirlsExtractor):
"""Extractor for vipergirls threads"""
subcategory = "thread"
pattern = BASE_PATTERN + r"/threads/(\d+)(?:-[^/?#]+)?(/page\d+)?$"
test = (
(("https://vipergirls.to/threads/4328304"
"-2011-05-28-Danica-Simply-Beautiful-x112-4500x3000"), {
"url": "0d75cb42777f5bebc0d284d1d38cb90c750c61d9",
"count": 225,
}),
("https://vipergirls.to/threads/6858916-Karina/page4", {
"count": 1279,
}),
("https://vipergirls.to/threads/4328304"),
)
def __init__(self, match):
VipergirlsExtractor.__init__(self, match)
self.thread_id, self.page = match.groups()
def posts(self):
url = "{}/vr.php?t={}".format(self.root, self.thread_id)
root = ElementTree.fromstring(self.request(url).text)
posts = root.iter("post")
if self.page:
util.advance(posts, (text.parse_int(self.page[5:]) - 1) * 15)
return posts
class VipergirlsPostExtractor(VipergirlsExtractor):
"""Extractor for vipergirls posts"""
subcategory = "post"
pattern = (BASE_PATTERN +
r"/threads/(\d+)(?:-[^/?#]+)?\?p=\d+[^#]*#post(\d+)")
test = (
(("https://vipergirls.to/threads/4328304-2011-05-28-Danica-Simply-"
"Beautiful-x112-4500x3000?p=116038081&viewfull=1#post116038081"), {
"pattern": r"https://vipr\.im/\w{12}$",
"range": "2-113",
"count": 112,
"keyword": {
"id": "116038081",
"imagecount": "113",
"number": "116038081",
"thread_id": "4328304",
"title": "FemJoy Danica - Simply Beautiful (x112) 3000x4500",
},
}),
)
def __init__(self, match):
VipergirlsExtractor.__init__(self, match)
self.thread_id, self.post_id = match.groups()
def posts(self):
url = "{}/vr.php?p={}".format(self.root, self.post_id)
root = ElementTree.fromstring(self.request(url).text)
return root.iter("post") |
299,974 | test minimal yolo with connect | import deeplake
import pytest
from deeplake.util.exceptions import IngestionError
@pytest.mark.parametrize("shuffle", [True, False])
def test_minimal_yolo_ingestion(local_path, yolo_ingestion_data, shuffle):
params = {
"data_directory": yolo_ingestion_data["data_directory"],
"class_names_file": yolo_ingestion_data["class_names_file"],
}
ds = deeplake.ingest_yolo(**params, shuffle=shuffle, dest=local_path)
assert ds.path == local_path
assert "images" in ds.tensors
assert "boxes" in ds.tensors
assert "labels" in ds.tensors
assert len(ds.labels.info["class_names"]) > 0
assert ds.boxes.htype == "bbox"
def test_minimal_yolo_ingestion_no_class_names(local_path, yolo_ingestion_data):
params = {
"data_directory": yolo_ingestion_data["data_directory"],
"class_names_file": None,
}
ds = deeplake.ingest_yolo(**params, dest=local_path)
assert ds.path == local_path
assert "images" in ds.tensors
assert "boxes" in ds.tensors
assert "labels" in ds.tensors
assert ds.labels.info["class_names"] == []
assert ds.boxes.htype == "bbox"
def test_minimal_yolo_ingestion_separate_annotations(local_path, yolo_ingestion_data):
params = {
"data_directory": yolo_ingestion_data["data_directory_no_annotations"],
"class_names_file": yolo_ingestion_data["class_names_file"],
"annotations_directory": yolo_ingestion_data["annotations_directory"],
}
ds = deeplake.ingest_yolo(**params, dest=local_path)
assert ds.path == local_path
assert "images" in ds.tensors
assert "boxes" in ds.tensors
assert "labels" in ds.tensors
assert len(ds.labels.info["class_names"]) > 0
assert ds.boxes.htype == "bbox"
def test_minimal_yolo_ingestion_missing_annotations(local_path, yolo_ingestion_data):
params = {
"data_directory": yolo_ingestion_data["data_directory_missing_annotations"],
"class_names_file": yolo_ingestion_data["class_names_file"],
"allow_no_annotation": True,
}
ds = deeplake.ingest_yolo(**params, dest=local_path)
assert ds.path == local_path
assert "images" in ds.tensors
assert "boxes" in ds.tensors
assert "labels" in ds.tensors
assert len(ds.labels.info["class_names"]) > 0
assert ds.boxes.htype == "bbox"
def test_minimal_yolo_ingestion_unsupported_annotations(
local_path, yolo_ingestion_data
):
params = {
"data_directory": yolo_ingestion_data["data_directory_unsupported_annotations"],
"class_names_file": yolo_ingestion_data["class_names_file"],
}
with pytest.raises(IngestionError):
ds = deeplake.ingest_yolo(**params, dest=local_path)
def test_minimal_yolo_ingestion_bad_data_path(local_path, yolo_ingestion_data):
params = {
"data_directory": yolo_ingestion_data["data_directory"] + "corrupt_this_path",
"class_names_file": yolo_ingestion_data["class_names_file"],
}
with pytest.raises(IngestionError):
ds = deeplake.ingest_yolo(**params, dest=local_path)
def test_minimal_yolo_ingestion_poly(local_path, yolo_ingestion_data):
params = {
"data_directory": yolo_ingestion_data["data_directory"],
"class_names_file": yolo_ingestion_data["class_names_file"],
}
ds = deeplake.ingest_yolo(
**params,
dest=local_path,
coordinates_params={"name": "polygons", "htype": "polygon"},
)
assert ds.path == local_path
assert "images" in ds.tensors
assert "polygons" in ds.tensors
assert "labels" in ds.tensors
assert len(ds.labels.info["class_names"]) > 0
assert ds.polygons.htype == "polygon"
def METHOD_NAME(
s3_path,
yolo_ingestion_data,
hub_cloud_path,
hub_cloud_dev_token,
hub_cloud_dev_managed_creds_key,
):
params = {
"data_directory": yolo_ingestion_data["data_directory"],
"class_names_file": yolo_ingestion_data["class_names_file"],
}
ds = deeplake.ingest_yolo(
**params,
dest=s3_path,
connect_kwargs={
"dest_path": hub_cloud_path,
"creds_key": hub_cloud_dev_managed_creds_key,
"token": hub_cloud_dev_token,
},
)
assert ds.path == hub_cloud_path
assert "images" in ds.tensors
assert "boxes" in ds.tensors
assert "labels" in ds.tensors
assert len(ds.labels.info["class_names"]) > 0
assert ds.boxes.htype == "bbox"
def test_minimal_yolo_ingestion_with_linked_images(
s3_path,
yolo_ingestion_data,
hub_cloud_path,
hub_cloud_dev_token,
hub_cloud_dev_managed_creds_key,
):
params = {
"data_directory": yolo_ingestion_data["data_directory"],
"class_names_file": yolo_ingestion_data["class_names_file"],
}
ds = deeplake.ingest_yolo(
**params,
dest=s3_path,
image_params={
"name": "linked_images",
"htype": "link[image]",
"sample_compression": "png",
},
image_creds_key=hub_cloud_dev_managed_creds_key,
connect_kwargs={
"dest_path": hub_cloud_path,
"creds_key": hub_cloud_dev_managed_creds_key,
"token": hub_cloud_dev_token,
},
)
assert ds.path == hub_cloud_path
assert "linked_images" in ds.tensors
assert "boxes" in ds.tensors
assert "labels" in ds.tensors
assert len(ds.labels.info["class_names"]) > 0
assert ds.linked_images.htype == "link[image]" |
299,975 | set up | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.utils import cleanup_resources
from marvin.lib.base import ServiceOffering, DiskOffering, Account, VirtualMachine,\
queryAsyncJobResult, PASS
from marvin.lib.common import get_domain, get_zone, get_test_template
from pytz import timezone
class TestAsyncJob(cloudstackTestCase):
"""
Test queryAsyncJobResult
"""
@classmethod
def setUpClass(cls):
cls.testClient = super(TestAsyncJob, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls.template = get_test_template(
cls.api_client,
cls.zone.id,
cls.hypervisor
)
cls._cleanup = []
# Create service, disk offerings etc
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testdata["service_offering"]
)
cls._cleanup.append(cls.service_offering)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.testdata["disk_offering"]
)
cls._cleanup.append(cls.disk_offering)
@classmethod
def tearDownClass(cls):
super(TestAsyncJob,cls).tearDownClass()
def METHOD_NAME(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.testdata["virtual_machine"]["zoneid"] = self.zone.id
self.testdata["virtual_machine"]["template"] = self.template.id
self.testdata["iso"]["zoneid"] = self.zone.id
self.account = Account.create(
self.apiclient,
self.testdata["account"],
domainid=self.domain.id
)
self.cleanup = [self.account]
def tearDown(self):
super(TestAsyncJob,self).tearDown()
@attr(tags=["advanced", "eip", "advancedns", "basic", "sg"], required_hardware="false")
def test_query_async_job_result(self):
"""
Test queryAsyncJobResult API for expected values
"""
self.debug("Deploying instance in the account: %s" %
self.account.name)
virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
hypervisor=self.hypervisor
)
self.cleanup.append(virtual_machine)
response = virtual_machine.getState(
self.apiclient,
VirtualMachine.RUNNING)
self.assertEqual(response[0], PASS, response[1])
cmd = queryAsyncJobResult.queryAsyncJobResultCmd()
cmd.jobid = virtual_machine.jobid
cmd_response = self.apiclient.queryAsyncJobResult(cmd)
db_result = self.dbclient.execute("select * from async_job where uuid='%s'" %
virtual_machine.jobid)
# verify that 'completed' value from api equals 'removed' db column value
completed = cmd_response.completed
removed = timezone('UTC').localize(db_result[0][17])
removed = removed.strftime("%Y-%m-%dT%H:%M:%S%z")
self.assertEqual(completed, removed,
"Expected 'completed' timestamp value %s to be equal to "
"'removed' db column value %s." % (completed, removed))
# verify that api job_status value equals db job_status value
jobstatus_db = db_result[0][8]
jobstatus_api = cmd_response.jobstatus
self.assertEqual(jobstatus_api, jobstatus_db,
"Expected 'jobstatus' api value %s to be equal to "
"'job_status' db column value %s." % (jobstatus_api, jobstatus_db)) |
299,976 | bgp converge | #!/usr/bin/env python
# SPDX-License-Identifier: ISC
#
# bgp_tcp_mss.py
# Part of NetDEF Topology Tests
#
# Copyright (c) 2021 by
# Abhinay Ramesh <[email protected]>
#
"""
bgp_tcp_mss.py:
Test if the following commands work:
router bgp 65000
neighbor 192.168.255.2 tcp-mss 500
Need to verify if the tcp-mss value is reflected in the TCP session.
"""
import os
import sys
import json
import pytest
import functools
# add after imports, before defining classes or functions:
pytestmark = [pytest.mark.bgpd]
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
pytestmark = [pytest.mark.bgpd]
def build_topo(tgen):
for routern in range(1, 3):
tgen.add_router("r{}".format(routern))
switch = tgen.add_switch("s1")
switch.add_link(tgen.gears["r1"])
switch.add_link(tgen.gears["r2"])
def setup_module(mod):
tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
for i, (rname, router) in enumerate(router_list.items(), 1):
router.load_config(
TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
tgen.start_router()
def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
def test_bgp_tcp_mss():
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
router1 = tgen.gears["r1"]
router2 = tgen.gears["r2"]
def METHOD_NAME(router):
output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.2 json"))
expected = {
"192.168.255.2": {
"bgpState": "Established",
"addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 0}},
}
}
return topotest.json_cmp(output, expected)
def _bgp_conf_tcp_mss(router, as_num, neigh):
router.vtysh_cmd(
"""configure terminal
router bgp {0}
neighbor {1} tcp-mss 500""".format(
as_num, neigh
)
)
def _bgp_clear_session(router):
router.vtysh_cmd("clear bgp *")
def _bgp_check_neighbor_tcp_mss(router, neigh):
output = json.loads(router.vtysh_cmd("show bgp neighbor {} json".format(neigh)))
expected = {
"{}".format(neigh): {"bgpTcpMssConfigured": 500, "bgpTcpMssSynced": 488}
}
return topotest.json_cmp(output, expected)
logger.info("Check if neighbor sessions are up in {}".format(router1.name))
test_func = functools.partial(METHOD_NAME, router1)
success, result = topotest.run_and_expect(test_func, None, count=15, wait=0.5)
assert result is None, 'Failed to see BGP convergence in "{}"'.format(router1.name)
logger.info("BGP neighbor session is up in {}".format(router1.name))
logger.info(
"Configure tcp-mss 500 on {} and reset the session".format(router1.name)
)
_bgp_conf_tcp_mss(router1, "65000", "192.168.255.2")
_bgp_clear_session(router1)
logger.info(
"Configure tcp-mss 500 on {} and reset the session".format(router2.name)
)
_bgp_conf_tcp_mss(router2, "65001", "192.168.255.1")
_bgp_clear_session(router2)
logger.info(
"Check if neighbor session is up after reset in {}".format(router1.name)
)
test_func = functools.partial(METHOD_NAME, router1)
success, result = topotest.run_and_expect(test_func, None, count=15, wait=0.5)
assert result is None, 'Failed to see BGP convergence after reset in "{}"'.format(
router1.name
)
logger.info(
"Verify if TCP MSS value is synced with neighbor in {}".format(router1.name)
)
test_func = functools.partial(_bgp_check_neighbor_tcp_mss, router1, "192.168.255.2")
success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
assert (
result is None
), 'Failed to sync TCP MSS value over BGP session in "{}"'.format(router1.name)
logger.info("TCP MSS value is synced with neighbor in {}".format(router1.name))
logger.info(
"Verify if TCP MSS value is synced with neighbor in {}".format(router2.name)
)
test_func = functools.partial(_bgp_check_neighbor_tcp_mss, router2, "192.168.255.1")
success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
assert (
result is None
), 'Failed to sync TCP MSS value over BGP session in "{}"'.format(router2.name)
logger.info("TCP MSS value is synced with neighbor in {}".format(router2.name))
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args)) |
299,977 | inc linux counter | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import abc
import logging
from ctypes import c_ulong
from functools import lru_cache
from socket import AF_INET, AF_INET6, inet_ntop
from struct import pack
from typing import Tuple
import psutil
from magma.kernsnoopd import metrics
# TASK_COMM_LEN is the string length of binary names that the kernel reports.
# Value should be the same as found in <linux/sched.h>
TASK_COMM_LEN = 16
class EBPFHandler(abc.ABC):
"""
EBPFHandler class defines the interface for front-end programs
corresponding to loaded eBPF programs.
Method handle() must be implemented by a sub-class. Snooper will call the
handle() method of registered front-end programs periodically.
"""
def __init__(self, service_registry):
self._registry = service_registry
# only the first TASK_COMM_LEN letters of the service name are relevant
# here as the kernel is only sending those in task->comm
self._services = [
s[:TASK_COMM_LEN] for s in service_registry.list_services()
]
@abc.abstractmethod
def handle(self, bpf) -> None:
"""
Handle() should serve as the entry point of the front-end program
performing tasks such as reading metrics collected from the kernel and
storing them into Prometheus.
Args:
bpf: the bcc.BPF instance that was used to load the eBPF program
Raises:
NotImplementedError: Implement in sub-class
"""
raise NotImplementedError()
class ByteCounter(EBPFHandler):
"""
ByteCounter is the front-end program for ebpf/byte_count.bpf.c
"""
def __init__(self, service_registry):
super().__init__(service_registry)
# Addr is a ctypes array of two 64-bit ints. It is used to hold an IPv6
# address of int128 type. This type can be converted to tuple and back
# to make it hashable for caching.
self.Addr = c_ulong * 2
@lru_cache(maxsize=1024)
def _get_cmdline(self, pid: int) -> list:
"""
_get_cmdline returns the command line arguments that were password to
process with the given pid. It caches results in an LRU cache to reduce
cost of reading /proc every time.
Args:
pid: process id
Returns:
list of strings that make up the command line arguments
Raises:
psutil.NoSuchProcess when process with given pid does not exist.
Process may have already exited.
"""
return psutil.Process(pid=pid).cmdline()
@lru_cache(maxsize=1024)
def _ip_addr_to_str(self, family: int, daddr: Tuple[int, int]) -> str:
"""
_ip_addr_to_str returns a string representation of an IPv4 or IPv6
address. It caches results in an LRU cache to reduce cost of conversion
Args:
family: socket.AF_INET (v4) or socket.AF_INET6 (v6)
daddr: For IPv4, uint32 representation of address as the first item
in a tuple. For IPv6, 16-byte array representation of address.
Returns:
String representation of IP address, e.g., '127.0.0.1'
"""
if family == AF_INET:
return inet_ntop(AF_INET, pack('I', daddr[0]))
elif family == AF_INET6:
# noinspection PyTypeChecker
return inet_ntop(AF_INET6, self.Addr(*daddr))
else:
raise Exception("No valid socket family given!")
def handle(self, bpf):
"""
Handle() reads counters from the loaded byte_count program stored as
a dict in 'dest_counters' with key type key_t and value type counter_t
defined in ebpf/common.bpf.h
Args:
bpf: bcc.BPF object that was used to load eBPF program into kernel
"""
table = bpf['dest_counters']
for key, count in table.items():
d_host = self._ip_addr_to_str(key.family, tuple(key.daddr))
service_name = None
try:
service_name = self._get_source_service(key)
# TODO: destination service name inference does not work
# get destination service from host and port
logging.debug(
'%s sent %s bytes to (%s, %s)',
service_name,
count.value,
d_host,
key.dport,
)
_inc_service_counter(service_name, '', count.value)
except ValueError:
# use binary name if source service name was not inferred
binary_name = service_name or key.comm.decode()
METHOD_NAME(binary_name, count.value)
# clear eBPF counters
table.clear()
def _get_source_service(self, key) -> str:
"""
_get_source_service attempts to get Magma service from command line
arguments of running process or binary name
Args:
key: struct of type key_t from which service name is inferred
Returns:
Magma service name inferred from key
Raises:
ValueError: Could not infer service name from key
"""
try:
# get python service name from command line args
# e.g. "python3 -m magma.state.main"
cmdline = self._get_cmdline(key.pid)
python_service = self._get_service_from_cmdline(cmdline)
if python_service:
return python_service
# key.pid process has exited or was not a Python service
except (psutil.NoSuchProcess, IndexError):
binary_name = key.comm.decode()
if binary_name in self._services:
# was a non-Python service
return binary_name
raise ValueError('Could not infer service name from key %s' % key.comm)
def _get_service_from_cmdline(self, cmdline):
if cmdline[2].startswith('magma.'):
return cmdline[2].split('.')[1]
return None
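# Illustrative sketch, not part of the original module: how a kernel-reported
# IPv4 destination address converts back to dotted notation. The value below is
# an assumption and presumes a little-endian host, as in _ip_addr_to_str().
def _example_ipv4_to_str() -> str:
    daddr = 0x0100007F  # 127.0.0.1 as the raw uint32 reported by the eBPF program
    return inet_ntop(AF_INET, pack('I', daddr))  # -> '127.0.0.1'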
def _inc_service_counter(source_service, dest_service, count) -> None:
"""
_inc_service_counter increments Prometheus byte counters for traffic
between gateway and cloud Magma services
Args:
source_service: traffic source service name used as label
dest_service: traffic destination service name used as label
count: byte count to increment
"""
metrics.MAGMA_BYTES_SENT_TOTAL.labels(
service_name=source_service,
dest_service=dest_service,
).inc(count)
def METHOD_NAME(binary_name, count) -> None:
"""
_inc_linux_counter increments Prometheus byte counters for traffic
originating from arbitrary linux binaries
Args:
binary_name: traffic source binary name used as label
count: byte count to increment
"""
metrics.LINUX_BYTES_SENT_TOTAL.labels(binary_name).inc(count)
# ebpf_handlers provides the mapping from ebpf source files
# (e.g. ebpf/packet_count.bpf.c) to front-end program class
ebpf_handlers = {
'byte_count': ByteCounter,
} |
299,978 | raise warnings | from collections import namedtuple
import numpy as np
from ...util import dtype as dtypes
from ...exposure import is_low_contrast
from ..._shared.utils import warn
from math import floor, ceil
_default_colormap = 'gray'
_nonstandard_colormap = 'viridis'
_diverging_colormap = 'RdBu'
ImageProperties = namedtuple('ImageProperties',
['signed', 'out_of_range_float',
'low_data_range', 'unsupported_dtype'])
def _get_image_properties(image):
"""Determine nonstandard properties of an input image.
Parameters
----------
image : array
The input image.
Returns
-------
ip : ImageProperties named tuple
The properties of the image:
- signed: whether the image has negative values.
- out_of_range_float: if the image has floating point data
outside of [-1, 1].
- low_data_range: if the image is in the standard image
range (e.g. [0, 1] for a floating point image) but its
data range would be too small to display with standard
image ranges.
- unsupported_dtype: if the image data type is not a
standard skimage type, e.g. ``numpy.uint64``.
"""
immin, immax = np.min(image), np.max(image)
imtype = image.dtype.type
try:
lo, hi = dtypes.dtype_range[imtype]
except KeyError:
lo, hi = immin, immax
signed = immin < 0
out_of_range_float = (np.issubdtype(image.dtype, np.floating) and
(immin < lo or immax > hi))
low_data_range = (immin != immax and
is_low_contrast(image))
unsupported_dtype = image.dtype not in dtypes._supported_types
return ImageProperties(signed, out_of_range_float,
low_data_range, unsupported_dtype)
def METHOD_NAME(image_properties):
"""Raise the appropriate warning for each nonstandard image type.
Parameters
----------
image_properties : ImageProperties named tuple
The properties of the considered image.
"""
ip = image_properties
if ip.unsupported_dtype:
warn("Non-standard image type; displaying image with "
"stretched contrast.", stacklevel=3)
if ip.low_data_range:
warn("Low image data range; displaying image with "
"stretched contrast.", stacklevel=3)
if ip.out_of_range_float:
warn("Float image out of standard range; displaying "
"image with stretched contrast.", stacklevel=3)
def _get_display_range(image):
"""Return the display range for a given set of image properties.
Parameters
----------
image : array
The input image.
Returns
-------
lo, hi : same type as immin, immax
The display range to be used for the input image.
cmap : string
The name of the colormap to use.
"""
ip = _get_image_properties(image)
immin, immax = np.min(image), np.max(image)
if ip.signed:
magnitude = max(abs(immin), abs(immax))
lo, hi = -magnitude, magnitude
cmap = _diverging_colormap
elif any(ip):
METHOD_NAME(ip)
lo, hi = immin, immax
cmap = _nonstandard_colormap
else:
lo = 0
imtype = image.dtype.type
hi = dtypes.dtype_range[imtype][1]
cmap = _default_colormap
return lo, hi, cmap
def imshow(image, ax=None, show_cbar=None, **kwargs):
"""Show the input image and return the current axes.
By default, the image is displayed in grayscale, rather than
the matplotlib default colormap.
Images are assumed to have standard range for their type. For
example, if a floating point image has values in [0, 0.5], the
most intense color will be gray50, not white.
If the image exceeds the standard range, or if the range is too
small to display, we fall back on displaying exactly the range of
the input image, along with a colorbar to clearly indicate that
this range transformation has occurred.
For signed images, we use a diverging colormap centered at 0.
Parameters
----------
image : array, shape (M, N[, 3])
The image to display.
ax : `matplotlib.axes.Axes`, optional
The axis to use for the image, defaults to plt.gca().
show_cbar : boolean, optional.
Whether to show the colorbar (used to override default behavior).
**kwargs : Keyword arguments
These are passed directly to `matplotlib.pyplot.imshow`.
Returns
-------
ax_im : `matplotlib.pyplot.AxesImage`
The `AxesImage` object returned by `plt.imshow`.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
lo, hi, cmap = _get_display_range(image)
kwargs.setdefault('interpolation', 'nearest')
kwargs.setdefault('cmap', cmap)
kwargs.setdefault('vmin', lo)
kwargs.setdefault('vmax', hi)
ax = ax or plt.gca()
ax_im = ax.imshow(image, **kwargs)
if (cmap != _default_colormap and show_cbar is not False) or show_cbar:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(ax_im, cax=cax)
ax.get_figure().tight_layout()
return ax_im
def imshow_collection(ic, *args, **kwargs):
"""Display all images in the collection.
Returns
-------
fig : `matplotlib.figure.Figure`
The `Figure` object returned by `plt.subplots`.
"""
import matplotlib.pyplot as plt
if len(ic) < 1:
raise ValueError('Number of images to plot must be greater than 0')
# The target is to plot images on a grid with aspect ratio 4:3
num_images = len(ic)
# Two pairs of `nrows, ncols` are possible
k = (num_images * 12)**0.5
r1 = max(1, floor(k / 4))
r2 = ceil(k / 4)
c1 = ceil(num_images / r1)
c2 = ceil(num_images / r2)
# Select the one which is closer to 4:3
if abs(r1 / c1 - 0.75) < abs(r2 / c2 - 0.75):
nrows, ncols = r1, c1
else:
nrows, ncols = r2, c2
fig, axes = plt.subplots(nrows=nrows, ncols=ncols)
ax = np.asarray(axes).ravel()
for n, image in enumerate(ic):
ax[n].imshow(image, *args, **kwargs)
kwargs['ax'] = axes
return fig
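# Illustrative sketch, not part of scikit-image: the grid selection above for,
# say, 10 images. k = sqrt(10 * 12) ~= 10.95, giving candidate grids 2x5 and
# 3x4; 3x4 is closer to the 3:4 (0.75) target row/column ratio and is chosen.
def _example_grid_shape(num_images=10):
    k = (num_images * 12) ** 0.5
    r1, r2 = max(1, floor(k / 4)), ceil(k / 4)
    c1, c2 = ceil(num_images / r1), ceil(num_images / r2)
    return (r1, c1) if abs(r1 / c1 - 0.75) < abs(r2 / c2 - 0.75) else (r2, c2)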
def imread(*args, **kwargs):
import matplotlib.image
return matplotlib.image.imread(*args, **kwargs)
def _app_show():
from matplotlib.pyplot import show
show() |
299,979 | evaluate | # /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2020, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Code examples to demonstrate Keras model with AIMET """
import tensorflow as tf
from tensorflow.keras.applications import MobileNet
from keras.applications.vgg16 import preprocess_input
import numpy as np
from aimet_common.defs import CompressionScheme, CostMetric
from aimet_tensorflow.defs import SpatialSvdParameters
from aimet_tensorflow.compress import ModelCompressor
from aimet_tensorflow.defs import ModuleCompRatioPair
from aimet_tensorflow.utils.convert_tf_sess_to_keras import save_tf_session_single_gpu, save_as_tf_module_multi_gpu, \
load_tf_sess_variables_to_keras_single_gpu, load_keras_model_multi_gpu
def train(model):
"""
Trains using fake dataset
:param model: Keras model
:return: trained model
"""
# Create a fake dataset
x_train = np.random.rand(32, 224, 224, 3)
y_train = np.random.rand(32, )
x_train = preprocess_input(x_train)
y_train = tf.keras.utils.to_categorical(y_train, 1000)
model.compile('rmsprop', 'mse')
model.fit(x_train, y_train, epochs=1, batch_size=1, shuffle=False)
return model
def get_sess_from_keras_model():
"""
Gets TF session from keras model
:return: TF session
"""
tf.keras.backend.clear_session()
tf.keras.backend.set_learning_phase(1)
_ = MobileNet(weights=None, input_shape=(224, 224, 3))
sess = tf.compat.v1.keras.backend.get_session()
return sess
def compress_session(sess, compressible_ops):
"""
Compressed TF session
:param sess: Tf session
:param compressible_ops: layers to compress
:return: compressed session
"""
layer_a = sess.graph.get_operation_by_name(compressible_ops[0])
list_of_module_comp_ratio_pairs = [ModuleCompRatioPair(layer_a, 0.5)]
manual_params = SpatialSvdParameters.ManualModeParams(
list_of_module_comp_ratio_pairs=list_of_module_comp_ratio_pairs)
params = SpatialSvdParameters(input_op_names=['input_1'], output_op_names=['act_softmax/Softmax'],
mode=SpatialSvdParameters.Mode.manual, params=manual_params)
scheme = CompressionScheme.spatial_svd
metric = CostMetric.mac
# pylint: disable=unused-argument
def METHOD_NAME(sess, iterations, use_cuda):
return 1
sess, _ = ModelCompressor.compress_model(sess=sess,
working_dir="./",
eval_callback=METHOD_NAME,
eval_iterations=None,
input_shape=(1, 3, 224, 224),
compress_scheme=scheme,
cost_metric=metric,
parameters=params)
return sess
def convert_tf_session_to_keras_model():
"""
Convert an AIMET spatial SVD compressed session to a Keras model and train the Keras model with MirroredStrategy
"""
sess = get_sess_from_keras_model()
# For instance, if the first conv layer in MobilNetV1 graph is compressed, then:
compressed_ops = ['conv1/Conv2D']
compressed_sess = compress_session(sess, compressed_ops)
# Defining the input and output convs of the session for MobileNet model
input_op_name, output_op_name = "input_1:0", "act_softmax/Softmax:0"
# Step 1: Single Saving the compressed session
path = './saved_model_single_gpu'
save_tf_session_single_gpu(compressed_sess, path, input_op_name, output_op_name)
tf.keras.backend.clear_session()
# Step 2: Loading the corresponding Keras Model
tf.keras.backend.set_learning_phase(1)
model = load_tf_sess_variables_to_keras_single_gpu(path, compressed_ops)
# Single GPU training of the loaded Keras Model
train(model)
# To be able to do multi-gpu training the next two steps needs to be followed:
# Step 3: Re-Saving the Keras model to make it compatible with distribution strategy
saving_path = './saved_model_multi_gpu'
save_as_tf_module_multi_gpu(path, saving_path, compressed_ops, input_shape=(224, 224, 3))
tf.keras.backend.clear_session()
with tf.distribute.MirroredStrategy().scope():
tf.keras.backend.set_learning_phase(1)
# Step 4: Loading the keras model and Multi gpu training the model on given dataset
model = load_keras_model_multi_gpu(saving_path, input_shape=[224, 224, 3])
# Train model on Multi-GPU
train(model) |
299,980 | instance | # Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from unittest.mock import Mock
import pytest
from pytest_mock import MockerFixture
from molecule import config
from molecule.command import idempotence
@pytest.fixture()
def _patched_is_idempotent(mocker: MockerFixture) -> Mock:
return mocker.patch("molecule.command.idempotence.Idempotence._is_idempotent")
# NOTE(retr0h): The use of the `patched_config_validate` fixture, disables
# config.Config._validate from executing. Thus preventing odd side-effects
# throughout patched.assert_called unit tests.
@pytest.fixture()
def METHOD_NAME(patched_config_validate, config_instance: config.Config):
config_instance.state.change_state("converged", True)
return idempotence.Idempotence(config_instance)
def test_execute(
mocker: MockerFixture,
caplog: pytest.LogCaptureFixture,
patched_ansible_converge,
_patched_is_idempotent: Mock,
METHOD_NAME,
):
METHOD_NAME.execute()
assert "default" in caplog.text
assert "idempotence" in caplog.text
patched_ansible_converge.assert_called_once_with()
_patched_is_idempotent.assert_called_once_with("patched-ansible-converge-stdout")
msg = "Idempotence completed successfully."
assert msg in caplog.text
def test_execute_raises_when_not_converged(
caplog: pytest.LogCaptureFixture,
patched_ansible_converge,
METHOD_NAME,
):
METHOD_NAME._config.state.change_state("converged", False)
with pytest.raises(SystemExit) as e:
METHOD_NAME.execute()
assert e.value.code == 1
msg = "Instances not converged. Please converge instances first."
assert msg in caplog.text
def test_execute_raises_when_fails_idempotence(
mocker: MockerFixture,
caplog: pytest.LogCaptureFixture,
patched_ansible_converge,
_patched_is_idempotent: Mock,
METHOD_NAME,
):
_patched_is_idempotent.return_value = False
with pytest.raises(SystemExit) as e:
METHOD_NAME.execute()
assert e.value.code == 1
msg = "Idempotence test failed because of the following tasks:\n"
assert msg in caplog.text
def test_is_idempotent(METHOD_NAME):
output = """
PLAY RECAP ***********************************************************
check-command-01: ok=3 changed=0 unreachable=0 failed=0
"""
assert METHOD_NAME._is_idempotent(output)
def test_is_idempotent_not_idempotent(METHOD_NAME):
output = """
PLAY RECAP ***********************************************************
check-command-01: ok=2 changed=1 unreachable=0 failed=0
check-command-02: ok=2 changed=1 unreachable=0 failed=0
"""
assert not METHOD_NAME._is_idempotent(output)
def test_non_idempotent_tasks_idempotent(METHOD_NAME):
output = """
PLAY [all] ***********************************************************
GATHERING FACTS ******************************************************
ok: [check-command-01]
TASK: [Idempotence test] *********************************************
ok: [check-command-01]
PLAY RECAP ***********************************************************
check-command-01: ok=3 changed=0 unreachable=0 failed=0
"""
result = METHOD_NAME._non_idempotent_tasks(output)
assert result == []
def test_non_idempotent_tasks_not_idempotent(METHOD_NAME):
output = """
PLAY [all] ***********************************************************
GATHERING FACTS ******************************************************
ok: [check-command-01]
ok: [check-command-02]
TASK: [Idempotence test] *********************************************
changed: [check-command-01]
changed: [check-command-02]
PLAY RECAP ***********************************************************
check-command-01: ok=2 changed=1 unreachable=0 failed=0
check-command-02: ok=2 changed=1 unreachable=0 failed=0
"""
result = METHOD_NAME._non_idempotent_tasks(output)
assert result == [
"* [check-command-01] => Idempotence test",
"* [check-command-02] => Idempotence test",
] |
299,981 | param generator params size in bytes | # Copyright (c) 2022 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
from spinn_front_end_common.interface.ds import DataType
from pyNN.random import RandomDistribution, available_distributions
from spinn_front_end_common.utilities.constants import BYTES_PER_WORD
#: The generator param type for each data type
_GENERATOR_TYPES = {
DataType.S1615: 0,
DataType.UINT32: 1,
DataType.INT32: 2,
DataType.U032: 3
}
def get_generator_type(data_type):
"""
:param ~data_specification.enums.DataType data_type:
:return: The generator parameter type code for the given data type.
:rtype: int
:raises TypeError: If an unsupported data type is given
"""
if data_type in _GENERATOR_TYPES:
return _GENERATOR_TYPES[data_type]
raise TypeError(f"Ungeneratable type {data_type}")
def type_has_generator(data_type):
"""
:param ~data_specification.enums.DataType data_type:
:return:
Whether there is a generator parameter type code for the given data
type.
:rtype: bool
"""
return data_type in _GENERATOR_TYPES
#: ID of the constant parameter generator.
PARAM_TYPE_CONSTANT_ID = 0
#: IDs of the random parameter generators supported by the synapse expander.
PARAM_TYPE_BY_NAME = {
"uniform": 1,
"uniform_int": 1,
"normal": 2,
"normal_clipped": 3,
"normal_clipped_to_boundary": 4,
"exponential": 5
}
#: ID for the convolution kernel generator.
PARAM_TYPE_KERNEL = 6
def param_generator_id(value):
"""
:param value: The value to examine the type of.
:return: The ID of the on-chip generator that handles the value.
:rtype: int
    :raises TypeError: If a value of an unsupported data type is given
"""
# Scalars are fine on the machine
if numpy.isscalar(value):
return PARAM_TYPE_CONSTANT_ID
# Only certain types of random distributions are supported for
# generation on the machine
if isinstance(value, RandomDistribution):
if value.name in PARAM_TYPE_BY_NAME:
return PARAM_TYPE_BY_NAME[value.name]
raise TypeError(f"Ungeneratable parameter {value}")
def is_param_generatable(value):
"""
:param value: The value to examine the type of.
:return: Whether the value is of a type that can be generated on chip.
:rtype: bool
"""
if isinstance(value, str):
return False
if numpy.isscalar(value):
return True
return (isinstance(value, RandomDistribution) and
value.name in PARAM_TYPE_BY_NAME)
def param_generator_params(values):
"""
Get the parameter generator parameters as a numpy array.
:param values:
:type values: int or ~pyNN.random.RandomDistribution
:rtype: ~numpy.ndarray
"""
if numpy.isscalar(values):
return numpy.array(
[DataType.S1615.encode_as_int(values)],
dtype=numpy.uint32)
if isinstance(values, RandomDistribution):
parameters = (
values.parameters.get(param_name, None)
for param_name in available_distributions[values.name])
parameters = (
DataType.S1615.max if param == numpy.inf
else DataType.S1615.min if param == -numpy.inf else param
for param in parameters if param is not None)
params = [
DataType.S1615.encode_as_int(param) for param in parameters]
return numpy.array(params, dtype=numpy.uint32)
raise ValueError(f"Unexpected value {values}")
#: At most, there are 4 words as param generator parameters
MAX_PARAMS_BYTES = 4 * BYTES_PER_WORD
def METHOD_NAME(values):
"""
Get the size of the parameter generator parameters in bytes.
:param values:
:type values: int or ~pyNN.random.RandomDistribution
:rtype: int
    :raises ValueError: If `values` is of an unsupported data type
"""
if numpy.isscalar(values):
return BYTES_PER_WORD
if isinstance(values, RandomDistribution):
parameters = available_distributions[values.name]
return len(parameters) * BYTES_PER_WORD
raise ValueError(f"Unexpected value {values}") |
299,982 | test get center | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from shapely import wkt
from shapely.geometry import Point
from sedona.core.geom.circle import Circle
from sedona.core.geom.envelope import Envelope
class TestCircle:
def METHOD_NAME(self):
point = Point(0.0, 0.0)
circle = Circle(point, 0.1)
assert circle.centerGeometry.x == point.x and circle.centerGeometry.y == point.y
def test_get_radius(self):
point = Point(0.0, 0.0)
circle = Circle(point, 0.1)
assert circle.getRadius() == pytest.approx(0.1, 0.01)
def test_set_radius(self):
point = Point(0.0, 0.0)
circle = Circle(point, 0.1)
circle.setRadius(0.1)
assert circle.getRadius() == pytest.approx(0.1, 0.01)
def test_get_envelope_internal(self):
point = Point(0.0, 0.0)
circle = Circle(point, 0.1)
assert Envelope(-0.1, 0.1, -0.1, 0.1) == circle.getEnvelopeInternal()
def test_covers(self):
circle = Circle(Point(0.0, 0.0), 0.5)
assert circle.covers(Point(0.0, 0.0))
assert circle.covers(Point(0.1, 0.2))
assert not circle.covers(Point(0.4, 0.4))
assert not circle.covers(Point(-1, 0.4))
assert circle.covers(wkt.loads("MULTIPOINT ((0.1 0.1), (0.2 0.4))"))
assert not circle.covers(wkt.loads("MULTIPOINT ((0.1 0.1), (1.2 0.4))"))
assert not circle.covers(wkt.loads("MULTIPOINT ((1.1 0.1), (0.2 1.4))"))
assert circle.covers(wkt.loads("POLYGON ((-0.1 0.1, 0 0.4, 0.1 0.2, -0.1 0.1))"))
assert circle.covers(wkt.loads("POLYGON ((-0.5 0, 0 0.5, 0.5 0, -0.5 0))"))
assert not circle.covers(wkt.loads("POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))"))
assert not circle.covers(wkt.loads("POLYGON ((0.4 0.4, 0.4 0.45, 0.45 0.45, 0.45 0.4, 0.4 0.4))"))
assert circle.covers(
wkt.loads("MULTIPOLYGON (((-0.1 0.1, 0 0.4, 0.1 0.2, -0.1 0.1)),((-0.5 0, 0 0.5, 0.5 0, -0.5 0)))")
)
assert not circle.covers(
wkt.loads("MULTIPOLYGON (((-0.1 0.1, 0 0.4, 0.1 0.2, -0.1 0.1)),((0 0, 0 1, 1 1, 1 0, 0 0)))")
)
assert not circle.covers(
wkt.loads("MULTIPOLYGON (((0.4 0.4, 0.4 0.45, 0.45 0.45, 0.45 0.4, 0.4 0.4)),((0 0, 0 1, 1 1, 1 0, 0 0)))")
)
assert circle.covers(wkt.loads("LINESTRING (-0.1 0, 0.2 0.3)"))
assert circle.covers(wkt.loads("LINESTRING (-0.5 0, 0 0.5, 0.5 0)"))
assert not circle.covers(wkt.loads("LINESTRING (-0.1 0, 0 1)"))
assert not circle.covers(wkt.loads("LINESTRING (0.4 0.4, 0.45 0.45)"))
assert circle.covers(wkt.loads("MULTILINESTRING ((-0.1 0, 0.2 0.3), (-0.5 0, 0 0.5, 0.5 0))"))
assert not circle.covers(wkt.loads("MULTILINESTRING ((-0.1 0, 0.2 0.3), (-0.1 0, 0 1))"))
assert not circle.covers(wkt.loads("MULTILINESTRING ((0.4 0.4, 0.45 0.45), (-0.1 0, 0 1))"))
def test_intersects(self):
circle = Circle(Point(0.0, 0.0), 0.5)
        assert circle.intersects(Point(0, 0))
        assert circle.intersects(Point(0.1, 0.2))
        assert not circle.intersects(Point(0.4, 0.4))
        assert not circle.intersects(Point(-1, 0.4))
assert circle.intersects(wkt.loads("MULTIPOINT ((0.1 0.1), (0.2 0.4))"))
assert circle.intersects(wkt.loads("MULTIPOINT ((0.1 0.1), (1.2 0.4))"))
assert not circle.intersects(wkt.loads("MULTIPOINT ((1.1 0.1), (0.2 1.4))"))
assert circle.intersects(wkt.loads("POLYGON ((-0.1 0.1, 0 0.4, 0.1 0.2, -0.1 0.1))"))
assert circle.intersects(wkt.loads("POLYGON ((-0.5 0, 0 0.5, 0.5 0, -0.5 0))"))
assert circle.intersects(wkt.loads("POLYGON ((0 0, 1 1, 1 0, 0 0))"))
assert circle.intersects(wkt.loads("POLYGON ((-1 -1, -1 1, 1 1, 1.5 0.5, 1 -1, -1 -1))"))
assert circle.intersects(
wkt.loads("POLYGON ((-1 -1, -1 1, 1 1, 1 -1, -1 -1),(-0.1 -0.1, 0.1 -0.1, 0.1 0.1, -0.1 0.1, -0.1 -0.1))")
)
assert not circle.intersects(wkt.loads("POLYGON ((0.4 0.4, 0.4 0.45, 0.45 0.45, 0.45 0.4, 0.4 0.4))"))
assert not circle.intersects(wkt.loads("POLYGON ((-1 0, -1 1, 0 1, 0 2, -1 2, -1 0))"))
assert not circle.intersects(
wkt.loads("POLYGON ((-1 -1, -1 1, 1 1, 1 -1, -1 -1),(-0.6 -0.6, 0.6 -0.6, 0.6 0.6, -0.6 0.6, -0.6 -0.6))")
)
assert circle.intersects(
wkt.loads("MULTIPOLYGON (((-0.1 0.1, 0 0.4, 0.1 0.2, -0.1 0.1)),((-0.5 0, 0 0.5, 0.5 0, -0.5 0)))")
)
assert circle.intersects(
wkt.loads("MULTIPOLYGON (((-0.1 0.1, 0 0.4, 0.1 0.2, -0.1 0.1)), ((-1 0, -1 1, 0 1, 0 2, -1 2, -1 0)))")
)
assert not circle.intersects(
wkt.loads(
"MULTIPOLYGON (((0.4 0.4, 0.4 0.45, 0.45 0.45, 0.45 0.4, 0.4 0.4)),((-1 0, -1 1, 0 1, 0 2, -1 2, -1 0)))"
))
assert circle.intersects(wkt.loads("LINESTRING (-1 -1, 1 1)"))
assert circle.intersects(wkt.loads("LINESTRING (-1 0.5, 1 0.5)"))
assert circle.intersects(wkt.loads("LINESTRING (0 0, 0.1 0.2)"))
assert not circle.intersects(wkt.loads("LINESTRING (0.4 0.4, 1 1)"))
assert not circle.intersects(wkt.loads("LINESTRING (-0.4 -0.4, -2 -3.2)"))
assert not circle.intersects(wkt.loads("LINESTRING (0.1 0.5, 1 0.5)"))
assert circle.intersects(wkt.loads("MULTILINESTRING ((-1 -1, 1 1), (-1 0.5, 1 0.5))"))
assert circle.intersects(wkt.loads("MULTILINESTRING ((-1 -1, 1 1), (0.4 0.4, 1 1))"))
assert not circle.intersects(wkt.loads("MULTILINESTRING ((0.1 0.5, 1 0.5), (0.4 0.4, 1 1))"))
def test_equality(self):
assert Circle(Point(-112.574945, 45.987772), 0.01) == Circle(Point(-112.574945, 45.987772), 0.01)
assert Circle(Point(-112.574945, 45.987772), 0.01) == Circle(Point(-112.574945, 45.987772), 0.01)
def test_radius(self):
polygon = wkt.loads(
"POLYGON ((-1 -1, -1 1, 1 1, 1 -1, -1 -1),(-0.6 -0.6, 0.6 -0.6, 0.6 0.6, -0.6 0.6, -0.6 -0.6))"
)
circle = Circle(polygon, 1.0)
        assert circle.radius == pytest.approx(1.414213, 0.001)
        assert circle.MBR.minx == pytest.approx(-1.414213, 0.001)
        assert circle.MBR.maxx == pytest.approx(1.414213, 0.001)
        assert circle.MBR.miny == pytest.approx(-1.414213, 0.001)
        assert circle.MBR.maxy == pytest.approx(1.414213, 0.001) |
299,983 | strip prefix | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers to connect to remote servers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf.tensorflow_server_pb2 import ServerDef
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.distribute.cluster_resolver import cluster_resolver
from tensorflow.python.eager import context
from tensorflow.python.platform import remote_utils
from tensorflow.python.training import server_lib
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
_GRPC_PREFIX = "grpc://"
@tf_export("config.experimental_connect_to_host")
def connect_to_remote_host(remote_host=None, job_name="worker"):
"""Connects to a single machine to enable remote execution on it.
Will make devices on the remote host available to use. Note that calling this
more than once will work, but will invalidate any tensor handles on the old
remote devices.
Using the default job_name of worker, you can schedule ops to run remotely as
follows:
```python
# Enable eager execution, and connect to the remote host.
tf.compat.v1.enable_eager_execution()
tf.contrib.eager.connect_to_remote_host("exampleaddr.com:9876")
with ops.device("job:worker/replica:0/task:1/device:CPU:0"):
# The following tensors should be resident on the remote device, and the op
# will also execute remotely.
x1 = array_ops.ones([2, 2])
x2 = array_ops.ones([2, 2])
y = math_ops.matmul(x1, x2)
```
Args:
    remote_host: a single remote server address, or a list of addresses, in host-port format.
job_name: The job name under which the new server will be accessible.
Raises:
ValueError: if remote_host is None.
"""
if not remote_host:
raise ValueError("Must provide at least one remote_host")
remote_hosts = nest.flatten(remote_host)
cluster_spec = server_lib.ClusterSpec(
{job_name: [METHOD_NAME(host, _GRPC_PREFIX) for host in remote_hosts]})
connect_to_cluster(cluster_spec)
@tf_export("config.experimental_connect_to_cluster")
def connect_to_cluster(cluster_spec_or_resolver,
job_name="localhost",
task_index=0,
protocol=None):
"""Connects to the given cluster.
Will make devices on the cluster available to use. Note that calling this more
than once will work, but will invalidate any tensor handles on the old remote
devices.
If the given local job name is not present in the cluster specification, it
will be automatically added, using an unused port on the localhost.
Args:
cluster_spec_or_resolver: A `ClusterSpec` or `ClusterResolver` describing
the cluster.
job_name: The name of the local job.
task_index: The local task index.
protocol: The communication protocol, such as `"grpc"`. If unspecified, will
use the default from `python/platform/remote_utils.py`.
"""
protocol = protocol or remote_utils.get_default_communication_protocol()
if isinstance(cluster_spec_or_resolver, server_lib.ClusterSpec):
cluster_spec = cluster_spec_or_resolver
elif isinstance(cluster_spec_or_resolver, cluster_resolver.ClusterResolver):
cluster_spec = cluster_spec_or_resolver.cluster_spec()
else:
raise ValueError(
"`cluster_spec_or_resolver` must be a `ClusterSpec` or a "
"`ClusterResolver`.")
cluster_def = cluster_spec.as_cluster_def()
# Automatically add local job, if not part of the cluster spec.
if job_name not in cluster_spec.jobs:
local_port = pywrap_tensorflow.TF_PickUnusedPortOrDie()
job_def = cluster_def.job.add()
job_def.name = job_name
# TODO(fishx): Update this to make sure remote worker has valid ip address
# to connect with local.
job_def.tasks[0] = "localhost:{}".format(local_port)
server_def = ServerDef(
cluster=cluster_def, job_name=job_name, task_index=task_index,
protocol=protocol)
# TODO(nareshmodi): Make this default since it works in more situations.
os.environ["TF_EAGER_REMOTE_USE_SEND_TENSOR_RPC"] = "1"
context.set_server_def(server_def)
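# --- Illustrative usage (not part of the original module; a minimal sketch with
# hypothetical worker addresses) ---
#
#   cluster_spec = server_lib.ClusterSpec(
#       {"worker": ["host0.example.com:9876", "host1.example.com:9876"]})
#   connect_to_cluster(cluster_spec)
#   with ops.device("/job:worker/replica:0/task:0/device:CPU:0"):
#     x = array_ops.ones([2, 2])  # resident on the first remote worker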
def METHOD_NAME(s, prefix):
return s[len(prefix):] if s.startswith(prefix) else s |
299,984 | test file pointer could be reused | import io
import re
import sys
import warnings
import pytest
from PIL import Image, WebPImagePlugin, features
from .helper import (
assert_image_equal,
assert_image_similar,
assert_image_similar_tofile,
hopper,
skip_unless_feature,
)
try:
from PIL import _webp
HAVE_WEBP = True
except ImportError:
HAVE_WEBP = False
class TestUnsupportedWebp:
def test_unsupported(self):
if HAVE_WEBP:
WebPImagePlugin.SUPPORTED = False
file_path = "Tests/images/hopper.webp"
with pytest.warns(UserWarning):
with pytest.raises(OSError):
with Image.open(file_path):
pass
if HAVE_WEBP:
WebPImagePlugin.SUPPORTED = True
@skip_unless_feature("webp")
class TestFileWebp:
def setup_method(self):
self.rgb_mode = "RGB"
def test_version(self):
_webp.WebPDecoderVersion()
_webp.WebPDecoderBuggyAlpha()
assert re.search(r"\d+\.\d+\.\d+$", features.version_module("webp"))
def test_read_rgb(self):
"""
        Can we read an RGB mode WebP file without error?
Does it have the bits we expect?
"""
with Image.open("Tests/images/hopper.webp") as image:
assert image.mode == self.rgb_mode
assert image.size == (128, 128)
assert image.format == "WEBP"
image.load()
image.getdata()
# generated with:
# dwebp -ppm ../../Tests/images/hopper.webp -o hopper_webp_bits.ppm
assert_image_similar_tofile(image, "Tests/images/hopper_webp_bits.ppm", 1.0)
def _roundtrip(self, tmp_path, mode, epsilon, args={}):
temp_file = str(tmp_path / "temp.webp")
hopper(mode).save(temp_file, **args)
with Image.open(temp_file) as image:
assert image.mode == self.rgb_mode
assert image.size == (128, 128)
assert image.format == "WEBP"
image.load()
image.getdata()
if mode == self.rgb_mode:
# generated with: dwebp -ppm temp.webp -o hopper_webp_write.ppm
assert_image_similar_tofile(
image, "Tests/images/hopper_webp_write.ppm", 12.0
)
# This test asserts that the images are similar. If the average pixel
# difference between the two images is less than the epsilon value,
# then we're going to accept that it's a reasonable lossy version of
# the image.
target = hopper(mode)
if mode != self.rgb_mode:
target = target.convert(self.rgb_mode)
assert_image_similar(image, target, epsilon)
def test_write_rgb(self, tmp_path):
"""
        Can we write an RGB mode file to WebP without error?
Does it have the bits we expect?
"""
self._roundtrip(tmp_path, self.rgb_mode, 12.5)
def test_write_method(self, tmp_path):
self._roundtrip(tmp_path, self.rgb_mode, 12.0, {"method": 6})
buffer_no_args = io.BytesIO()
hopper().save(buffer_no_args, format="WEBP")
buffer_method = io.BytesIO()
hopper().save(buffer_method, format="WEBP", method=6)
assert buffer_no_args.getbuffer() != buffer_method.getbuffer()
@skip_unless_feature("webp_anim")
def test_save_all(self, tmp_path):
temp_file = str(tmp_path / "temp.webp")
im = Image.new("RGB", (1, 1))
im2 = Image.new("RGB", (1, 1), "#f00")
im.save(temp_file, save_all=True, append_images=[im2])
with Image.open(temp_file) as reloaded:
assert_image_equal(im, reloaded)
reloaded.seek(1)
assert_image_similar(im2, reloaded, 1)
def test_icc_profile(self, tmp_path):
self._roundtrip(tmp_path, self.rgb_mode, 12.5, {"icc_profile": None})
if _webp.HAVE_WEBPANIM:
self._roundtrip(
tmp_path, self.rgb_mode, 12.5, {"icc_profile": None, "save_all": True}
)
def test_write_unsupported_mode_L(self, tmp_path):
"""
Saving a black-and-white file to WebP format should work, and be
similar to the original file.
"""
self._roundtrip(tmp_path, "L", 10.0)
def test_write_unsupported_mode_P(self, tmp_path):
"""
Saving a palette-based file to WebP format should work, and be
similar to the original file.
"""
self._roundtrip(tmp_path, "P", 50.0)
@pytest.mark.skipif(sys.maxsize <= 2**32, reason="Requires 64-bit system")
def test_write_encoding_error_message(self, tmp_path):
temp_file = str(tmp_path / "temp.webp")
im = Image.new("RGB", (15000, 15000))
with pytest.raises(ValueError) as e:
im.save(temp_file, method=0)
assert str(e.value) == "encoding error 6"
def test_WebPEncode_with_invalid_args(self):
"""
Calling encoder functions with no arguments should result in an error.
"""
if _webp.HAVE_WEBPANIM:
with pytest.raises(TypeError):
_webp.WebPAnimEncoder()
with pytest.raises(TypeError):
_webp.WebPEncode()
def test_WebPDecode_with_invalid_args(self):
"""
Calling decoder functions with no arguments should result in an error.
"""
if _webp.HAVE_WEBPANIM:
with pytest.raises(TypeError):
_webp.WebPAnimDecoder()
with pytest.raises(TypeError):
_webp.WebPDecode()
def test_no_resource_warning(self, tmp_path):
file_path = "Tests/images/hopper.webp"
with Image.open(file_path) as image:
temp_file = str(tmp_path / "temp.webp")
with warnings.catch_warnings():
image.save(temp_file)
def METHOD_NAME(self):
file_path = "Tests/images/hopper.webp"
with open(file_path, "rb") as blob:
Image.open(blob).load()
Image.open(blob).load()
@pytest.mark.parametrize(
"background",
(0, (0,), (-1, 0, 1, 2), (253, 254, 255, 256)),
)
@skip_unless_feature("webp_anim")
def test_invalid_background(self, background, tmp_path):
temp_file = str(tmp_path / "temp.webp")
im = hopper()
with pytest.raises(OSError):
im.save(temp_file, save_all=True, append_images=[im], background=background)
@skip_unless_feature("webp_anim")
def test_background_from_gif(self, tmp_path):
# Save L mode GIF with background
with Image.open("Tests/images/no_palette_with_background.gif") as im:
out_webp = str(tmp_path / "temp.webp")
im.save(out_webp, save_all=True)
# Save P mode GIF with background
with Image.open("Tests/images/chi.gif") as im:
original_value = im.convert("RGB").getpixel((1, 1))
# Save as WEBP
out_webp = str(tmp_path / "temp.webp")
im.save(out_webp, save_all=True)
# Save as GIF
out_gif = str(tmp_path / "temp.gif")
with Image.open(out_webp) as im:
im.save(out_gif)
with Image.open(out_gif) as reread:
reread_value = reread.convert("RGB").getpixel((1, 1))
difference = sum(abs(original_value[i] - reread_value[i]) for i in range(0, 3))
assert difference < 5
@skip_unless_feature("webp_anim")
def test_duration(self, tmp_path):
with Image.open("Tests/images/dispose_bgnd.gif") as im:
assert im.info["duration"] == 1000
out_webp = str(tmp_path / "temp.webp")
im.save(out_webp, save_all=True)
with Image.open(out_webp) as reloaded:
assert reloaded.info["duration"] == 1000 |
299,985 | upload step remote | import logging
import os
import shutil
import tempfile
from abc import abstractmethod
from pathlib import Path
from typing import Any, Union
from tango.common.aliases import PathOrStr
from tango.common.exceptions import TangoError
from tango.common.file_lock import FileLock
from tango.common.params import Params
from tango.common.remote_utils import RemoteConstants
from tango.step import Step
from tango.step_cache import CacheMetadata
from tango.step_caches.local_step_cache import LocalStepCache
from tango.step_info import StepInfo
logger = logging.getLogger(__name__)
class RemoteNotFoundError(TangoError):
"""
Classes inheriting from the RemoteStepCache should raise this if a step result object is not found.
"""
# This class inherits from `LocalStepCache` to benefit from its in-memory "weak cache" and "strong cache",
# but it handles saving artifacts to disk a little differently.
class RemoteStepCache(LocalStepCache):
"""
This is a :class:`~tango.step_cache.StepCache` that's used by :class:`RemoteWorkspace`.
It stores the results of steps on some RemoteWorkspace.
It also keeps a limited in-memory cache as well as a local backup on disk, so fetching a
    step's result on subsequent fetches should be fast.
.. tip::
All remote step caches inherit from this.
"""
Constants = RemoteConstants
def __init__(self, local_dir: Path):
super().__init__(local_dir)
@abstractmethod
def _step_result_remote(self, step: Union[Step, StepInfo]):
raise NotImplementedError()
@abstractmethod
def METHOD_NAME(self, step: Step, objects_dir: Path):
raise NotImplementedError()
@abstractmethod
def _download_step_remote(self, step_result, target_dir: PathOrStr) -> None:
raise NotImplementedError()
@abstractmethod
def __len__(self):
raise NotImplementedError()
def _acquire_step_lock_file(self, step: Union[Step, StepInfo], read_only_ok: bool = False):
return FileLock(
self.step_dir(step).with_suffix(".lock"), read_only_ok=read_only_ok
).acquire_with_updates(desc=f"acquiring step cache lock for '{step.unique_id}'")
def __contains__(self, step: Any) -> bool:
if isinstance(step, (Step, StepInfo)):
cacheable = step.cache_results if isinstance(step, Step) else step.cacheable
if not cacheable:
return False
key = step.unique_id
# First check if we have a copy in memory.
if key in self.strong_cache:
return True
if key in self.weak_cache:
return True
# Then check if we have a copy on disk in our cache directory.
with self._acquire_step_lock_file(step, read_only_ok=True):
if self.step_dir(step).is_dir():
return True
# If not, check the remote location.
return self._step_result_remote(step) is not None
else:
return False
def __getitem__(self, step: Union[Step, StepInfo]) -> Any:
key = step.unique_id
step_result = self._step_result_remote(step)
if step_result is None:
raise KeyError(step)
# Try getting the result from our in-memory caches first.
result = self._get_from_cache(key)
if result is not None:
return result
def load_and_return():
metadata = CacheMetadata.from_params(Params.from_file(self._metadata_path(step)))
result = metadata.format.read(self.step_dir(step) / self.Constants.STEP_RESULT_DIR)
self._add_to_cache(key, result)
return result
# Next check our local on-disk cache.
with self._acquire_step_lock_file(step, read_only_ok=True):
if self.step_dir(step).is_dir():
return load_and_return()
# Finally, check the remote location for the corresponding dataset.
with self._acquire_step_lock_file(step):
# Make sure the step wasn't cached since the last time we checked (above).
if self.step_dir(step).is_dir():
return load_and_return()
# We'll download the dataset to a temporary directory first, in case something goes wrong.
temp_dir = tempfile.mkdtemp(dir=self.dir, prefix=key)
try:
self._download_step_remote(step_result, target_dir=temp_dir)
# Download and extraction was successful, rename temp directory to final step result directory.
os.replace(temp_dir, self.step_dir(step))
except RemoteNotFoundError:
raise KeyError(step)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
return load_and_return()
def __setitem__(self, step: Step, value: Any) -> None:
if not step.cache_results:
logger.warning("Tried to cache step %s despite being marked as uncacheable.", step.name)
return
with self._acquire_step_lock_file(step):
# We'll write the step's results to temporary directory first, and try to upload to
# remote workspace from there in case anything goes wrong.
temp_dir = Path(tempfile.mkdtemp(dir=self.dir, prefix=step.unique_id))
(temp_dir / self.Constants.STEP_RESULT_DIR).mkdir()
try:
step.format.write(value, temp_dir / self.Constants.STEP_RESULT_DIR)
metadata = CacheMetadata(step=step.unique_id, format=step.format)
metadata.to_params().to_file(temp_dir / self.METADATA_FILE_NAME)
# Create the dataset and upload serialized result to it.
self.METHOD_NAME(step, temp_dir)
# Upload successful, rename temp directory to the final step result directory.
if self.step_dir(step).is_dir():
shutil.rmtree(self.step_dir(step), ignore_errors=True)
os.replace(temp_dir, self.step_dir(step))
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
# Finally, add to in-memory caches.
self._add_to_cache(step.unique_id, value) |
299,986 | list1 default | # (C) Copyright 2004-2023 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
"""
Demonstrate the CSVListEditor class.
This editor allows the user to enter a *single* line of input text, containing
comma-separated values (or another separator may be specified). Your program
specifies an element Trait type of Int, Float, Str, Enum, or Range.
Please refer to the `CSVListEditor API docs`_ for further information.
.. _CSVListEditor API docs: https://docs.enthought.com/traitsui/api/traitsui.editors.csv_list_editor.html#traitsui.editors.csv_list_editor.CSVListEditor
"""
from traits.api import (
HasTraits,
List,
Int,
Float,
Enum,
Range,
Str,
Button,
Property,
observe,
)
from traitsui.api import (
View,
Item,
Label,
Heading,
VGroup,
HGroup,
UItem,
spring,
TextEditor,
CSVListEditor,
)
class CSVListEditorDemo(HasTraits):
list1 = List(Int)
list2 = List(Float)
list3 = List(Str, maxlen=3)
list4 = List(Enum('red', 'green', 'blue', 2, 3))
list5 = List(Range(low=0.0, high=10.0))
# 'low' and 'high' are used to demonstrate lists containing dynamic ranges.
low = Float(0.0)
high = Float(1.0)
list6 = List(Range(low=-1.0, high='high'))
list7 = List(Range(low='low', high='high'))
pop1 = Button("Pop from first list")
sort1 = Button("Sort first list")
# This will be str(self.list1).
list1str = Property(Str, observe='list1')
traits_view = View(
HGroup(
# This VGroup forms the column of CSVListEditor examples.
VGroup(
Item(
'list1',
label="List(Int)",
editor=CSVListEditor(ignore_trailing_sep=False),
tooltip='options: ignore_trailing_sep=False',
),
Item(
'list1',
label="List(Int)",
style='readonly',
editor=CSVListEditor(),
),
Item(
'list2',
label="List(Float)",
editor=CSVListEditor(enter_set=True, auto_set=False),
tooltip='options: enter_set=True, auto_set=False',
),
Item(
'list3',
label="List(Str, maxlen=3)",
editor=CSVListEditor(),
),
Item(
'list4',
label="List(Enum('red', 'green', 'blue', 2, 3))",
editor=CSVListEditor(sep=None),
tooltip='options: sep=None',
),
Item(
'list5',
label="List(Range(low=0.0, high=10.0))",
editor=CSVListEditor(),
),
Item(
'list6',
label="List(Range(low=-1.0, high='high'))",
editor=CSVListEditor(),
),
Item(
'list7',
label="List(Range(low='low', high='high'))",
editor=CSVListEditor(),
),
springy=True,
),
# This VGroup forms the right column; it will display the
# Python str representation of the lists.
VGroup(
UItem(
'list1str',
editor=TextEditor(),
enabled_when='False',
width=240,
),
UItem(
'list1str',
editor=TextEditor(),
enabled_when='False',
width=240,
),
UItem(
'list2',
editor=TextEditor(),
enabled_when='False',
width=240,
),
UItem(
'list3',
editor=TextEditor(),
enabled_when='False',
width=240,
),
UItem(
'list4',
editor=TextEditor(),
enabled_when='False',
width=240,
),
UItem(
'list5',
editor=TextEditor(),
enabled_when='False',
width=240,
),
UItem(
'list6',
editor=TextEditor(),
enabled_when='False',
width=240,
),
UItem(
'list7',
editor=TextEditor(),
enabled_when='False',
width=240,
),
),
),
'_',
HGroup('low', 'high', spring, UItem('pop1'), UItem('sort1')),
Heading("Notes"),
Label(
"Hover over a list to see which editor options are set, " "if any."
),
Label(
"The editor of the first list, List(Int), uses "
"ignore_trailing_sep=False, so a trailing comma is "
"an error."
),
Label("The second list is a read-only view of the first list."),
Label(
"The editor of the List(Float) example has enter_set=True "
"and auto_set=False; press Enter to validate."
),
Label("The List(Str) example will accept at most 3 elements."),
Label(
"The editor of the List(Enum(...)) example uses sep=None, "
"i.e. whitespace acts as a separator."
),
Label(
"The last three List(Range(...)) examples take neither, one or "
"both of their limits from the Low and High fields below."
),
width=720,
title="CSVListEditor Demonstration",
)
def METHOD_NAME(self):
return [1, 4, 0, 10]
def _get_list1str(self):
return str(self.list1)
@observe("pop1")
def _pop_from_list1(self, event):
if len(self.list1) > 0:
x = self.list1.pop()
print(x)
@observe('sort1')
def _sort_list1(self, event):
self.list1.sort()
if __name__ == "__main__":
demo = CSVListEditorDemo()
demo.configure_traits() |
299,987 | reset | #!/usr/bin/env python3
import re
import argparse
import os
import gi
import json
import subprocess
gi.require_version('Flatpak', '1.0')
from gi.repository import Flatpak
from gi.repository import GLib
def get_bisection_data():
return {'ref': None, 'good': None, 'bad': None,
'refs': None, 'log': None, 'messages': None}
class Bisector():
def load_cache(self):
try:
os.makedirs(os.path.join(GLib.get_user_cache_dir(), 'flatpak'))
except FileExistsError:
pass
self.cache_path = os.path.join(GLib.get_user_cache_dir(),
'flatpak', '%s-%s-bisect.status' % (
self.name, self.branch))
try:
with open(self.cache_path, 'rb') as f:
self.data = json.load(f)
except FileNotFoundError:
self.data = None
def dump_data(self):
with open(self.cache_path, 'w') as f:
json.dump(self.data, f)
def setup_flatpak_app(self):
self.installation = Flatpak.Installation.new_user()
kind = Flatpak.RefKind.APP
if self.runtime:
kind = Flatpak.RefKind.RUNTIME
try:
self.cref = self.installation.get_installed_ref(kind, self.name, None, self.branch, None)
except GLib.Error as e:
print("%s\n\nMake sure %s is installed as a "
"user (flatpak install --user) and specify `--runtime`"
" if it is a runtime." % (e, self.name))
return -1
return 0
def run(self):
self.name = self.name[0]
self.load_cache()
res = self.setup_flatpak_app()
if res:
return res
try:
func = getattr(self, self.subparser_name)
except AttributeError:
print('No action called %s' % self.subparser_name)
return -1
res = func()
if self.data:
self.dump_data()
return res
def set_reference_commits(self, set_name, check_name):
if not self.data:
print("You need to first start the bisection")
return -1
ref = self.cref.get_latest_commit()
if self.data[check_name] == ref:
print('Commit %s is already set as %s...' % (
ref, check_name))
return 1
if ref not in self.data['refs']:
print("%s is not a known commit." % ref)
return -1
print("Setting %s as %s commit" % (ref, set_name))
self.data[set_name] = ref
if self.data[set_name] and self.data[check_name]:
x1 = self.data['refs'].index(self.data['good'])
x2 = self.data['refs'].index(self.data['bad'])
refs = self.data['refs'][x1:x2]
if not refs:
print("=========================="
"First bad commit is:\n%s"
"==========================" % self.data['message'][self.data['bad']])
exit(0)
ref = refs[int(len(refs) / 2)]
if self.data['good'] == ref:
print("\n==========================\n"
"First bad commit is:\n\n%s"
"==========================" % self.data['messages'][self.data['bad']])
exit(0)
return self.checkout(ref)
return -1
def load_refs(self):
repodir, refname = self.download_history()
history = subprocess.check_output(['ostree', 'log', '--repo', repodir, refname]).decode()
refs = []
messages = {}
message = ""
_hash = ''
for l in history.split('\n'):
            rehash = re.search(r'(?<=^commit )\w+', l)
if rehash:
if message:
messages[_hash] = message
_hash = rehash.group(0)
refs.insert(0, _hash)
message = ""
message += l + '\n'
if message:
messages[_hash] = message
self.data['refs'] = refs
self.data['log'] = history
self.data['messages'] = messages
def good(self):
if not self.data['bad']:
print("Set the bad commit first")
exit(-1)
return self.set_reference_commits('good', 'bad')
def bad(self):
return self.set_reference_commits('bad', 'good')
def start(self):
if self.data:
print('Bisection already started')
return -1
print("Updating to %s latest commit" % self.name)
self.METHOD_NAME(False)
self.data = get_bisection_data()
self.load_refs()
def download_history(self):
print("Getting history")
appidir = os.path.abspath(os.path.join(self.cref.get_deploy_dir(), '..'))
dirname = "app"
if self.runtime:
dirname = "runtime"
appidir = appidir.split('/%s/' % dirname)
repodir = os.path.join(appidir[0], 'repo')
refname = self.cref.get_origin() + ':' + dirname + '/' + self.cref.get_name() + '/' + self.cref.get_arch() + '/' + self.cref.get_branch()
# FIXME Getting `error: Exceeded maximum recursion` in ostree if using --depth=-1 (or > 250)
subprocess.call(['ostree', 'pull', '--depth=250', '--commit-metadata-only', '--repo', repodir, refname])
return repodir, refname
def log(self):
if self.data:
cmd = ['echo', self.data['log']]
else:
repodir, refname = self.download_history()
cmd = ['ostree', 'log', '--repo', repodir, refname]
pager = os.environ.get('PAGER')
if pager:
stdout = subprocess.PIPE
else:
stdout = None
p = subprocess.Popen(cmd, stdout=stdout)
if pager:
subprocess.check_call((pager), stdin=p.stdout)
p.wait()
def checkout(self, commit=None):
if not commit:
commit = self.commit[0]
refname = self.cref.get_name() + '/' + self.cref.get_arch() + '/' + self.cref.get_branch()
print("Checking out %s" % commit)
return subprocess.call(['flatpak', 'update', '--user', refname, '--commit', commit])
def METHOD_NAME(self, v=True):
if not self.data:
if v:
print("Not bisecting, nothing to reset")
return -1
refname = self.cref.get_name() + '/' + self.cref.get_arch() + '/' + self.cref.get_branch()
print("Removing %s" % self.cache_path)
os.remove(self.cache_path)
self.data = None
return subprocess.call(['flatpak', 'update', '--user', refname])
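# --- Illustrative CLI session (not part of the original script; the script name
# and application ID below are hypothetical) ---
#
#   ./flatpak-bisect org.example.App start
#   ./flatpak-bisect org.example.App bad      # currently installed commit shows the bug
#   ./flatpak-bisect org.example.App good     # currently installed commit works
#   ...repeat good/bad until the first bad commit is printed...
#   ./flatpak-bisect org.example.App reset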
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('name', nargs=1, help='Application/Runtime to bisect')
parser.add_argument('-b', '--branch', default='master', help='The branch to bisect')
parser.add_argument('-r', '--runtime', action="store_true", help='Bisecting a runtime not an app')
subparsers = parser.add_subparsers(dest='subparser_name')
subparsers.required = True
start_parser = subparsers.add_parser('start', help="Start bisection")
bad_parser = subparsers.add_parser('bad', help="Set current version as bad")
good_parser = subparsers.add_parser('good', help="Set current version as good")
log_parser = subparsers.add_parser('log', help="Download and print application commit history")
checkout_parser = subparsers.add_parser('checkout', help="Checkout defined commit")
checkout_parser.add_argument('commit', nargs=1, help='The commit hash to checkout')
reset_parser = subparsers.add_parser('reset', help="Reset all bisecting data and go back to latest commit")
bisector = Bisector()
options = parser.parse_args(namespace=bisector)
bisector.run() |
299,988 | test iter decode | # coding: utf-8
"""
webencodings.tests
~~~~~~~~~~~~~~~~~~
A basic test suite for Encoding.
:copyright: Copyright 2012 by Simon Sapin
:license: BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
from . import (lookup, LABELS, decode, encode, iter_decode, iter_encode,
IncrementalDecoder, IncrementalEncoder, UTF8)
def assert_raises(exception, function, *args, **kwargs):
try:
function(*args, **kwargs)
except exception:
return
else: # pragma: no cover
raise AssertionError('Did not raise %s.' % exception)
def test_labels():
assert lookup('utf-8').name == 'utf-8'
assert lookup('Utf-8').name == 'utf-8'
assert lookup('UTF-8').name == 'utf-8'
assert lookup('utf8').name == 'utf-8'
assert lookup('utf8').name == 'utf-8'
assert lookup('utf8 ').name == 'utf-8'
assert lookup(' \r\nutf8\t').name == 'utf-8'
assert lookup('u8') is None # Python label.
assert lookup('utf-8 ') is None # Non-ASCII white space.
assert lookup('US-ASCII').name == 'windows-1252'
assert lookup('iso-8859-1').name == 'windows-1252'
assert lookup('latin1').name == 'windows-1252'
assert lookup('LATIN1').name == 'windows-1252'
assert lookup('latin-1') is None
assert lookup('LATİN1') is None # ASCII-only case insensitivity.
def test_all_labels():
for label in LABELS:
assert decode(b'', label) == ('', lookup(label))
assert encode('', label) == b''
for repeat in [0, 1, 12]:
output, _ = iter_decode([b''] * repeat, label)
assert list(output) == []
assert list(iter_encode([''] * repeat, label)) == []
decoder = IncrementalDecoder(label)
assert decoder.decode(b'') == ''
assert decoder.decode(b'', final=True) == ''
encoder = IncrementalEncoder(label)
assert encoder.encode('') == b''
assert encoder.encode('', final=True) == b''
# All encoding names are valid labels too:
for name in set(LABELS.values()):
assert lookup(name).name == name
def test_invalid_label():
assert_raises(LookupError, decode, b'\xEF\xBB\xBF\xc3\xa9', 'invalid')
assert_raises(LookupError, encode, 'é', 'invalid')
assert_raises(LookupError, iter_decode, [], 'invalid')
assert_raises(LookupError, iter_encode, [], 'invalid')
assert_raises(LookupError, IncrementalDecoder, 'invalid')
assert_raises(LookupError, IncrementalEncoder, 'invalid')
def test_decode():
assert decode(b'\x80', 'latin1') == ('€', lookup('latin1'))
assert decode(b'\x80', lookup('latin1')) == ('€', lookup('latin1'))
assert decode(b'\xc3\xa9', 'utf8') == ('é', lookup('utf8'))
assert decode(b'\xc3\xa9', UTF8) == ('é', lookup('utf8'))
assert decode(b'\xc3\xa9', 'ascii') == ('é', lookup('ascii'))
assert decode(b'\xEF\xBB\xBF\xc3\xa9', 'ascii') == ('é', lookup('utf8')) # UTF-8 with BOM
assert decode(b'\xFE\xFF\x00\xe9', 'ascii') == ('é', lookup('utf-16be')) # UTF-16-BE with BOM
assert decode(b'\xFF\xFE\xe9\x00', 'ascii') == ('é', lookup('utf-16le')) # UTF-16-LE with BOM
assert decode(b'\xFE\xFF\xe9\x00', 'ascii') == ('\ue900', lookup('utf-16be'))
assert decode(b'\xFF\xFE\x00\xe9', 'ascii') == ('\ue900', lookup('utf-16le'))
assert decode(b'\x00\xe9', 'UTF-16BE') == ('é', lookup('utf-16be'))
assert decode(b'\xe9\x00', 'UTF-16LE') == ('é', lookup('utf-16le'))
assert decode(b'\xe9\x00', 'UTF-16') == ('é', lookup('utf-16le'))
assert decode(b'\xe9\x00', 'UTF-16BE') == ('\ue900', lookup('utf-16be'))
assert decode(b'\x00\xe9', 'UTF-16LE') == ('\ue900', lookup('utf-16le'))
assert decode(b'\x00\xe9', 'UTF-16') == ('\ue900', lookup('utf-16le'))
def test_encode():
assert encode('é', 'latin1') == b'\xe9'
assert encode('é', 'utf8') == b'\xc3\xa9'
assert encode('é', 'utf8') == b'\xc3\xa9'
assert encode('é', 'utf-16') == b'\xe9\x00'
assert encode('é', 'utf-16le') == b'\xe9\x00'
assert encode('é', 'utf-16be') == b'\x00\xe9'
def METHOD_NAME():
def iter_decode_to_string(input, fallback_encoding):
output, _encoding = iter_decode(input, fallback_encoding)
return ''.join(output)
assert iter_decode_to_string([], 'latin1') == ''
assert iter_decode_to_string([b''], 'latin1') == ''
assert iter_decode_to_string([b'\xe9'], 'latin1') == 'é'
assert iter_decode_to_string([b'hello'], 'latin1') == 'hello'
assert iter_decode_to_string([b'he', b'llo'], 'latin1') == 'hello'
assert iter_decode_to_string([b'hell', b'o'], 'latin1') == 'hello'
assert iter_decode_to_string([b'\xc3\xa9'], 'latin1') == 'é'
assert iter_decode_to_string([b'\xEF\xBB\xBF\xc3\xa9'], 'latin1') == 'é'
assert iter_decode_to_string([
b'\xEF\xBB\xBF', b'\xc3', b'\xa9'], 'latin1') == 'é'
assert iter_decode_to_string([
b'\xEF\xBB\xBF', b'a', b'\xc3'], 'latin1') == 'a\uFFFD'
assert iter_decode_to_string([
b'', b'\xEF', b'', b'', b'\xBB\xBF\xc3', b'\xa9'], 'latin1') == 'é'
assert iter_decode_to_string([b'\xEF\xBB\xBF'], 'latin1') == ''
assert iter_decode_to_string([b'\xEF\xBB'], 'latin1') == 'ï»'
assert iter_decode_to_string([b'\xFE\xFF\x00\xe9'], 'latin1') == 'é'
assert iter_decode_to_string([b'\xFF\xFE\xe9\x00'], 'latin1') == 'é'
assert iter_decode_to_string([
b'', b'\xFF', b'', b'', b'\xFE\xe9', b'\x00'], 'latin1') == 'é'
assert iter_decode_to_string([
b'', b'h\xe9', b'llo'], 'x-user-defined') == 'h\uF7E9llo'
def test_iter_encode():
assert b''.join(iter_encode([], 'latin1')) == b''
assert b''.join(iter_encode([''], 'latin1')) == b''
assert b''.join(iter_encode(['é'], 'latin1')) == b'\xe9'
assert b''.join(iter_encode(['', 'é', '', ''], 'latin1')) == b'\xe9'
assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16')) == b'\xe9\x00'
assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16le')) == b'\xe9\x00'
assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16be')) == b'\x00\xe9'
assert b''.join(iter_encode([
'', 'h\uF7E9', '', 'llo'], 'x-user-defined')) == b'h\xe9llo'
def test_x_user_defined():
    encoded = b'2,\x0c\x0b\x1aO\xd9#\xcb\x0f\xc9\xbbt\xcf\xa8\xca'
    decoded = '2,\x0c\x0b\x1aO\uf7d9#\uf7cb\x0f\uf7c9\uf7bbt\uf7cf\uf7a8\uf7ca'
    assert decode(encoded, 'x-user-defined') == (decoded, lookup('x-user-defined'))
    assert encode(decoded, 'x-user-defined') == encoded
    encoded = b'aa'
    decoded = 'aa'
assert decode(encoded, 'x-user-defined') == (decoded, lookup('x-user-defined'))
assert encode(decoded, 'x-user-defined') == encoded |
299,989 | private link service connection state | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionResult',
'AwaitableGetPrivateEndpointConnectionResult',
'get_private_endpoint_connection',
'get_private_endpoint_connection_output',
]
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
"""
A private endpoint connection
"""
def __init__(__self__, id=None, name=None, private_endpoint=None, METHOD_NAME=None, provisioning_state=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if private_endpoint and not isinstance(private_endpoint, dict):
raise TypeError("Expected argument 'private_endpoint' to be a dict")
pulumi.set(__self__, "private_endpoint", private_endpoint)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
pulumi.set(__self__, "private_link_service_connection_state", METHOD_NAME)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The ID of the private endpoint connection.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the private endpoint connection.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
"""
The resource of private endpoint.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def METHOD_NAME(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The current provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
"""
The resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPrivateEndpointConnectionResult(
id=self.id,
name=self.name,
private_endpoint=self.private_endpoint,
METHOD_NAME=self.METHOD_NAME,
provisioning_state=self.provisioning_state,
type=self.type)
def get_private_endpoint_connection(private_endpoint_connection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
"""
To learn more about private clusters, see: https://docs.microsoft.com/azure/aks/private-clusters
:param str private_endpoint_connection_name: The name of the private endpoint connection.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str resource_name: The name of the managed cluster resource.
"""
__args__ = dict()
__args__['privateEndpointConnectionName'] = private_endpoint_connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:containerservice/v20230701:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
return AwaitableGetPrivateEndpointConnectionResult(
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
private_endpoint=pulumi.get(__ret__, 'private_endpoint'),
METHOD_NAME=pulumi.get(__ret__, 'private_link_service_connection_state'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
type=pulumi.get(__ret__, 'type'))
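# --- Illustrative usage (not part of the generated module; resource names below
# are hypothetical) ---
#
#   conn = get_private_endpoint_connection(
#       private_endpoint_connection_name="my-pe-connection",
#       resource_group_name="my-resource-group",
#       resource_name="my-managed-cluster")
#   pulumi.export("peProvisioningState", conn.provisioning_state)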
@_utilities.lift_output_func(get_private_endpoint_connection)
def get_private_endpoint_connection_output(private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionResult]:
"""
To learn more about private clusters, see: https://docs.microsoft.com/azure/aks/private-clusters
:param str private_endpoint_connection_name: The name of the private endpoint connection.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str resource_name: The name of the managed cluster resource.
"""
... |
299,990 | pandas | import geopandas
from shapely.geometry import Point, mapping, shape
from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
# This class defines a Metric to support your Expectation
# For most Expectations, the main business logic for calculation will live here.
# To learn about the relationship between Metrics and Expectations, please visit {some doc}.
class ColumnValuesHaveElevation(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
# Please see {some doc} for information on how to choose an id string for your Metric.
condition_metric_name = "column_values.elevated"
condition_value_keys = ()
# This method defines the business logic for evaluating your metric when using a PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def METHOD_NAME(cls, column, **kwargs):
column = column.apply(shape)
# Set crs to meters
geo_ser = geopandas.GeoSeries(column, crs={"proj": "cea"})
        # Return True where the geometry has a non-null z (elevation) coordinate
return ~geo_ser.z.isnull()
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# return column.in_([3])
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# return column.isin([3])
# This class defines the Expectation itself
# The main business logic for calculation lives here.
class ExpectColumnValuesToHaveElevation(ColumnMapExpectation):
"""Expect the column values to be points that have elevation."""
# These examples will be shown in the public gallery, and also executed as unit tests for your Expectation
examples = [
{
"data": {
"elevated": [
mapping(
Point(1, 1, 1),
),
mapping(
Point(2, 2, 2),
),
mapping(Point(3, 3, 3)),
],
"not_elevated": [
mapping(Point(1, 1)),
mapping(Point(2, 2)),
mapping(Point(3, 3)),
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "elevated",
},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "not_elevated",
},
"out": {
"success": False,
},
},
],
}
]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"geospatial",
"hackathon-22",
], # Tags for this Expectation in the gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73",
],
"requirements": ["geopandas", "shapely"],
}
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.elevated"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
# Please see {some doc} for more information about domain and success keys, and other arguments to Expectations
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {
"mostly": 1.0,
}
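# --- Illustrative usage (not part of the original module; assumes a hypothetical
# Great Expectations validator with this custom expectation registered) ---
#
#   result = validator.expect_column_values_to_have_elevation(column="elevated")
#   assert result.success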
if __name__ == "__main__":
ExpectColumnValuesToHaveElevation().print_diagnostic_checklist() |
299,991 | check if dbz running | import socket
import re
import backoff
from . import basetest
from .runner import CfLocalRunnerWithPostgreSQL
# Constants
KAFKA_CLUSTER_IMAGE_NAME = "johnnypark/kafka-zookeeper"
KAFKA_CLUSTER_IMAGE_VERSION = "2.4.0"
KAFKA_CLUSTER_NAME = "kafka-cluster"
KAFKA_CONNECT_URL = "http://localhost:8083"
KAFKA_PG_CONNECTOR_NAME = "mx-databroker-PostgreSQL-source-connector"
KAFKA_PG_CONNECTOR_STATUS_API = "{}/connectors/{}/status".format(
KAFKA_CONNECT_URL,
KAFKA_PG_CONNECTOR_NAME,
)
KAFKA_BROKER_PORT = 9092
KAFKA_ZOOKEEPER_PORT = 2181
DATABROKER_TOPIC_FORMAT_VERSION = "1_0_0"
POSTGRES_DB_DOCKER_IMAGE = "debezium/postgres"
POSTGRES_DB_VERSION = "9.6-alpine"
MAX_RETRY_COUNT = 8
BACKOFF_TIME = 10
class CfLocalRunnerWithKafka(CfLocalRunnerWithPostgreSQL):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._database_postgres_image = POSTGRES_DB_DOCKER_IMAGE
self._database_postgres_version = POSTGRES_DB_VERSION
self._kafka_container_name = "{}-{}".format(self._app_name, KAFKA_CLUSTER_NAME)
def _get_environment(self, env_vars):
environment = super()._get_environment(env_vars)
environment.update(
{
"MX_MyFirstModule_broker_url": "{}:{}".format(
self.get_host(),
KAFKA_BROKER_PORT,
)
}
)
return environment
def _start_kafka_cluster(self):
result = self._cmd(
(
"docker",
"run",
"--name",
self._kafka_container_name,
"-p",
"{}:{}".format(KAFKA_BROKER_PORT, KAFKA_BROKER_PORT),
"-e",
"ADVERTISED_HOST={}".format(self._host),
"-e",
"NUM_PARTITIONS={}".format(3),
"-d",
"{}:{}".format(
KAFKA_CLUSTER_IMAGE_NAME,
KAFKA_CLUSTER_IMAGE_VERSION,
),
)
)
if not result[1]:
raise RuntimeError(
"Cannot create {} container: {}".format(
KAFKA_CLUSTER_NAME,
result[0],
)
)
def stage(self, *args, **kwargs):
result = super().stage(*args, **kwargs)
self._start_kafka_cluster()
@backoff.on_predicate(backoff.expo, lambda x: x > 0, max_time=30)
def _await_kafka_cluster():
return socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex(
("localhost", KAFKA_BROKER_PORT)
)
_await_kafka_cluster()
return result
def is_debezium_running(self):
return self.run_on_container("curl " + KAFKA_PG_CONNECTOR_STATUS_API)
def is_azkarra_running(self):
topics = self.run_on_container(
f"./opt/kafka_2.12-{KAFKA_CLUSTER_IMAGE_VERSION}/bin/kafka-topics.sh "
f"--list --zookeeper localhost:{KAFKA_ZOOKEEPER_PORT}",
target_container=self._kafka_container_name,
)
expect_public_topic_pattern = rf".*?\.{DATABROKER_TOPIC_FORMAT_VERSION}"
return (
len(
re.findall(
r"(mx-databroker-connect-(?:configs|offsets|status))",
topics,
)
)
== 3
and len(re.findall(expect_public_topic_pattern, topics)) > 0
)
class TestCaseDataBroker(basetest.BaseTestWithPostgreSQL):
def _init_cflocal_runner(self, *args, **kwargs):
return CfLocalRunnerWithKafka(*args, **kwargs)
def test_databroker_running(self):
# os.environ[
# "PACKAGE_URL"
# ] = "https://dghq119eo3niv.cloudfront.net/test-app/MyProducer902.mda"
self.stage_container(
package="https://dghq119eo3niv.cloudfront.net/test-app/MyProducer902.mda",
env_vars={
"DATABROKER_ENABLED": "true",
"FORCED_MXRUNTIME_URL": "https://dghq119eo3niv.cloudfront.net/",
},
)
self.start_container()
# check app is running
self.assert_app_running()
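        # Poll the Kafka Connect status endpoint, retrying on any exception up to
        # MAX_RETRY_COUNT times with BACKOFF_TIME seconds between attempts.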
@backoff.on_exception(
backoff.constant,
Exception,
interval=BACKOFF_TIME,
max_tries=MAX_RETRY_COUNT,
)
def METHOD_NAME():
return self._runner.is_debezium_running()
response = METHOD_NAME()
assert str(response).find('"state":"RUNNING"') > 0
        # check azkarra is running by verifying that the expected topics have been created
assert self._runner.is_azkarra_running()
# check streaming service
output = self.get_recent_logs()
assert output is not None
assert str(output).find("State transition from REBALANCING to RUNNING") >= 0 |
299,992 | test django admin cli command | """
:codeauthor: Jayesh Kariya <[email protected]>
Test cases for salt.modules.djangomod
"""
import pytest
import salt.modules.djangomod as djangomod
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
with patch("salt.utils.path.which", lambda exe: exe):
yield {djangomod: {}}
def test_command():
"""
    Test if it runs an arbitrary Django management command
"""
mock = MagicMock(return_value=True)
with patch.dict(djangomod.__salt__, {"cmd.run": mock}):
assert djangomod.command("DJANGO_SETTINGS_MODULE", "validate")
def test_syncdb():
"""
Test if it runs the Django-Admin syncdb command
"""
mock = MagicMock(return_value=True)
with patch.dict(djangomod.__salt__, {"cmd.run": mock}):
assert djangomod.syncdb("DJANGO_SETTINGS_MODULE")
def test_migrate():
"""
Test if it runs the Django-Admin migrate command
"""
mock = MagicMock(return_value=True)
with patch.dict(djangomod.__salt__, {"cmd.run": mock}):
assert djangomod.migrate("DJANGO_SETTINGS_MODULE")
def test_createsuperuser():
"""
    Test if it creates a super user for the database.
"""
mock = MagicMock(return_value=True)
with patch.dict(djangomod.__salt__, {"cmd.run": mock}):
assert djangomod.createsuperuser(
"DJANGO_SETTINGS_MODULE", "SALT", "[email protected]"
)
def test_loaddata():
"""
Test if it loads fixture data
"""
mock = MagicMock(return_value=True)
with patch.dict(djangomod.__salt__, {"cmd.run": mock}):
assert djangomod.loaddata("DJANGO_SETTINGS_MODULE", "mydata")
def test_collectstatic():
"""
    Test if it collects static files from each of your applications
into a single location
"""
mock = MagicMock(return_value=True)
with patch.dict(djangomod.__salt__, {"cmd.run": mock}):
assert djangomod.collectstatic("DJANGO_SETTINGS_MODULE")
def METHOD_NAME():
mock = MagicMock()
with patch.dict(djangomod.__salt__, {"cmd.run": mock}):
djangomod.command("settings.py", "runserver")
mock.assert_called_once_with(
"django-admin.py runserver --settings=settings.py",
python_shell=False,
env=None,
runas=None,
)
def test_django_admin_cli_command_with_args():
mock = MagicMock()
with patch.dict(djangomod.__salt__, {"cmd.run": mock}):
djangomod.command(
"settings.py",
"runserver",
None,
None,
None,
None,
"noinput",
"somethingelse",
)
mock.assert_called_once_with(
"django-admin.py runserver --settings=settings.py "
"--noinput --somethingelse",
python_shell=False,
env=None,
runas=None,
)
def test_django_admin_cli_command_with_kwargs():
mock = MagicMock()
with patch.dict(djangomod.__salt__, {"cmd.run": mock}):
djangomod.command(
"settings.py", "runserver", None, None, None, database="something"
)
mock.assert_called_once_with(
"django-admin.py runserver --settings=settings.py --database=something",
python_shell=False,
env=None,
runas=None,
)
def test_django_admin_cli_command_with_kwargs_ignore_dunder():
mock = MagicMock()
with patch.dict(djangomod.__salt__, {"cmd.run": mock}):
djangomod.command(
"settings.py", "runserver", None, None, None, __ignore="something"
)
mock.assert_called_once_with(
"django-admin.py runserver --settings=settings.py",
python_shell=False,
env=None,
runas=None,
)
def test_django_admin_cli_syncdb():
mock = MagicMock()
with patch.dict(djangomod.__salt__, {"cmd.run": mock}):
djangomod.syncdb("settings.py")
mock.assert_called_once_with(
"django-admin.py syncdb --settings=settings.py --noinput",
python_shell=False,
env=None,
runas=None,
)
def test_django_admin_cli_syncdb_migrate():
mock = MagicMock()
with patch.dict(djangomod.__salt__, {"cmd.run": mock}):
djangomod.syncdb("settings.py", migrate=True)
mock.assert_called_once_with(
"django-admin.py syncdb --settings=settings.py --migrate --noinput",
python_shell=False,
env=None,
runas=None,
)
def test_django_admin_cli_migrate():
mock = MagicMock()
with patch.dict(djangomod.__salt__, {"cmd.run": mock}):
djangomod.migrate("settings.py")
mock.assert_called_once_with(
"django-admin.py migrate --settings=settings.py --noinput",
python_shell=False,
env=None,
runas=None,
)
def test_django_admin_cli_createsuperuser():
mock = MagicMock()
with patch.dict(djangomod.__salt__, {"cmd.run": mock}):
djangomod.createsuperuser("settings.py", "testuser", "[email protected]")
assert mock.call_count == 1
mock.assert_called_with(
"django-admin.py createsuperuser --settings=settings.py --noinput "
"[email protected] --username=testuser",
env=None,
python_shell=False,
runas=None,
)
def no_test_loaddata():
mock = MagicMock()
with patch.dict(djangomod.__salt__, {"cmd.run": mock}):
djangomod.loaddata("settings.py", "app1,app2")
mock.assert_called_once_with(
"django-admin.py loaddata --settings=settings.py app1 app2",
)
def test_django_admin_cli_collectstatic():
mock = MagicMock()
with patch.dict(djangomod.__salt__, {"cmd.run": mock}):
djangomod.collectstatic(
"settings.py", None, True, "something", True, True, True, True
)
mock.assert_called_once_with(
"django-admin.py collectstatic --settings=settings.py "
"--noinput --no-post-process --dry-run --clear --link "
"--no-default-ignore --ignore=something",
python_shell=False,
env=None,
runas=None,
) |
299,993 | test ignore errors true | """IgnoreErrorsRule used with ansible-lint."""
from __future__ import annotations
import sys
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
class IgnoreErrorsRule(AnsibleLintRule):
"""Use failed_when and specify error conditions instead of using ignore_errors."""
id = "ignore-errors"
description = (
"Instead of ignoring all errors, ignore the errors only when using ``{{ ansible_check_mode }}``, "
"register the errors using ``register``, "
"or use ``failed_when:`` and specify acceptable error conditions "
"to reduce the risk of ignoring important failures."
)
severity = "LOW"
tags = ["unpredictability"]
version_added = "v5.0.7"
def matchtask(
self,
task: Task,
file: Lintable | None = None,
) -> bool | str:
if (
task.get("ignore_errors")
and task.get("ignore_errors") != "{{ ansible_check_mode }}"
and not task.get("register")
):
return True
return False
if "pytest" in sys.modules:
    import pytest
    if TYPE_CHECKING:
        from ansiblelint.testing import RunFromText  # pylint: disable=ungrouped-imports
    IGNORE_ERRORS_TRUE = """
- hosts: all
tasks:
- name: Run apt-get update
command: apt-get update
ignore_errors: true
"""
    IGNORE_ERRORS_FALSE = """
- hosts: all
tasks:
- name: Run apt-get update
command: apt-get update
ignore_errors: false
"""
    IGNORE_ERRORS_CHECK_MODE = """
- hosts: all
tasks:
- name: Run apt-get update
command: apt-get update
ignore_errors: "{{ ansible_check_mode }}"
"""
    IGNORE_ERRORS_REGISTER = """
- hosts: all
tasks:
- name: Run apt-get update
command: apt-get update
ignore_errors: true
register: ignore_errors_register
"""
    FAILED_WHEN = """
- hosts: all
tasks:
- name: Disable apport
become: 'yes'
lineinfile:
line: "enabled=0"
dest: /etc/default/apport
mode: 0644
state: present
register: default_apport
failed_when: default_apport.rc !=0 and not default_apport.rc == 257
"""
    @pytest.mark.parametrize(
        "rule_runner",
        (IgnoreErrorsRule,),
        indirect=["rule_runner"],
    )
    def METHOD_NAME(rule_runner: RunFromText) -> None:
        """The task uses ignore_errors."""
        results = rule_runner.run_playbook(IGNORE_ERRORS_TRUE)
        assert len(results) == 1
    @pytest.mark.parametrize(
        "rule_runner",
        (IgnoreErrorsRule,),
        indirect=["rule_runner"],
    )
    def test_ignore_errors_false(rule_runner: RunFromText) -> None:
        """The task uses ignore_errors: false, oddly enough."""
        results = rule_runner.run_playbook(IGNORE_ERRORS_FALSE)
        assert len(results) == 0
    @pytest.mark.parametrize(
        "rule_runner",
        (IgnoreErrorsRule,),
        indirect=["rule_runner"],
    )
    def test_ignore_errors_check_mode(rule_runner: RunFromText) -> None:
        """The task uses ignore_errors: "{{ ansible_check_mode }}"."""
        results = rule_runner.run_playbook(IGNORE_ERRORS_CHECK_MODE)
        assert len(results) == 0
    @pytest.mark.parametrize(
        "rule_runner",
        (IgnoreErrorsRule,),
        indirect=["rule_runner"],
    )
    def test_ignore_errors_register(rule_runner: RunFromText) -> None:
        """The task uses ignore_errors: but output is registered and managed."""
        results = rule_runner.run_playbook(IGNORE_ERRORS_REGISTER)
        assert len(results) == 0
    @pytest.mark.parametrize(
        "rule_runner",
        (IgnoreErrorsRule,),
        indirect=["rule_runner"],
    )
    def test_failed_when(rule_runner: RunFromText) -> None:
        """Instead of ignore_errors, this task uses failed_when."""
        results = rule_runner.run_playbook(FAILED_WHEN)
        assert len(results) == 0 |
299,994 | aries container generate invitation | import asyncio
import json
import uuid
from runners.agent_container import AgentContainer, create_agent_with_args_list
######################################################################
# coroutine utilities
######################################################################
def run_coroutine(coroutine, *args, **kwargs):
loop = asyncio.get_event_loop()
if not loop:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
return loop.run_until_complete(coroutine(*args, **kwargs))
finally:
pass
# loop.close()
def async_sleep(delay):
run_coroutine(asyncio.sleep, delay)
######################################################################
# high level aries agent interface
######################################################################
def create_agent_container_with_args(in_args: list):
return run_coroutine(create_agent_with_args_list, in_args)
def aries_container_initialize(
the_container: AgentContainer,
schema_name: str = None,
schema_attrs: list = None,
):
run_coroutine(
the_container.initialize,
schema_name=schema_name,
schema_attrs=schema_attrs,
)
def agent_container_register_did(
the_container: AgentContainer,
did: str,
verkey: str,
role: str,
):
run_coroutine(
the_container.register_did,
did,
verkey,
role,
)
def aries_container_terminate(
the_container: AgentContainer,
):
return run_coroutine(the_container.terminate)
def METHOD_NAME(
the_container: AgentContainer,
):
return run_coroutine(
the_container.generate_invitation,
)
def aries_container_receive_invitation(
the_container: AgentContainer,
invite_details: dict,
):
return run_coroutine(
the_container.input_invitation,
invite_details,
)
def aries_container_detect_connection(
the_container: AgentContainer,
):
run_coroutine(the_container.detect_connection)
def aries_container_create_schema_cred_def(
the_container: AgentContainer,
schema_name: str,
schema_attrs: list,
version: str = None,
):
return run_coroutine(
the_container.create_schema_and_cred_def,
schema_name,
schema_attrs,
version=version,
)
def aries_container_issue_credential(
the_container: AgentContainer,
cred_def_id: str,
cred_attrs: list,
):
return run_coroutine(
the_container.issue_credential,
cred_def_id,
cred_attrs,
)
def aries_container_receive_credential(
the_container: AgentContainer,
cred_def_id: str,
cred_attrs: list,
):
return run_coroutine(
the_container.receive_credential,
cred_def_id,
cred_attrs,
)
def aries_container_request_proof(
the_container: AgentContainer,
proof_request: dict,
explicit_revoc_required: bool = False,
):
return run_coroutine(
the_container.request_proof,
proof_request,
explicit_revoc_required=explicit_revoc_required,
)
def aries_container_verify_proof(
the_container: AgentContainer,
proof_request: dict,
):
return run_coroutine(
the_container.verify_proof,
proof_request,
)
######################################################################
# aries agent admin api interface
######################################################################
######################################################################
# general utilities
######################################################################
def read_json_data(file_name: str):
with open("features/data/" + file_name) as data_file:
return json.load(data_file)
def read_schema_data(schema_name: str):
return read_json_data("schema_" + schema_name + ".json")
def read_credential_data(schema_name: str, cred_scenario_name: str):
schema_cred_data = read_json_data("cred_data_schema_" + schema_name + ".json")
cred_data = schema_cred_data[cred_scenario_name]
for attr in cred_data["attributes"]:
if attr["value"] == "@uuid":
attr["value"] = str(uuid.uuid4())
return cred_data["attributes"]
def read_proof_req_data(proof_req_name: str):
proof_request_info = read_json_data("proof_request_" + proof_req_name + ".json")
return proof_request_info["presentation_proposal"]
def read_presentation_data(presentation_name: str):
return read_json_data("presentation_" + presentation_name + ".json")
######################################################################
# probably obsolete ...
######################################################################
def agent_container_GET(
the_container: AgentContainer,
path: str,
text: bool = False,
params: dict = None,
) -> dict:
return run_coroutine(
the_container.admin_GET,
path,
text=text,
params=params,
)
def agent_container_POST(
the_container: AgentContainer,
path: str,
data: dict = None,
text: bool = False,
params: dict = None,
) -> dict:
return run_coroutine(
the_container.admin_POST,
path,
data=data,
text=text,
params=params,
)
def agent_container_PATCH(
the_container: AgentContainer,
path: str,
data: dict = None,
text: bool = False,
params: dict = None,
) -> dict:
return run_coroutine(
the_container.admin_PATCH,
path,
data=data,
text=text,
params=params,
)
def agent_container_PUT(
the_container: AgentContainer,
path: str,
data: dict = None,
text: bool = False,
params: dict = None,
) -> dict:
return run_coroutine(
the_container.admin_PUT,
path,
data=data,
text=text,
params=params,
) |
299,995 | set in slot | ###############################################################################
# lazyflow: data flow based lazy parallel computation framework
#
# Copyright (C) 2011-2014, the ilastik developers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
import numpy
from lazyflow.graph import Operator, InputSlot, OutputSlot
from lazyflow.roi import roiFromShape, roiToSlice
class OpCacheFixer(Operator):
"""
Can be inserted in front of a cache operator to implement the "fixAtCurrent"
behavior currently implemented by multiple lazyflow caches.
While fixAtCurrent=False, this operator is merely a pass-through.
While fixAtCurrent=True, this operator does not forward dirty notifications
to downstream operators. Instead, it remembers the total ROI of the dirty area
(as a bounding box), and emits the entire dirty ROI at once as soon as it becomes "unfixed".
Also, this operator returns only zeros while fixAtCurrent=True.
"""
fixAtCurrent = InputSlot(value=False)
Input = InputSlot(allow_mask=True)
Output = OutputSlot(allow_mask=True)
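    # Illustrative wiring sketch (not part of the original file; assumes the usual
    # lazyflow slot API):
    #   op = OpCacheFixer(graph=Graph())
    #   op.Input.connect(upstream.Output)  # or op.Input.setValue(numpy_array)
    #   op.fixAtCurrent.setValue(True)     # reads return zeros, dirty events are held back
    #   op.fixAtCurrent.setValue(False)    # one dirty notification covers everything touched
    #   block = op.Output[0:10, 0:10].wait()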
def __init__(self, *args, **kwargs):
super(OpCacheFixer, self).__init__(*args, **kwargs)
self._fixed = False
self._fixed_dirty_roi = None
def setupOutputs(self):
self.Output.meta.assignFrom(self.Input.meta)
self.Output.meta.dontcache = self.fixAtCurrent.value
# During initialization, if fixAtCurrent is configured before Input, then propagateDirty was never called.
# We need to make sure that the dirty logic for fixAtCurrent has definitely been called here.
self.propagateDirty(self.fixAtCurrent, (), slice(None))
def execute(self, slot, subindex, roi, result):
if self._fixed:
# The downstream user doesn't know he's getting fake data.
# When we become "unfixed", we need to tell him.
self._expand_fixed_dirty_roi((roi.start, roi.stop))
result[:] = 0
else:
self.Input(roi.start, roi.stop).writeInto(result).wait()
def METHOD_NAME(self, slot, subindex, roi, value):
# Forward to the output
self.Output[roiToSlice(roi.start, roi.stop)] = value
entire_roi = roiFromShape(self.Input.meta.shape)
if (numpy.array((roi.start, roi.stop)) == entire_roi).all():
# Nothing is dirty any more.
self._init_fixed_dirty_roi()
def propagateDirty(self, slot, subindex, roi):
if slot is self.fixAtCurrent:
# If we're becoming UN-fixed, send out a big dirty notification
if (
self._fixed
and not self.fixAtCurrent.value
and self._fixed_dirty_roi
and (self._fixed_dirty_roi[1] - self._fixed_dirty_roi[0] > 0).all()
):
self.Output.setDirty(*self._fixed_dirty_roi)
self._fixed_dirty_roi = None
self._fixed = self.fixAtCurrent.value
elif slot is self.Input:
if self._fixed:
# We can't propagate this downstream,
# but we need to remember that it was marked dirty.
# Expand our dirty bounding box.
self._expand_fixed_dirty_roi((roi.start, roi.stop))
else:
self.Output.setDirty(roi.start, roi.stop)
def _init_fixed_dirty_roi(self):
# Intentionally flipped: nothing is dirty at first.
entire_roi = roiFromShape(self.Input.meta.shape)
self._fixed_dirty_roi = (entire_roi[1], entire_roi[0])
def _expand_fixed_dirty_roi(self, roi):
if self._fixed_dirty_roi is None:
self._init_fixed_dirty_roi()
start, stop = self._fixed_dirty_roi
start = numpy.minimum(start, roi[0])
stop = numpy.maximum(stop, roi[1])
self._fixed_dirty_roi = (start, stop) |
299,996 | get fasttest cmd | #!/usr/bin/env python3
import logging
import subprocess
import os
import csv
import sys
from github import Github
from env_helper import CACHES_PATH, TEMP_PATH
from pr_info import FORCE_TESTS_LABEL, PRInfo
from s3_helper import S3Helper
from get_robot_token import get_best_robot_token
from upload_result_helper import upload_results
from docker_pull_helper import get_image_with_version
from commit_status_helper import (
post_commit_status,
fail_simple_check,
)
from clickhouse_helper import (
ClickHouseHelper,
mark_flaky_tests,
prepare_tests_results_for_clickhouse,
)
from stopwatch import Stopwatch
from rerun_helper import RerunHelper
from tee_popen import TeePopen
from ccache_utils import get_ccache_if_not_exists, upload_ccache
NAME = "Fast test"
def METHOD_NAME(
workspace, output_path, ccache_path, repo_path, pr_number, commit_sha, image
):
return (
f"docker run --cap-add=SYS_PTRACE "
f"-e FASTTEST_WORKSPACE=/fasttest-workspace -e FASTTEST_OUTPUT=/test_output "
f"-e FASTTEST_SOURCE=/ClickHouse --cap-add=SYS_PTRACE "
f"-e PULL_REQUEST_NUMBER={pr_number} -e COMMIT_SHA={commit_sha} "
f"-e COPY_CLICKHOUSE_BINARY_TO_OUTPUT=1 "
f"--volume={workspace}:/fasttest-workspace --volume={repo_path}:/ClickHouse "
f"--volume={output_path}:/test_output "
f"--volume={ccache_path}:/fasttest-workspace/ccache {image}"
)
def process_results(result_folder):
test_results = []
additional_files = []
# Just upload all files from result_folder.
# If task provides processed results, then it's responsible for content of
# result_folder
if os.path.exists(result_folder):
test_files = [
f
for f in os.listdir(result_folder)
if os.path.isfile(os.path.join(result_folder, f))
]
additional_files = [os.path.join(result_folder, f) for f in test_files]
status = []
status_path = os.path.join(result_folder, "check_status.tsv")
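    # check_status.tsv is expected to hold a single tab-separated row of the form
    # "<state>\t<description>", e.g. "success\tAll checks finished".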
if os.path.exists(status_path):
        logging.info("Found check_status.tsv")
with open(status_path, "r", encoding="utf-8") as status_file:
status = list(csv.reader(status_file, delimiter="\t"))
if len(status) != 1 or len(status[0]) != 2:
logging.info("Files in result folder %s", os.listdir(result_folder))
return "error", "Invalid check_status.tsv", test_results, additional_files
state, description = status[0][0], status[0][1]
results_path = os.path.join(result_folder, "test_results.tsv")
if os.path.exists(results_path):
with open(results_path, "r", encoding="utf-8") as results_file:
test_results = list(csv.reader(results_file, delimiter="\t"))
if len(test_results) == 0:
return "error", "Empty test_results.tsv", test_results, additional_files
return state, description, test_results, additional_files
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
stopwatch = Stopwatch()
temp_path = TEMP_PATH
if not os.path.exists(temp_path):
os.makedirs(temp_path)
pr_info = PRInfo()
gh = Github(get_best_robot_token())
rerun_helper = RerunHelper(gh, pr_info, NAME)
if rerun_helper.is_already_finished_by_status():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
docker_image = get_image_with_version(temp_path, "clickhouse/fasttest")
s3_helper = S3Helper("https://s3.amazonaws.com")
workspace = os.path.join(temp_path, "fasttest-workspace")
if not os.path.exists(workspace):
os.makedirs(workspace)
output_path = os.path.join(temp_path, "fasttest-output")
if not os.path.exists(output_path):
os.makedirs(output_path)
if not os.path.exists(CACHES_PATH):
os.makedirs(CACHES_PATH)
subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {CACHES_PATH}", shell=True)
cache_path = os.path.join(CACHES_PATH, "fasttest")
logging.info("Will try to fetch cache for our build")
ccache_for_pr = get_ccache_if_not_exists(
cache_path, s3_helper, pr_info.number, temp_path
)
upload_master_ccache = ccache_for_pr in (-1, 0)
if not os.path.exists(cache_path):
logging.info("cache was not fetched, will create empty dir")
os.makedirs(cache_path)
repo_path = os.path.join(temp_path, "fasttest-repo")
if not os.path.exists(repo_path):
os.makedirs(repo_path)
run_cmd = METHOD_NAME(
workspace,
output_path,
cache_path,
repo_path,
pr_info.number,
pr_info.sha,
docker_image,
)
logging.info("Going to run fasttest with cmd %s", run_cmd)
logs_path = os.path.join(temp_path, "fasttest-logs")
if not os.path.exists(logs_path):
os.makedirs(logs_path)
run_log_path = os.path.join(logs_path, "runlog.log")
with TeePopen(run_cmd, run_log_path, timeout=40 * 60) as process:
retcode = process.wait()
if retcode == 0:
logging.info("Run successfully")
else:
logging.info("Run failed")
subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True)
subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {cache_path}", shell=True)
test_output_files = os.listdir(output_path)
additional_logs = []
for f in test_output_files:
additional_logs.append(os.path.join(output_path, f))
test_log_exists = (
"test_log.txt" in test_output_files or "test_result.txt" in test_output_files
)
test_result_exists = "test_results.tsv" in test_output_files
test_results = []
if "submodule_log.txt" not in test_output_files:
description = "Cannot clone repository"
state = "failure"
elif "cmake_log.txt" not in test_output_files:
description = "Cannot fetch submodules"
state = "failure"
elif "build_log.txt" not in test_output_files:
description = "Cannot finish cmake"
state = "failure"
elif "install_log.txt" not in test_output_files:
description = "Cannot build ClickHouse"
state = "failure"
elif not test_log_exists and not test_result_exists:
description = "Cannot install or start ClickHouse"
state = "failure"
else:
state, description, test_results, additional_logs = process_results(output_path)
logging.info("Will upload cache")
upload_ccache(cache_path, s3_helper, pr_info.number, temp_path)
if upload_master_ccache:
logging.info("Will upload a fallback cache for master")
upload_ccache(cache_path, s3_helper, 0, temp_path)
ch_helper = ClickHouseHelper()
mark_flaky_tests(ch_helper, NAME, test_results)
report_url = upload_results(
s3_helper,
pr_info.number,
pr_info.sha,
test_results,
[run_log_path] + additional_logs,
NAME,
True,
)
print(f"::notice ::Report url: {report_url}")
post_commit_status(gh, pr_info.sha, NAME, description, state, report_url)
prepared_events = prepare_tests_results_for_clickhouse(
pr_info,
test_results,
state,
stopwatch.duration_seconds,
stopwatch.start_time_str,
report_url,
NAME,
)
ch_helper.insert_events_into(db="default", table="checks", events=prepared_events)
# Refuse other checks to run if fast test failed
if state != "success":
if FORCE_TESTS_LABEL in pr_info.labels and state != "error":
print(f"'{FORCE_TESTS_LABEL}' enabled, will report success")
else:
fail_simple_check(gh, pr_info, f"{NAME} failed")
sys.exit(1) |
299,997 | mass 2d | from lenstronomy.LensModel.Profiles.hernquist import Hernquist
import lenstronomy.Util.param_util as param_util
from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase
import numpy as np
__all__ = ["Hernquist_Ellipse"]
class Hernquist_Ellipse(LensProfileBase):
"""This class contains functions for the elliptical Hernquist profile.
Ellipticity is defined in the potential.
"""
param_names = ["sigma0", "Rs", "e1", "e2", "center_x", "center_y"]
lower_limit_default = {
"sigma0": 0,
"Rs": 0,
"e1": -0.5,
"e2": -0.5,
"center_x": -100,
"center_y": -100,
}
upper_limit_default = {
"sigma0": 100,
"Rs": 100,
"e1": 0.5,
"e2": 0.5,
"center_x": 100,
"center_y": 100,
}
def __init__(self):
self.spherical = Hernquist()
self._diff = 0.00000001
super(Hernquist_Ellipse, self).__init__()
    def function(self, x, y, sigma0, Rs, e1, e2, center_x=0, center_y=0):
        """Returns the lensing potential of the elliptical Hernquist profile."""
x_, y_ = param_util.transform_e1e2_square_average(
x, y, e1, e2, center_x, center_y
)
f_ = self.spherical.function(x_, y_, sigma0, Rs)
return f_
    def derivatives(self, x, y, sigma0, Rs, e1, e2, center_x=0, center_y=0):
        """Returns df/dx and df/dy of the potential (deflection angles of the Hernquist profile)"""
x_, y_ = param_util.transform_e1e2_square_average(
x, y, e1, e2, center_x, center_y
)
phi_G, q = param_util.ellipticity2phi_q(e1, e2)
cos_phi = np.cos(phi_G)
sin_phi = np.sin(phi_G)
e = param_util.q2e(q)
f_x_prim, f_y_prim = self.spherical.derivatives(x_, y_, sigma0, Rs)
f_x_prim *= np.sqrt(1 - e)
f_y_prim *= np.sqrt(1 + e)
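        # rotate the deflection back from the ellipse-aligned frame into the
        # original coordinate frame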
f_x = cos_phi * f_x_prim - sin_phi * f_y_prim
f_y = sin_phi * f_x_prim + cos_phi * f_y_prim
return f_x, f_y
    def hessian(self, x, y, sigma0, Rs, e1, e2, center_x=0, center_y=0):
        """Returns Hessian matrix of function d^2f/dx^2, d^2f/dxdy, d^2f/dydx,
        d^2f/dy^2."""
alpha_ra, alpha_dec = self.derivatives(
x, y, sigma0, Rs, e1, e2, center_x, center_y
)
diff = self._diff
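        # The Hessian is approximated with forward finite differences of the deflection
        # angle, e.g. f_xx ~ (alpha_x(x + diff, y) - alpha_x(x, y)) / diff.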
alpha_ra_dx, alpha_dec_dx = self.derivatives(
x + diff, y, sigma0, Rs, e1, e2, center_x, center_y
)
alpha_ra_dy, alpha_dec_dy = self.derivatives(
x, y + diff, sigma0, Rs, e1, e2, center_x, center_y
)
f_xx = (alpha_ra_dx - alpha_ra) / diff
f_xy = (alpha_ra_dy - alpha_ra) / diff
f_yx = (alpha_dec_dx - alpha_dec) / diff
f_yy = (alpha_dec_dy - alpha_dec) / diff
return f_xx, f_xy, f_yx, f_yy
def density(self, r, rho0, Rs, e1=0, e2=0):
"""Computes the 3-d density.
:param r: 3-d radius
:param rho0: density normalization
:param Rs: Hernquist radius
:return: density at radius r
"""
return self.spherical.density(r, rho0, Rs)
    def density_lens(self, r, sigma0, Rs, e1=0, e2=0):
        """Density as a function of 3d radius in lensing parameters. This function
converts the lensing definition sigma0 into the 3d density.
:param r: 3d radius
:param sigma0: rho0 * Rs (units of projected density)
:param Rs: Hernquist radius
:return: enclosed mass in 3d
"""
return self.spherical.density_lens(r, sigma0, Rs)
def density_2d(self, x, y, rho0, Rs, e1=0, e2=0, center_x=0, center_y=0):
"""Projected density along the line of sight at coordinate (x, y)
:param x: x-coordinate
:param y: y-coordinate
:param rho0: density normalization
:param Rs: Hernquist radius
:param center_x: x-center of the profile
:param center_y: y-center of the profile
:return: projected density
"""
return self.spherical.density_2d(x, y, rho0, Rs, center_x, center_y)
    def mass_2d_lens(self, r, sigma0, Rs, e1=0, e2=0):
        """Mass enclosed projected 2d sphere of radius r. Same as mass_2d but with input
normalization in units of projected density.
:param r: projected radius
:param sigma0: rho0 * Rs (units of projected density)
:param Rs: Hernquist radius
:return: mass enclosed 2d projected radius
"""
return self.spherical.mass_2d_lens(r, sigma0, Rs)
def METHOD_NAME(self, r, rho0, Rs, e1=0, e2=0):
"""Mass enclosed projected 2d sphere of radius r.
:param r: projected radius
:param rho0: density normalization
:param Rs: Hernquist radius
:return: mass enclosed 2d projected radius
"""
return self.spherical.METHOD_NAME(r, rho0, Rs)
    def mass_3d(self, r, rho0, Rs, e1=0, e2=0):
        """Mass enclosed in a 3d sphere of radius r.
:param r: 3-d radius within the mass is integrated (same distance units as
density definition)
:param rho0: density normalization
:param Rs: Hernquist radius
:return: enclosed mass
"""
return self.spherical.mass_3d(r, rho0, Rs)
    def mass_3d_lens(self, r, sigma0, Rs, e1=0, e2=0):
        """Mass enclosed in a 3d sphere of radius r in lensing parameterization.
:param r: 3-d radius within the mass is integrated (same distance units as
density definition)
:param sigma0: rho0 * Rs (units of projected density)
:param Rs: Hernquist radius
:return: enclosed mass
"""
return self.spherical.mass_3d_lens(r, sigma0, Rs) |
299,998 | bootstrap field | import json
from crispy_forms.helper import FormHelper
from crispy_forms.utils import render_crispy_form
from django import template
from django.conf import settings
from django.urls import reverse, NoReverseMatch
from django.template.defaultfilters import safe
from hashlib import md5
from markdown import markdown as to_markdown
from bootstrap3.templatetags.bootstrap3 import METHOD_NAME as b3_field
from allauth.socialaccount.models import SocialApp
from allauth.socialaccount import providers
from squad import version
from squad.compat import get_socialaccount_provider
from squad.core.models import Test, Build
from squad.core.utils import format_metadata
from squad.jinja2 import register_global_function, register_filter
# For DRF's compatibility with DTL
register = template.Library()
@register_global_function
def METHOD_NAME(*args, **kwargs):
return b3_field(*args, **kwargs)
@register_global_function
def url(path, *args, **kwargs):
try:
return reverse(path, *args, **kwargs)
except NoReverseMatch:
return None
@register_global_function
def string(value):
return str(value)
@register_global_function
def group_url(group):
return reverse('group', args=[group.slug])
@register_global_function
def project_url(the_object):
name = type(the_object).__name__.lower()
if name == 'project':
project = the_object
args = (project.group.slug, project.slug)
else:
project = the_object.project
group = project.group
args = (group.slug, project.slug) + (the_object.version,)
return reverse(name, args=args)
@register_global_function
def testrun_suite_tests_url(group, project, build, status):
return testrun_suite_or_test_url(group, project, build, status, 'testrun_suite_tests')
@register_global_function
def testrun_suite_metrics_url(group, project, build, status):
return testrun_suite_or_test_url(group, project, build, status, 'testrun_suite_metrics')
@register_global_function
def testrun_suite_test_details_url(group, project, build, status, test):
return testrun_suite_or_test_url(group, project, build, status, 'testrun_suite_test_details', test)
@register_global_function
def testrun_suite_test_details_history_url(group, project, build, status, test):
return testrun_suite_or_test_url(group, project, build, status, 'test_history', test)
def testrun_suite_or_test_url(group, project, build, status, kind, test=None):
testrun = status.test_run.id
suite = status.suite
args = (
group.slug,
project.slug,
build.version,
testrun,
suite.slug.replace('/', '$'),
)
if test:
if isinstance(test, Test):
args = args + (test.name.replace('/', '$'),)
else:
args = args + (test.replace('/', '$'),)
return reverse(kind, args=args)
@register_global_function
def build_url(build):
return reverse("build", args=(build.project.group.slug, build.project.slug, build.version))
@register_global_function
def previous_build_url(build):
previous_build = None
try:
previous_build = build.get_previous_by_created_at(project=build.project)
except Build.DoesNotExist:
pass
if previous_build:
return build_url(previous_build)
@register_global_function
def next_build_url(build):
next_build = None
try:
next_build = build.get_next_by_created_at(project=build.project)
except Build.DoesNotExist:
pass
if next_build:
return build_url(next_build)
else:
return build_url(build)
@register_global_function
def back_to_latest_build_url(build):
return build_url(Build.objects.filter(project=build.project).last())
@register_global_function
def project_section_url(project, name):
return reverse(name, args=(project.group.slug, project.slug))
@register_global_function
def build_section_url(build, name):
return reverse(name, args=(build.project.group.slug, build.project.slug, build.version))
@register_global_function
def download_build_attachments_url(group_slug, project_slug, build_version, testrun, filename):
return reverse('build_attachments', args=(group_slug, project_slug, build_version, testrun, filename))
@register_global_function
def project_status(project):
if project.latest_build is not None:
return project.latest_build.status
return None
# Needed to rename this function due to conflict with Django's auth module
# that already sets a global 'site_name', overwriting ours
# https://github.com/django/django/blob/master/django/contrib/auth/views.py#L99
@register_global_function
@register.simple_tag
def squad_site_name():
return settings.SITE_NAME
@register_global_function(takes_context=True)
@register.simple_tag(takes_context=True)
def active(context, name):
wanted = reverse(name)
path = context['request'].path
if path == wanted:
return 'active'
else:
return ''
@register_global_function(takes_context=True)
def login_message(context, tag, classes):
msg = settings.SQUAD_LOGIN_MESSAGE
if msg:
return '<%s class="%s">%s</%s>' % (tag, classes, msg, tag)
else:
return ''
@register_global_function
@register.simple_tag
def squad_version():
return version.__version__
@register.filter
@register_filter
def metadata_value(v):
return format_metadata(v, "<br/>")
@register_filter
def markdown(mkdn):
if mkdn is None:
return ''
return safe(to_markdown(mkdn))
@register_filter
def get_page_list(items):
first = max(items.number - 5, 1)
last = min(items.number + 5, items.paginator.num_pages)
pages = range(first, last + 1)
return {
"link_first": 1 not in pages,
"head_ellipsis": 2 not in pages,
"pages": pages,
"tail_ellipsis": (items.paginator.num_pages - 1) not in pages,
"link_last": items.paginator.num_pages not in pages,
}
@register_global_function(takes_context=True)
def update_get_parameters(context, parameters):
query_string = context['request'].GET.copy()
for p in parameters.keys():
if parameters[p] is None and p in query_string.keys():
del query_string[p]
else:
query_string[p] = parameters[p]
return '?' + query_string.urlencode()
@register_global_function(takes_context=True)
def strip_get_parameters(context, parameters):
return update_get_parameters(context, {p: None for p in parameters})
@register_global_function(takes_context=True)
def get_page_url(context, page):
return update_get_parameters(context, {'page': page})
@register_filter
def add_class(field, class_name):
return field.as_widget(attrs={"class": class_name})
@register_global_function
@register.simple_tag
def avatar_url(email, size=150):
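    # Gravatar URLs use the MD5 hex digest of the trimmed, lower-cased e-mail
    # address; "default=mm" falls back to the generic "mystery man" image.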
h = md5(email.encode('utf-8').strip().lower()).hexdigest()
return 'https://www.gravatar.com/avatar/%s?s=%s&default=mm' % (h, size)
@register_global_function(takes_context=True)
def crispy(context, form, **options):
helper = FormHelper()
helper.form_tag = False
for option, value in options.items():
setattr(helper, option, value)
return render_crispy_form(form, helper=helper, context=context)
@register_global_function()
def to_json(d):
try:
json_string = json.dumps(d)
except TypeError:
json_string = ''
return json_string
@register_global_function(takes_context=True)
def socialaccount_providers(context):
request = context['request']
return_dict = {}
for socialapp in SocialApp.objects.all():
provider = get_socialaccount_provider(providers, socialapp, request)
return_dict.update({provider: provider.get_login_url(request)})
return return_dict |
299,999 | query op | #
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD. See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Dict
from elasticsearch_dsl import Q
from cachetools import cached
from optimade.filterparser import LarkParser
from optimade.filtertransformers.elasticsearch import (
ElasticsearchQuantity as Quantity, ElasticTransformer as OPTElasticTransformer)
from .common import provider_specific_fields
_parser = LarkParser(version=(1, 0, 1))
class FilterException(Exception):
    ''' Raised on parsing a filter expression with syntactic or semantic errors. '''
pass
@cached(cache={})
def _get_transformer(without_prefix, **kwargs):
from nomad.datamodel import OptimadeEntry
quantities: Dict[str, Quantity] = {
q.name: Quantity(
q.name, backend_field='optimade.%s' % q.name,
elastic_mapping_type=q.a_elasticsearch.mapping['type'])
for q in OptimadeEntry.m_def.all_quantities.values()
if 'elasticsearch' in q.m_annotations}
quantities['id'] = Quantity('id', backend_field='entry_id', elastic_mapping_type='keyword')
quantities['immutable_id'] = Quantity('immutable_id', backend_field='entry_id', elastic_mapping_type='keyword')
quantities['last_modified'] = Quantity(
'last_modified', backend_field='upload_create_time', elastic_mapping_type='date')
quantities['elements'].length_quantity = quantities['nelements']
quantities['elements'].nested_quantity = quantities['elements_ratios']
quantities['elements_ratios'].nested_quantity = quantities['elements_ratios']
for name, search_quantity in provider_specific_fields().items():
names = ['_nmd_' + name]
if without_prefix:
names.append(name)
for name in names:
if name not in quantities:
quantities[name] = Quantity(
name,
backend_field=search_quantity.search_field,
elastic_mapping_type=search_quantity.mapping['type'])
return ElasticTransformer(quantities=quantities, **kwargs)
def parse_filter(filter_str: str, without_prefix=False) -> Q:
''' Parses the given optimade filter str and returns a suitable elastic search query.
Arguments:
filter_str: Can be direct user input with no prior processing.
without_prefix: Do not prefix the nomad proprietary properties with _nmd_.
Raises:
FilterException: If the given str cannot be parsed, or if there are any semantic
errors in the given expression.
'''
from .elasticsearch import NomadStructureMapper
transformer = _get_transformer(without_prefix, mapper=NomadStructureMapper)
try:
parse_tree = _parser.parse(filter_str)
except Exception as e:
raise FilterException('Syntax error: %s' % str(e))
try:
query = transformer.transform(parse_tree)
except Exception as e:
raise FilterException('Semantic error: %s' % str(e))
return query
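# Illustrative usage (not part of the original module; the filter string is just an
# example of OPTIMADE filter syntax):
#   es_query = parse_filter('elements HAS ALL "Ti", "O" AND nelements = 3')
# The returned elasticsearch_dsl Q object can be combined with other queries or
# passed to a Search object.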
class ElasticTransformer(OPTElasticTransformer):
def METHOD_NAME(self, quantity, op, value, nested=None):
"""
Return a range, match, or term query for the given quantity, comparison
operator, and value
"""
field = self._field(quantity, nested=nested)
if op in self.operator_map:
return Q("range", **{field: {self.operator_map[op]: value}})
if quantity.elastic_mapping_type == 'text':
query_type = "match"
elif quantity.elastic_mapping_type in ['keyword', 'integer', 'float', 'bool']:
query_type = "term"
else:
raise NotImplementedError("Quantity has unsupported ES field type")
if op in ["=", ""]:
return Q(query_type, **{field: value})
if op == "!=":
return ~Q( # pylint: disable=invalid-unary-operand-type
query_type, **{field: value}
)
def _has_query_op(self, quantities, op, predicate_zip_list):
# We override this to add 'HAS ONLY' support.
if op == 'HAS ONLY':
# HAS ONLY can be achieved by rewriting to a combination of HAS ALL and
            # length = n_values. Therefore, it is only supported for quantities with a
# length quantity.
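            # For example, `elements HAS ONLY "Si", "O"` is evaluated as
            # `elements HAS ALL "Si", "O"` combined with `nelements = 2`
            # (nelements is registered above as the length quantity of elements).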
if len(quantities) > 1:
raise Exception('HAS ONLY is not supported with zip')
quantity = quantities[0]
if quantity.length_quantity is None:
raise Exception('HAS ONLY is not supported by %s' % quantity.name)
has_all = super()._has_query_op(quantities, 'HAS ALL', predicate_zip_list)
has_length = Q('term', **{quantity.length_quantity.backend_field: len(predicate_zip_list)})
return has_all & has_length
else:
return super()._has_query_op(quantities, op, predicate_zip_list)
def property_zip_addon(self, args):
return args
def value_zip(self, args):
return self.value_list(args)
def value_zip_list(self, args):
return args |