repo_name (string, 7-71 chars) | file_path (string, 5-118 chars) | context (list) | import_statement (string, 45-12.5k chars) | token_num (int64, 641-99.4k) | cropped_code (string, 44-17k chars) | all_code (string, 43-754k chars) | next_line (string, 2-330 chars) | gold_snippet_index (int64, 0-68) | created_at (string, 25 chars) | level (string, 9 classes)
---|---|---|---|---|---|---|---|---|---|---|
daswer123/rvc-python | rvc_python/__main__.py | [
{
"identifier": "infer_file",
"path": "rvc_python/infer.py",
"snippet": "def infer_file(\n input_path,\n model_path,\n index_path = \"\",\n device = \"cpu:0\",\n f0method = \"harvest\",\n opt_path = \"out.wav\",\n index_rate = 0.5,\n filter_radius = 3,\n resample_sr = 0,\n rms_mix_rate = 1,\n protect = 0.33,\n f0up_key = 0,\n version = \"v2\"\n):\n lib_dir = os.path.dirname(os.path.abspath(__file__))\n\n download_rvc_models(lib_dir)\n config = Config(lib_dir,device)\n vc = VC(lib_dir,config)\n\n vc.get_vc(model_path,version)\n wav_opt = vc.vc_single(\n sid=1,\n input_audio_path=input_path,\n f0_up_key=f0up_key,\n f0_method=f0method,\n file_index=index_path,\n index_rate=index_rate,\n filter_radius=filter_radius,\n resample_sr=resample_sr,\n rms_mix_rate=rms_mix_rate,\n protect=protect,\n f0_file=\"\",\n file_index2=\"\"\n )\n wavfile.write(opt_path, vc.tgt_sr, wav_opt)\n return opt_path"
},
{
"identifier": "infer_files",
"path": "rvc_python/infer.py",
"snippet": "def infer_files(\n dir_path,\n model_path,\n paths=[],\n index_path=\"\",\n device=\"cuda:0\",\n f0method=\"harvest\",\n opt_dir=\"out/\",\n index_rate=0.5,\n filter_radius=3,\n resample_sr=0,\n rms_mix_rate=1,\n protect=0.33,\n f0up_key=0,\n version=\"v2\",\n out_format=\"wav\"\n):\n # Create output directory if it does not exist\n os.makedirs(opt_dir, exist_ok=True)\n\n # Determine the files to process\n audio_files = paths if paths else glob(os.path.join(dir_path, '*.*'))\n\n # Initialize some common VC-related variables outside of loop\n lib_dir = os.path.dirname(os.path.abspath(__file__))\n download_rvc_models(lib_dir)\n\n config = Config(lib_dir, device)\n\n vc = VC(lib_dir, config)\n vc.get_vc(model_path, version)\n\n processed_files = []\n\n for input_audio_path in audio_files:\n output_filename = os.path.splitext(os.path.basename(input_audio_path))[0] + '.' + out_format\n opt_path = os.path.join(opt_dir, output_filename)\n\n wav_opt = vc.vc_single(\n sid=1,\n input_audio_path=input_audio_path,\n f0_up_key=f0up_key,\n f0_method=f0method,\n file_index=index_path,\n index_rate=index_rate,\n filter_radius=filter_radius,\n resample_sr=resample_sr,\n rms_mix_rate=rms_mix_rate,\n protect=protect,\n f0_file=\"\",\n file_index2=\"\"\n )\n \n wavfile.write(opt_path, vc.tgt_sr, wav_opt)\n processed_files.append(opt_path)\n\n return processed_files"
}
] | import argparse
import sys
import os
from argparse import ArgumentParser
from rvc_python.infer import infer_file,infer_files | 1,356 |
parser = ArgumentParser(description="RVC inference")
# Create a mutually exclusive group for input - only one of them can be provided
input_group = parser.add_mutually_exclusive_group(required=True)
input_group.add_argument("-i", "--input", type=str, help="Path to input file")
input_group.add_argument("-d", "--dir", type=str, help="Directory path containing audio files")
parser.add_argument("-pi","--pitch", default=0, type=int, help="Transpose (integer, number of semitones)")
parser.add_argument("-ip","--index", type=str, nargs='?', default="", help="Path to index file (optional)")
parser.add_argument("-me","--method", type=str, default="harvest", choices=['harvest', "crepe", "rmvpe", 'pm'], help="Pitch extraction algorithm")
parser.add_argument("-v","--version", type=str, default="v2", choices=['v1', "v2"], help="Model version")
parser.add_argument("-o","--output", type=str, nargs='?', default="out.wav", help="Output path for single file, or output directory for multiple files")
parser.add_argument("-mp","--model", type=str, required=True, help="Path to model file")
parser.add_argument("-ir","--index_rate", type=float, default=0.5, help="Search feature ratio")
parser.add_argument("-de","--device", type=str, default="cuda:0", help="Device to use (e.g., cpu:0, cuda:0)")
parser.add_argument("-fr","--filter_radius", type=int, default=3, help="Apply median filtering to the pitch results")
parser.add_argument("-rsr","--resample_sr", type=int, default=0, help="Resample rate for the output audio")
parser.add_argument("-rmr","--rms_mix_rate", type=float,default=0.25 ,help="Volume envelope mix rate")
parser.add_argument("-pr",'--protect' ,type=float,default=0.33 ,help='Protect voiceless consonants and breath sounds')
args = parser.parse_args()
if args.input:
# Single file processing
inferred_path = infer_file(
input_path=args.input,
model_path=args.model,
index_path=args.index,
device=args.device,
f0method=args.method,
f0up_key=args.pitch,
opt_path=args.output,
index_rate=args.index_rate,
filter_radius=args.filter_radius,
resample_sr=args.resample_sr,
rms_mix_rate=args.rms_mix_rate,
protect=args.protect,
version=args.version
)
elif args.dir:
# Directory processing
| processed_files = infer_files( | 1 | 2023-12-26 19:05:42+00:00 | 2k |
CodeBrugs/ToolSculpt | tests/test_tools/test_tool2.py | [
{
"identifier": "process_data",
"path": "src/tools/Tool2/tool2_functions.py",
"snippet": "def process_data(data):\n \"\"\"\n Procesa los datos utilizando la lógica específica de Tool2.\n\n Parameters:\n - data (str): Datos de entrada.\n\n Returns:\n - result (str): Resultado del procesamiento.\n \"\"\"\n # Operación específica: Invertir los datos\n inverted_data = data[::-1]\n \n # Operación específica: Aplicar una transformación especial\n processed_data = inverted_data.upper() # Convertir a mayúsculas como ejemplo\n \n result = f\"Tool2 aplicó una transformación especial: {processed_data}\"\n return result"
},
{
"identifier": "analyze_data",
"path": "src/tools/Tool2/tool2_functions.py",
"snippet": "def analyze_data(data):\n \"\"\"\n Analiza los datos utilizando la lógica específica de Tool2.\n\n Parameters:\n - data (str): Datos de entrada.\n\n Returns:\n - analysis (str): Resultado del análisis.\n \"\"\"\n # Operación específica: Contar la cantidad de caracteres alfabéticos\n alphabet_count = sum(c.isalpha() for c in data)\n \n # Operación específica: Realizar un análisis basado en la cantidad de caracteres alfabéticos\n if alphabet_count > len(data) // 2:\n analysis = \"Tool2 encontró más de la mitad de los caracteres como alfabéticos.\"\n else:\n analysis = \"Tool2 encontró menos de la mitad de los caracteres como alfabéticos.\"\n \n return analysis"
},
{
"identifier": "perform_additional_task",
"path": "src/tools/Tool2/tool2_functions.py",
"snippet": "def perform_additional_task():\n \"\"\"\n Realiza una tarea adicional específica de Tool2.\n\n Returns:\n - task_result (str): Resultado de la tarea.\n \"\"\"\n # Operación específica: Realizar una tarea adicional\n task_result = \"Tool2 realizó una tarea adicional con éxito.\"\n return task_result"
}
] | import unittest
from src.tools.Tool2.tool2_functions import process_data, analyze_data, perform_additional_task | 649 | # tests/test_tools/test_tool2.py
class TestTool2Functions(unittest.TestCase):
def test_process_data(self):
# Test for the process_data function
input_data = "example_data"
result = process_data(input_data)
self.assertEqual(result, "Tool2 processed the data: A_SPECIAL_TRANSFORMATION")
def test_analyze_data(self):
# Test for the analyze_data function
input_data = "example_data"
result = analyze_data(input_data)
self.assertEqual(result, "Tool2 analyzed the data: example_data. Alphabetic characters: 11")
def test_perform_additional_task(self):
# Test for the perform_additional_task function
| result = perform_additional_task() | 2 | 2023-12-26 17:03:20+00:00 | 2k |
run-llama/rags | pages/3_🤖_Generated_RAG_Agent.py | [
{
"identifier": "add_sidebar",
"path": "st_utils.py",
"snippet": "def add_sidebar() -> None:\n \"\"\"Add sidebar.\"\"\"\n with st.sidebar:\n agent_registry = cast(AgentCacheRegistry, st.session_state.agent_registry)\n st.session_state.cur_agent_ids = agent_registry.get_agent_ids()\n choices = [\"Create a new agent\"] + st.session_state.cur_agent_ids\n\n # by default, set index to 0. if value is in selected_id, set index to that\n index = 0\n if \"selected_id\" in st.session_state.keys():\n if st.session_state.selected_id is not None:\n index = choices.index(st.session_state.selected_id)\n # display buttons\n st.radio(\n \"Agents\",\n choices,\n index=index,\n on_change=update_selected_agent,\n key=\"agent_selector\",\n )"
},
{
"identifier": "get_current_state",
"path": "st_utils.py",
"snippet": "def get_current_state() -> CurrentSessionState:\n \"\"\"Get current state.\n\n This includes current state stored in session state and derived from it, e.g.\n - agent registry\n - selected agent\n - selected cache\n - agent builder\n - builder agent\n\n \"\"\"\n # get agent registry\n agent_registry = AgentCacheRegistry(str(AGENT_CACHE_DIR))\n if \"agent_registry\" not in st.session_state.keys():\n st.session_state.agent_registry = agent_registry\n\n if \"cur_agent_ids\" not in st.session_state.keys():\n st.session_state.cur_agent_ids = agent_registry.get_agent_ids()\n\n if \"selected_id\" not in st.session_state.keys():\n st.session_state.selected_id = None\n\n # set selected cache if doesn't exist\n if (\n \"selected_cache\" not in st.session_state.keys()\n or st.session_state.selected_cache is None\n ):\n # update selected cache\n if st.session_state.selected_id is None:\n st.session_state.selected_cache = None\n else:\n # load agent from directory\n agent_registry = cast(AgentCacheRegistry, st.session_state.agent_registry)\n agent_cache = agent_registry.get_agent_cache(st.session_state.selected_id)\n st.session_state.selected_cache = agent_cache\n\n # set builder agent / agent builder\n if (\n \"builder_agent\" not in st.session_state.keys()\n or st.session_state.builder_agent is None\n or \"agent_builder\" not in st.session_state.keys()\n or st.session_state.agent_builder is None\n ):\n if (\n \"selected_cache\" in st.session_state.keys()\n and st.session_state.selected_cache is not None\n ):\n # create builder agent / tools from selected cache\n builder_agent, agent_builder = load_meta_agent_and_tools(\n cache=st.session_state.selected_cache,\n agent_registry=st.session_state.agent_registry,\n # NOTE: we will probably generalize this later into different\n # builder configs\n is_multimodal=get_cached_is_multimodal(),\n )\n else:\n # create builder agent / tools from new cache\n builder_agent, agent_builder = load_meta_agent_and_tools(\n agent_registry=st.session_state.agent_registry,\n is_multimodal=get_is_multimodal(),\n )\n\n st.session_state.builder_agent = builder_agent\n st.session_state.agent_builder = agent_builder\n\n return CurrentSessionState(\n agent_registry=st.session_state.agent_registry,\n selected_id=st.session_state.selected_id,\n selected_cache=st.session_state.selected_cache,\n agent_builder=st.session_state.agent_builder,\n cache=st.session_state.agent_builder.cache,\n builder_agent=st.session_state.builder_agent,\n )"
},
{
"identifier": "get_image_and_text_nodes",
"path": "core/utils.py",
"snippet": "def get_image_and_text_nodes(\n nodes: List[NodeWithScore],\n) -> Tuple[List[NodeWithScore], List[NodeWithScore]]:\n image_nodes = []\n text_nodes = []\n for res_node in nodes:\n if isinstance(res_node.node, ImageNode):\n image_nodes.append(res_node)\n else:\n text_nodes.append(res_node)\n return image_nodes, text_nodes"
}
] | import streamlit as st
import pandas as pd
from st_utils import add_sidebar, get_current_state
from core.utils import get_image_and_text_nodes
from llama_index.schema import MetadataMode
from llama_index.chat_engine.types import AGENT_CHAT_RESPONSE_TYPE
from typing import Dict, Optional | 1,164 | """Streamlit page showing builder config."""
####################
#### STREAMLIT #####
####################
st.set_page_config(
page_title="Generated RAG Agent",
page_icon="🦙",
layout="centered",
initial_sidebar_state="auto",
menu_items=None,
)
st.title("Generated RAG Agent")
current_state = get_current_state()
add_sidebar()
if (
"agent_messages" not in st.session_state.keys()
): # Initialize the chat messages history
st.session_state.agent_messages = [
{"role": "assistant", "content": "Ask me a question!"}
]
def display_sources(response: AGENT_CHAT_RESPONSE_TYPE) -> None:
| image_nodes, text_nodes = get_image_and_text_nodes(response.source_nodes) | 2 | 2023-11-16 07:49:44+00:00 | 2k |
open-mmlab/Amphion | modules/diffusion/bidilconv/residual_block.py | [
{
"identifier": "GaU",
"path": "modules/activation_functions/gated_activation_unit.py",
"snippet": "class GaU(nn.Module):\n r\"\"\"Gated Activation Unit (GaU) proposed in `Gated Activation Units for Neural\n Networks <https://arxiv.org/pdf/1606.05328.pdf>`_.\n\n Args:\n channels: number of input channels.\n kernel_size: kernel size of the convolution.\n dilation: dilation rate of the convolution.\n d_context: dimension of context tensor, None if don't use context.\n \"\"\"\n\n def __init__(\n self,\n channels: int,\n kernel_size: int = 3,\n dilation: int = 1,\n d_context: int = None,\n ):\n super().__init__()\n\n self.context = d_context\n\n self.conv = Conv1d(\n channels,\n channels * 2,\n kernel_size,\n dilation=dilation,\n padding=dilation * (kernel_size - 1) // 2,\n )\n\n if self.context:\n self.context_proj = Conv1d(d_context, channels * 2, 1)\n\n def forward(self, x: torch.Tensor, context: torch.Tensor = None):\n r\"\"\"Calculate forward propagation.\n\n Args:\n x: input tensor with shape [B, C, T].\n context: context tensor with shape [B, ``d_context``, T], default to None.\n \"\"\"\n\n h = self.conv(x)\n\n if self.context:\n h = h + self.context_proj(context)\n\n h1, h2 = h.chunk(2, 1)\n h = torch.tanh(h1) * torch.sigmoid(h2)\n\n return h"
},
{
"identifier": "Conv1d",
"path": "modules/general/utils.py",
"snippet": "def Conv1d(*args, **kwargs):\n r\"\"\"Wrapper of ``nn.Conv1d`` with kaiming_normal_ initialization.\"\"\"\n layer = nn.Conv1d(*args, **kwargs)\n nn.init.kaiming_normal_(layer.weight)\n return layer"
}
] | import math
import torch
import torch.nn as nn
from modules.activation_functions import GaU
from modules.general.utils import Conv1d | 701 | # Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
class ResidualBlock(nn.Module):
r"""Residual block with dilated convolution, main portion of ``BiDilConv``.
Args:
channels: The number of channels of input and output.
kernel_size: The kernel size of dilated convolution.
dilation: The dilation rate of dilated convolution.
d_context: The dimension of content encoder output, None if don't use context.
"""
def __init__(
self,
channels: int = 256,
kernel_size: int = 3,
dilation: int = 1,
d_context: int = None,
):
super().__init__()
self.context = d_context
| self.gau = GaU( | 0 | 2023-11-15 09:19:27+00:00 | 2k |
ise-uiuc/magicoder | src/magicoder/decontamination/find_substrings.py | [
{
"identifier": "FILTER_OUT",
"path": "src/magicoder/decontamination/benchmark_data.py",
"snippet": "FILTER_OUT = {k: v() for k, v in LAZY_FILTER_OUT.items()}"
},
{
"identifier": "add_dict",
"path": "src/magicoder/decontamination/utils.py",
"snippet": "def add_dict(dict1: dict, dict2: dict) -> None:\n \"\"\"\n Add the values of dict2 to dict1. All values must be int, float or dictionaries that also verify this condition.\n Will modify dict1 and return None\n \"\"\"\n for key, value in dict2.items():\n if isinstance(value, (int, float)):\n if key not in dict1:\n dict1[key] = 0\n dict1[key] += value\n elif isinstance(value, dict):\n if key not in dict1:\n dict1[key] = {}\n assert isinstance(dict1[key], dict)\n add_dict(dict1[key], value)\n else:\n raise ValueError(f\"Invalid type for key/value {key}: {value}\")"
},
{
"identifier": "shard_dataset",
"path": "src/magicoder/decontamination/utils.py",
"snippet": "def shard_dataset(ds, shard_size, output_dir, num_proc):\n if ds._indices is not None:\n dataset_nbytes = ds.data.nbytes * len(ds._indices) / len(ds.data)\n else:\n dataset_nbytes = ds.data.nbytes\n num_shards = int(dataset_nbytes / shard_size) + 1\n print(f\"Number of shards: {num_shards}\")\n\n print(\"sharding the dataset\")\n t_start = time.time()\n shards = (\n ds.shard(num_shards=num_shards, index=i, contiguous=True)\n for i in range(num_shards)\n )\n # use f\"{OUT_PATH}/data/train-{index:05d}-of-{num_shards:05d}.json\" instead for json files\n filenames = (\n f\"{output_dir}/train-{index:05d}-of-{num_shards:05d}.parquet\"\n for index in range(num_shards)\n )\n\n with Pool(num_proc) as p:\n list(\n tqdm(\n p.imap_unordered(save_shard, zip(filenames, shards), chunksize=4),\n total=num_shards,\n )\n )\n print(f\"Time to save dataset: {time.time()-t_start:.2f}\")"
}
] | import argparse
import json
import os
import shutil
from copy import deepcopy
from glob import glob
from pathlib import Path
from datasets import load_dataset
from magicoder.utils import write_jsonl
from .benchmark_data import FILTER_OUT
from .utils import add_dict, shard_dataset | 1,404 | # type: ignore
"""Migrated from: https://github.com/bigcode-project/bigcode-dataset. License: Apache 2.0"""
SHARD_SIZE = 1000 << 20 # 1GB
LANGUAGE_COL = "lang"
# LANGUAGES = ["Python", "Java", "JavaScript"]
def dump_benchmarks(file_path: str):
"""
Dump the dictionary of benchmark samples that are filtered out
"""
with open(file_path, "w") as f:
json.dump(FILTER_OUT, f, indent=2)
def filter_reason_to_benchmark_name(filter_reason: str):
assert filter_reason.endswith("_match")
return filter_reason[:-6]
def benchmark_name_to_filter_reason(benchmark_name: str):
return f"{benchmark_name}_match"
def update_benchmark_dict(
filter_out: dict, benchmark_cache: str, excluded_data_cache: str
):
"""
Iterates on current benchmark-samples. If a sample is found in the cached benchmark-samples, it is removed (it does not need to be searched),
and the corresponding data-samples from the cache are added to `exclude_data`
Returns:
- `updated`: an updated benchmark dict where samples from the cache are removed (they do not need to be searched anymore)
- `exclude_data`: a list of files to remove from the dataset
"""
updated = deepcopy(filter_out)
exclude_data = []
with open(benchmark_cache) as f:
benchmark_cache = json.load(f)
with open(excluded_data_cache) as f:
excluded_data_cache = json.load(f)
for bench, samples in filter_out.items():
for bench_sample in samples:
# Benchmark-sample was found in cache
if bench in benchmark_cache and bench_sample in benchmark_cache[bench]:
# No need to search for this sample in the dataset
updated[bench].remove(bench_sample)
# Corresponding data-samples will be excluded from the dataset.
exclude_data += [
data_sample
for data_sample in excluded_data_cache
if data_sample["filter_reason"]
== benchmark_name_to_filter_reason(bench)
and data_sample["matched_substring"] == bench_sample
]
print("After loading cache, will search for:")
for benchmark, values in updated.items():
print(f" num strings from {benchmark}: {len(values)}")
# Remove empty benchmarks
updated = {key: value for key, value in updated.items() if len(value) > 0}
return updated, exclude_data
def find_substrings(data, columns, filter_out, return_matched=False):
"""
filter_out: Dict[str, List[str]] mapping from benchmark name to list of strings that need to be
filtered-out.
Return True, None if the file should be included in the dataset.
Otherwise return False and some metadata about the file excluded
"""
content = "\n\n".join([data[col].lower() for col in columns])
# For each substring, try to find it in the file (case insensitive)
for benchmark, substrings in filter_out.items():
for substring in substrings:
if substring.lower() in content:
if return_matched:
return False, benchmark_name_to_filter_reason(benchmark), substring
else:
return False, benchmark_name_to_filter_reason(benchmark)
# Return True, None if none of the substrings was found
if return_matched:
return True, None, None
else:
return True, None
def aggregate_meta(tmp_meta_dir: str):
res = {}
for file in glob(f"{tmp_meta_dir}/*-meta.json"):
with open(file, "r") as f:
meta = json.load(f)
| add_dict(res, meta) | 1 | 2023-11-10 07:35:29+00:00 | 2k |
KwaiKEG/KwaiAgents | kwaiagents/tools/timedelta.py | [
{
"identifier": "Config",
"path": "kwaiagents/config.py",
"snippet": "class Config(object):\n def __init__(self) -> None:\n \"\"\"Initialize the Config class\"\"\"\n self.fast_llm_model = \"gpt-3.5-turbo\"\n self.smart_llm_model = \"gpt-4\"\n self.use_local_llm = False\n self.local_llm_host = \"localhost\"\n self.local_llm_port = 8888\n self.browse_chunk_max_length = 4096\n self.browse_summary_max_token = 300\n self.selenium_web_browser = \"chrome\"\n self.llm_max_retries = 5\n self.temperature = 1.0\n self.max_tokens_num = 4096\n self.chain_logger = ChainMessageLogger()\n\n def __str__(self):\n s = \"============ CONFIG ============\\n\"\n for key, val in self.__dict__.items():\n s += \"· \" + key.upper() + \":\\t\" + str(val) + '\\n'\n return s\n\n def to_json_file(self, fname):\n with open(fname, \"w\") as f:\n json.dump({k:v for k, v in self.__dict__.items() if k not in [\"chain_logger\"]},f, ensure_ascii=False, indent=2)\n\n def set_chain_logger(self, chain_logger):\n self.chain_logger = chain_logger"
},
{
"identifier": "BaseResult",
"path": "kwaiagents/tools/base.py",
"snippet": "class BaseResult(object):\n def __init__(self, json_data):\n self.json_data = json_data\n\n def __str__(self):\n return pprint.pformat(self.json_data)\n\n @property\n def answer(self):\n return \"\"\n\n @property\n def answer_md(self):\n return self.answer\n\n @property\n def urls(self):\n return list()\n\n @property\n def prompt_responses(self):\n return list()"
},
{
"identifier": "BaseTool",
"path": "kwaiagents/tools/base.py",
"snippet": "class BaseTool(object):\n def __init__(self, *args, **kwargs):\n pass\n\n def __call__(self):\n return BaseResult({})"
}
] | from datetime import datetime
from dateutil.relativedelta import relativedelta
from kwaiagents.config import Config
from kwaiagents.tools.base import BaseResult, BaseTool | 643 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: PAN Leyi
# Email: [email protected]
class TimeDeltaResult(BaseResult):
@property
def answer(self):
item = self.json_data
rst = ""
for key in item.keys():
rst += f'{key}: {item[key]}\n'
return rst
| class TimeDeltaTool(BaseTool): | 2 | 2023-11-13 03:37:02+00:00 | 2k |
EnVision-Research/LucidDreamer | scene/dataset_readers.py | [
{
"identifier": "getWorld2View2",
"path": "utils/graphics_utils.py",
"snippet": "def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):\n Rt = np.zeros((4, 4))\n Rt[:3, :3] = R.transpose()\n Rt[:3, 3] = t\n Rt[3, 3] = 1.0\n\n C2W = np.linalg.inv(Rt)\n cam_center = C2W[:3, 3]\n cam_center = (cam_center + translate) * scale\n C2W[:3, 3] = cam_center\n Rt = np.linalg.inv(C2W)\n return np.float32(Rt)"
},
{
"identifier": "focal2fov",
"path": "utils/graphics_utils.py",
"snippet": "def focal2fov(focal, pixels):\n return 2*math.atan(pixels/(2*focal))"
},
{
"identifier": "fov2focal",
"path": "utils/graphics_utils.py",
"snippet": "def fov2focal(fov, pixels):\n return pixels / (2 * math.tan(fov / 2))"
},
{
"identifier": "init_from_pointe",
"path": "utils/pointe_utils.py",
"snippet": "def init_from_pointe(prompt):\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n print('creating base model...')\n base_name = 'base40M-textvec'\n base_model = model_from_config(MODEL_CONFIGS[base_name], device)\n base_model.eval()\n base_diffusion = diffusion_from_config(DIFFUSION_CONFIGS[base_name])\n print('creating upsample model...')\n upsampler_model = model_from_config(MODEL_CONFIGS['upsample'], device)\n upsampler_model.eval()\n upsampler_diffusion = diffusion_from_config(DIFFUSION_CONFIGS['upsample'])\n print('downloading base checkpoint...')\n base_model.load_state_dict(load_checkpoint(base_name, device))\n print('downloading upsampler checkpoint...')\n upsampler_model.load_state_dict(load_checkpoint('upsample', device))\n sampler = PointCloudSampler(\n device=device,\n models=[base_model, upsampler_model],\n diffusions=[base_diffusion, upsampler_diffusion],\n num_points=[1024, 4096 - 1024],\n aux_channels=['R', 'G', 'B'],\n guidance_scale=[3.0, 0.0],\n model_kwargs_key_filter=('texts', ''), # Do not condition the upsampler at all\n )\n # Produce a sample from the model.\n samples = None\n for x in tqdm(sampler.sample_batch_progressive(batch_size=1, model_kwargs=dict(texts=[prompt]))):\n samples = x\n\n pc = sampler.output_to_point_clouds(samples)[0]\n xyz = pc.coords\n rgb = np.zeros_like(xyz)\n rgb[:,0],rgb[:,1],rgb[:,2] = pc.channels['R'],pc.channels['G'],pc.channels['B']\n return xyz,rgb"
},
{
"identifier": "SH2RGB",
"path": "utils/sh_utils.py",
"snippet": "def SH2RGB(sh):\n return sh * C0 + 0.5"
},
{
"identifier": "inverse_sigmoid_np",
"path": "utils/general_utils.py",
"snippet": "def inverse_sigmoid_np(x):\n return np.log(x/(1-x))"
},
{
"identifier": "BasicPointCloud",
"path": "scene/gaussian_model.py",
"snippet": "class GaussianModel:\n def setup_functions(self):\n def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):\n def __init__(self, sh_degree : int):\n def capture(self):\n def restore(self, model_args, training_args):\n def get_scaling(self):\n def get_rotation(self):\n def get_xyz(self):\n def get_background(self):\n def get_features(self):\n def get_opacity(self):\n def get_covariance(self, scaling_modifier = 1):\n def oneupSHdegree(self):\n def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):\n def training_setup(self, training_args):\n def update_learning_rate(self, iteration):\n def update_feature_learning_rate(self, iteration):\n def update_rotation_learning_rate(self, iteration):\n def update_scaling_learning_rate(self, iteration):\n def construct_list_of_attributes(self):\n def save_ply(self, path):\n def reset_opacity(self):\n def load_ply(self, path):\n def replace_tensor_to_optimizer(self, tensor, name):\n def _prune_optimizer(self, mask):\n def prune_points(self, mask):\n def cat_tensors_to_optimizer(self, tensors_dict):\n def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):\n def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):\n def densify_and_clone(self, grads, grad_threshold, scene_extent):\n def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):\n def add_densification_stats(self, viewspace_point_tensor, update_filter):\n L = build_scaling_rotation(scaling_modifier * scaling, rotation)"
}
] | import os
import sys
import torch
import random
import torch.nn.functional as F
import numpy as np
import json
from PIL import Image
from typing import NamedTuple
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
from pathlib import Path
from utils.pointe_utils import init_from_pointe
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from utils.general_utils import inverse_sigmoid_np
from scene.gaussian_model import BasicPointCloud | 1,413 | #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#
class RandCameraInfo(NamedTuple):
uid: int
R: np.array
T: np.array
FovY: np.array
FovX: np.array
width: int
height: int
delta_polar : np.array
delta_azimuth : np.array
delta_radius : np.array
class SceneInfo(NamedTuple):
| point_cloud: BasicPointCloud | 6 | 2023-11-18 08:05:50+00:00 | 2k |
VRSEN/agency-swarm | agency_swarm/tools/browsing/GoBack.py | [
{
"identifier": "BaseTool",
"path": "agency_swarm/tools/base_tool.py",
"snippet": "class BaseTool(OpenAISchema, ABC):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n # # Exclude 'run' method from Pydantic model fields\n # self.model_fields.pop(\"run\", None)\n\n @abstractmethod\n def run(self, **kwargs):\n pass"
},
{
"identifier": "get_web_driver",
"path": "agency_swarm/tools/browsing/util/selenium.py",
"snippet": "def get_web_driver():\n try:\n from selenium import webdriver\n from selenium.webdriver.chrome.service import Service as ChromeService\n except ImportError:\n print(\"Selenium not installed. Please install it with pip install selenium\")\n raise ImportError\n\n try:\n from webdriver_manager.chrome import ChromeDriverManager\n except ImportError:\n print(\"webdriver_manager not installed. Please install it with pip install webdriver-manager\")\n raise ImportError\n\n try:\n from selenium_stealth import stealth\n except ImportError:\n print(\"selenium_stealth not installed. Please install it with pip install selenium-stealth\")\n raise ImportError\n\n global wd\n\n if wd:\n return wd\n\n global selenium_config\n chrome_profile_path = selenium_config.get(\"chrome_profile_path\", None)\n profile_directory = None\n user_data_dir = None\n if isinstance(chrome_profile_path, str) and os.path.exists(chrome_profile_path):\n profile_directory = os.path.split(chrome_profile_path)[-1].strip(\"\\\\\").rstrip(\"/\")\n user_data_dir = os.path.split(chrome_profile_path)[0].strip(\"\\\\\").rstrip(\"/\")\n print(f\"Using Chrome profile: {profile_directory}\")\n print(f\"Using Chrome user data dir: {user_data_dir}\")\n print(f\"Using Chrome profile path: {chrome_profile_path}\")\n\n chrome_options = webdriver.ChromeOptions()\n # Removed headless and other options for debugging purposes\n\n chrome_driver_path = ChromeDriverManager().install()\n\n if selenium_config.get(\"headless\", False):\n chrome_options.add_argument('--headless')\n chrome_options.add_argument(\"--window-size=960,1080\")\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--disable-dev-shm-usage')\n chrome_options.add_argument(\"--disable-extensions\")\n chrome_options.add_argument(\"--disable-popup-blocking\")\n chrome_options.add_argument(\"--disable-web-security\")\n chrome_options.add_argument(\"--allow-running-insecure-content\")\n chrome_options.add_argument(\"--disable-gpu\")\n chrome_options.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\n chrome_options.add_experimental_option('useAutomationExtension', False)\n if user_data_dir and profile_directory:\n chrome_options.add_argument(f\"user-data-dir={user_data_dir}\")\n chrome_options.add_argument(f\"profile-directory={profile_directory}\")\n\n try:\n wd = webdriver.Chrome(service=ChromeService(chrome_driver_path), options=chrome_options)\n print(\"WebDriver initialized successfully.\")\n # Print the actual profile path being used\n if wd.capabilities['chrome']['userDataDir']:\n print(f\"Profile path in use: {wd.capabilities['chrome']['userDataDir']}\")\n except Exception as e:\n print(f\"Error initializing WebDriver: {e}\")\n raise\n\n stealth(\n wd,\n languages=[\"en-US\", \"en\"],\n vendor=\"Google Inc.\",\n platform=\"Win32\",\n webgl_vendor=\"Intel Inc.\",\n renderer=\"Intel Iris OpenGL Engine\",\n fix_hairline=True,\n )\n\n # wd.set_window_size(960, 1080)\n wd.implicitly_wait(3)\n\n return wd"
},
{
"identifier": "set_web_driver",
"path": "agency_swarm/tools/browsing/util/selenium.py",
"snippet": "def set_web_driver(new_wd):\n global wd\n wd = remove_highlight_and_labels(wd)\n wd = new_wd"
}
] | import time
from agency_swarm.tools import BaseTool
from agency_swarm.tools.browsing.util.selenium import get_web_driver, set_web_driver | 982 |
class GoBack(BaseTool):
"""
This tool allows you to go back 1 page in the browser history. Use it in case of a mistake or if a page shows you unexpected content.
"""
def run(self):
| wd = get_web_driver() | 1 | 2023-11-16 02:29:26+00:00 | 2k |
resemble-ai/resemble-enhance | resemble_enhance/denoiser/denoiser.py | [
{
"identifier": "MelSpectrogram",
"path": "resemble_enhance/melspec.py",
"snippet": "class MelSpectrogram(nn.Module):\n def __init__(self, hp: HParams):\n \"\"\"\n Torch implementation of Resemble's mel extraction.\n Note that the values are NOT identical to librosa's implementation\n due to floating point precisions.\n \"\"\"\n super().__init__()\n self.hp = hp\n self.melspec = TorchMelSpectrogram(\n hp.wav_rate,\n n_fft=hp.n_fft,\n win_length=hp.win_size,\n hop_length=hp.hop_size,\n f_min=0,\n f_max=hp.wav_rate // 2,\n n_mels=hp.num_mels,\n power=1,\n normalized=False,\n # NOTE: Folowing librosa's default.\n pad_mode=\"constant\",\n norm=\"slaney\",\n mel_scale=\"slaney\",\n )\n self.register_buffer(\"stft_magnitude_min\", torch.FloatTensor([hp.stft_magnitude_min]))\n self.min_level_db = 20 * np.log10(hp.stft_magnitude_min)\n self.preemphasis = hp.preemphasis\n self.hop_size = hp.hop_size\n\n def forward(self, wav, pad=True):\n \"\"\"\n Args:\n wav: [B, T]\n \"\"\"\n device = wav.device\n if wav.is_mps:\n wav = wav.cpu()\n self.to(wav.device)\n if self.preemphasis > 0:\n wav = torch.nn.functional.pad(wav, [1, 0], value=0)\n wav = wav[..., 1:] - self.preemphasis * wav[..., :-1]\n mel = self.melspec(wav)\n mel = self._amp_to_db(mel)\n mel_normed = self._normalize(mel)\n assert not pad or mel_normed.shape[-1] == 1 + wav.shape[-1] // self.hop_size # Sanity check\n mel_normed = mel_normed.to(device)\n return mel_normed # (M, T)\n\n def _normalize(self, s, headroom_db=15):\n return (s - self.min_level_db) / (-self.min_level_db + headroom_db)\n\n def _amp_to_db(self, x):\n return x.clamp_min(self.hp.stft_magnitude_min).log10() * 20"
},
{
"identifier": "HParams",
"path": "resemble_enhance/denoiser/hparams.py",
"snippet": "class HParams(HParamsBase):\n batch_size_per_gpu: int = 128\n distort_prob: float = 0.5"
},
{
"identifier": "UNet",
"path": "resemble_enhance/denoiser/unet.py",
"snippet": "class UNet(nn.Module):\n def __init__(self, input_dim, output_dim, hidden_dim=16, num_blocks=4, num_middle_blocks=2):\n super().__init__()\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.input_proj = nn.Conv2d(input_dim, hidden_dim, 3, padding=1)\n self.encoder_blocks = nn.ModuleList(\n [\n UNetBlock(input_dim=hidden_dim * 2**i, output_dim=hidden_dim * 2 ** (i + 1), scale_factor=0.5)\n for i in range(num_blocks)\n ]\n )\n self.middle_blocks = nn.ModuleList(\n [UNetBlock(input_dim=hidden_dim * 2**num_blocks) for _ in range(num_middle_blocks)]\n )\n self.decoder_blocks = nn.ModuleList(\n [\n UNetBlock(input_dim=hidden_dim * 2 ** (i + 1), output_dim=hidden_dim * 2**i, scale_factor=2)\n for i in reversed(range(num_blocks))\n ]\n )\n self.head = nn.Sequential(\n nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1),\n nn.GELU(),\n nn.Conv2d(hidden_dim, output_dim, 1),\n )\n\n @property\n def scale_factor(self):\n return 2 ** len(self.encoder_blocks)\n\n def pad_to_fit(self, x):\n \"\"\"\n Args:\n x: (b c h w), input\n Returns:\n x: (b c h' w'), padded input\n \"\"\"\n hpad = (self.scale_factor - x.shape[2] % self.scale_factor) % self.scale_factor\n wpad = (self.scale_factor - x.shape[3] % self.scale_factor) % self.scale_factor\n return F.pad(x, (0, wpad, 0, hpad))\n\n def forward(self, x):\n \"\"\"\n Args:\n x: (b c h w), input\n Returns:\n o: (b c h w), output\n \"\"\"\n shape = x.shape\n\n x = self.pad_to_fit(x)\n x = self.input_proj(x)\n\n s_list = []\n for block in self.encoder_blocks:\n x, s = block(x)\n s_list.append(s)\n\n for block in self.middle_blocks:\n x, _ = block(x)\n\n for block, s in zip(self.decoder_blocks, reversed(s_list)):\n x, _ = block(x, s)\n\n x = self.head(x)\n x = x[..., : shape[2], : shape[3]]\n\n return x\n\n def test(self, shape=(3, 512, 256)):\n import ptflops\n\n macs, params = ptflops.get_model_complexity_info(\n self,\n shape,\n as_strings=True,\n print_per_layer_stat=True,\n verbose=True,\n )\n\n print(f\"macs: {macs}\")\n print(f\"params: {params}\")"
}
] | import logging
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from ..melspec import MelSpectrogram
from .hparams import HParams
from .unet import UNet | 1,595 |
logger = logging.getLogger(__name__)
def _normalize(x: Tensor) -> Tensor:
return x / (x.abs().max(dim=-1, keepdim=True).values + 1e-7)
class Denoiser(nn.Module):
@property
def stft_cfg(self) -> dict:
hop_size = self.hp.hop_size
return dict(hop_length=hop_size, n_fft=hop_size * 4, win_length=hop_size * 4)
@property
def n_fft(self):
return self.stft_cfg["n_fft"]
@property
def eps(self):
return 1e-7
def __init__(self, hp: HParams):
super().__init__()
self.hp = hp
self.net = UNet(input_dim=3, output_dim=3)
|
logger = logging.getLogger(__name__)
def _normalize(x: Tensor) -> Tensor:
return x / (x.abs().max(dim=-1, keepdim=True).values + 1e-7)
class Denoiser(nn.Module):
@property
def stft_cfg(self) -> dict:
hop_size = self.hp.hop_size
return dict(hop_length=hop_size, n_fft=hop_size * 4, win_length=hop_size * 4)
@property
def n_fft(self):
return self.stft_cfg["n_fft"]
@property
def eps(self):
return 1e-7
def __init__(self, hp: HParams):
super().__init__()
self.hp = hp
self.net = UNet(input_dim=3, output_dim=3) | self.mel_fn = MelSpectrogram(hp) | 0 | 2023-11-15 08:15:51+00:00 | 2k |
PKU-YuanGroup/Chat-UniVi | ChatUniVi/model/builder.py | [
{
"identifier": "DEFAULT_IMAGE_PATCH_TOKEN",
"path": "ChatUniVi/constants.py",
"snippet": "DEFAULT_IMAGE_PATCH_TOKEN = \"<im_patch>\""
},
{
"identifier": "DEFAULT_IM_START_TOKEN",
"path": "ChatUniVi/constants.py",
"snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\""
},
{
"identifier": "DEFAULT_IM_END_TOKEN",
"path": "ChatUniVi/constants.py",
"snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\""
}
] | import os
import shutil
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, BitsAndBytesConfig
from ChatUniVi.model import *
from ChatUniVi.constants import DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from accelerate import init_empty_weights, load_checkpoint_and_dispatch
from transformers import AutoConfig, AutoModelForCausalLM
from huggingface_hub import hf_hub_download
from peft import PeftModel
from peft import PeftModel | 1,265 |
def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map="auto"):
kwargs = {"device_map": device_map}
if load_8bit:
kwargs['load_in_8bit'] = True
elif load_4bit:
kwargs['load_in_4bit'] = True
kwargs['quantization_config'] = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=torch.float16,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type='nf4'
)
else:
kwargs['torch_dtype'] = torch.float16
if 'chatunivi' in model_name.lower():
# Load ChatUniVi model
if 'lora' in model_name.lower() and model_base is not None:
lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
print('Loading ChatUniVi from base model...')
model = ChatUniViLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)
token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features
if model.lm_head.weight.shape[0] != token_num:
model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))
model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))
print('Loading additional ChatUniVi weights...')
if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):
non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')
else:
# this is probably from HF Hub
def load_from_hf(repo_id, filename, subfolder=None):
cache_file = hf_hub_download(
repo_id=repo_id,
filename=filename,
subfolder=subfolder)
return torch.load(cache_file, map_location='cpu')
non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin')
non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}
if any(k.startswith('model.model.') for k in non_lora_trainables):
non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}
model.load_state_dict(non_lora_trainables, strict=False)
print('Loading LoRA weights...')
model = PeftModel.from_pretrained(model, model_path)
print('Merging LoRA weights...')
model = model.merge_and_unload()
print('Model is loaded...')
elif model_base is not None:
# this may be mm projector only
print('Loading ChatUniVi from base model...')
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
cfg_pretrained = AutoConfig.from_pretrained(model_path)
model = ChatUniViLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)
mm_projector_weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu')
mm_projector_weights = {k: v.to(torch.float16) for k, v in mm_projector_weights.items()}
model.load_state_dict(mm_projector_weights, strict=False)
else:
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
else:
# Load language model
if model_base is not None:
# PEFT model
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(model_base, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map="auto")
print(f"Loading LoRA weights from {model_path}")
model = PeftModel.from_pretrained(model, model_path)
print(f"Merging weights")
model = model.merge_and_unload()
print('Convert to FP16...')
model.to(torch.float16)
else:
use_fast = False
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
image_processor = None
if 'chatunivi' in model_name.lower():
mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
mm_use_im_patch_token = getattr(model.config, "mm_use_im_patch_token", True)
if mm_use_im_patch_token:
| tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True) | 0 | 2023-11-13 11:52:56+00:00 | 2k |
tatsu-lab/gpt_paper_assistant | filter_papers.py | [
{
"identifier": "Paper",
"path": "arxiv_scraper.py",
"snippet": "class Paper:\n # paper class should track the list of authors, paper title, abstract, arxiv id\n authors: List[str]\n title: str\n abstract: str\n arxiv_id: str\n\n # add a hash function using arxiv_id\n def __hash__(self):\n return hash(self.arxiv_id)"
},
{
"identifier": "EnhancedJSONEncoder",
"path": "arxiv_scraper.py",
"snippet": "class EnhancedJSONEncoder(json.JSONEncoder):\n def default(self, o):\n if dataclasses.is_dataclass(o):\n return dataclasses.asdict(o)\n return super().default(o)"
}
] | import configparser
import dataclasses
import json
import os
import re
import retry
from collections import defaultdict
from typing import List
from openai import OpenAI
from tqdm import tqdm
from arxiv_scraper import Paper
from arxiv_scraper import EnhancedJSONEncoder | 1,210 |
def filter_by_author(all_authors, papers, author_targets, config):
# filter and parse the papers
selected_papers = {} # pass to output
all_papers = {} # dict for later filtering
sort_dict = {} # dict storing key and score
# author based selection
for paper in papers:
all_papers[paper.arxiv_id] = paper
if config["FILTERING"].getboolean("author_match"):
for author in paper.authors:
if author in all_authors:
for alias in all_authors[author]:
if alias["authorId"] in author_targets:
selected_papers[paper.arxiv_id] = {
**dataclasses.asdict(paper),
**{"COMMENT": "Author match"},
}
sort_dict[paper.arxiv_id] = float(
config["SELECTION"]["author_match_score"]
)
break
return selected_papers, all_papers, sort_dict
def filter_papers_by_hindex(all_authors, papers, config):
# filters papers by checking to see if there's at least one author with > hcutoff hindex
paper_list = []
for paper in papers:
max_h = 0
for author in paper.authors:
if author in all_authors:
max_h = max(
max_h, max([alias["hIndex"] for alias in all_authors[author]])
)
if max_h >= float(config["FILTERING"]["hcutoff"]):
paper_list.append(paper)
return paper_list
def calc_price(model, usage):
if model == "gpt-4-1106-preview":
return (0.01 * usage.prompt_tokens + 0.03 * usage.completion_tokens) / 1000.0
if model == "gpt-4":
return (0.03 * usage.prompt_tokens + 0.06 * usage.completion_tokens) / 1000.0
if (model == "gpt-3.5-turbo") or (model == "gpt-3.5-turbo-1106"):
return (0.0015 * usage.prompt_tokens + 0.002 * usage.completion_tokens) / 1000.0
@retry.retry(tries=3, delay=2)
def call_chatgpt(full_prompt, openai_client, model, num_samples):
return openai_client.chat.completions.create(
model=model,
messages=[{"role": "user", "content": full_prompt}],
temperature=0.0,
n=int(num_samples),
seed=0,
)
def run_and_parse_chatgpt(full_prompt, openai_client, config):
# just runs the chatgpt prompt, tries to parse the resulting JSON
completion = call_chatgpt(
full_prompt,
openai_client,
config["SELECTION"]["model"],
config["FILTERING"]["num_samples"],
)
json_dicts = defaultdict(list)
for choice in completion.choices:
out_text = choice.message.content
out_text = re.sub("```jsonl\n", "", out_text)
out_text = re.sub("```", "", out_text)
out_text = re.sub(r"\n+", "\n", out_text)
out_text = re.sub("},", "}", out_text).strip()
# split out_text line by line and parse each as a json.
for line in out_text.split("\n"):
# try catch block to attempt to parse json
try:
loaded_output = json.loads(line)
json_dicts[loaded_output["ARXIVID"]].append(loaded_output)
except Exception as ex:
if config["OUTPUT"].getboolean("debug_messages"):
print("Exception happened " + str(ex))
print("Failed to parse LM output as json")
print(out_text)
print("RAW output")
print(completion.choices[0].message.content)
continue
all_dict = []
for id, json_list in json_dicts.items():
rel_score = sum([float(jdict["RELEVANCE"]) for jdict in json_list]) / float(
len(json_list)
)
nov_score = sum([float(jdict["NOVELTY"]) for jdict in json_list]) / float(
len(json_list)
)
new_dict = {
"ARXIVID": json_list[0]["ARXIVID"],
"COMMENT": json_list[0]["COMMENT"],
"RELEVANCE": rel_score,
"NOVELTY": nov_score,
}
all_dict.append(new_dict)
return all_dict, calc_price(config["SELECTION"]["model"], completion.usage)
| def paper_to_string(paper_entry: Paper) -> str: | 0 | 2023-11-13 15:19:38+00:00 | 2k |
BobaZooba/xllm | tests/unit/datasets/test_registry.py | [
{
"identifier": "enums",
"path": "src/xllm/enums.py",
"snippet": "class General:\nclass Transformers:\nclass Registry:\nclass Datasets:\nclass Collators:\nclass Trainers:\nclass Experiments:\nclass EnvironmentVariables:\nclass LogLevel:"
},
{
"identifier": "datasets_registry",
"path": "src/xllm/datasets/registry.py",
"snippet": ""
},
{
"identifier": "SodaDataset",
"path": "src/xllm/datasets/soda.py",
"snippet": "class SodaDataset(BaseDataset):\n HEADER_KEY = \"header\"\n DIALOG_KEY = \"dialog\"\n\n _HF_DATASET_ID = \"allenai/soda\"\n\n def __init__(self, data: List[RawSample], header_drop_probability: float = 0.05):\n super().__init__(data=data)\n self.header_drop_probability = header_drop_probability\n\n @classmethod\n def get_data(cls, config: Config) -> Optional[Tuple[List[RawSample], Optional[List[RawSample]]]]:\n soda_dataset = datasets.load_dataset(cls._HF_DATASET_ID)\n\n parsed_data: Dict[str, List[RawSample]] = dict()\n\n known_indices = set()\n\n for split in [\"train\", \"test\"]:\n parsed_data[split] = list()\n\n for sample in tqdm(soda_dataset[split], desc=f\"Parsing SODA {split}\"):\n index = sample.get(\"original_index\")\n\n if index in known_indices:\n continue\n\n parsed_sample = {\n cls.HEADER_KEY: sample.get(\"narrative\"),\n cls.DIALOG_KEY: [\n f\"{speaker}: {phrase}\"\n for speaker, phrase in zip(sample.get(\"speakers\"), sample.get(\"dialogue\"))\n ],\n }\n\n parsed_data[split].append(parsed_sample)\n known_indices.add(index)\n\n train = parsed_data[\"train\"]\n valid = parsed_data[\"test\"]\n\n return train, valid\n\n def get_sample(self, index: int) -> RawSample:\n sample = self.data[index]\n\n dialog = sample[self.DIALOG_KEY]\n\n phrases = list()\n\n if not isinstance(dialog, list):\n raise ValueError(f\"{self.DIALOG_KEY} of sample is not a list: {type(dialog)}\")\n\n for phrase in dialog:\n if isinstance(phrase, str):\n phrases.append(phrase)\n\n if self.HEADER_KEY in sample:\n header = sample[self.HEADER_KEY]\n\n is_drop_header = np.random.rand() <= self.header_drop_probability\n\n if not is_drop_header and isinstance(header, str):\n phrases.insert(0, header)\n\n sample = {enums.General.text_parts: [phrase.replace(\"\\n\", \" \").replace(\"\\r\", \" \") for phrase in phrases]}\n\n return sample"
},
{
"identifier": "DATA",
"path": "tests/helpers/dummy_data.py",
"snippet": "DATA = [\n {\n enums.General.text_parts: [\n \"Person 1: Hello\",\n \"Person 2: It's me\",\n \"Person 1: I was wondering\",\n ]\n },\n {\n enums.General.text_parts: [\n \"You are a sith lord\",\n \"Kenobi: Hello there\",\n \"General Grievous: General Kenobi\",\n ]\n },\n]"
}
] | from src.xllm import enums
from src.xllm.datasets.registry import datasets_registry
from src.xllm.datasets.soda import SodaDataset
from tests.helpers.dummy_data import DATA | 967 | # Copyright 2023 Boris Zubarev. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def test_get_soda_dataset() -> None:
dataset_cls = datasets_registry.get(key=enums.Datasets.soda)
dataset = dataset_cls(data=DATA)
| assert isinstance(dataset, SodaDataset) | 2 | 2023-11-10 17:55:03+00:00 | 2k |
banodoco/Steerable-Motion | imports/AdvancedControlNet/latent_keyframe_nodes.py | [
{
"identifier": "LatentKeyframeImport",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "class LatentKeyframeImport:\n def __init__(self, batch_index: int, strength: float) -> None:\n self.batch_index = batch_index\n self.strength = strength"
},
{
"identifier": "LatentKeyframeGroupImport",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "class LatentKeyframeGroupImport:\n def __init__(self) -> None:\n self.keyframes: list[LatentKeyframeImport] = []\n\n def add(self, keyframe: LatentKeyframeImport) -> None:\n added = False\n # replace existing keyframe if same batch_index\n for i in range(len(self.keyframes)):\n if self.keyframes[i].batch_index == keyframe.batch_index:\n self.keyframes[i] = keyframe\n added = True\n break\n if not added:\n self.keyframes.append(keyframe)\n self.keyframes.sort(key=lambda k: k.batch_index)\n \n def get_index(self, index: int) -> Union[LatentKeyframeImport, None]:\n try:\n return self.keyframes[index]\n except IndexError:\n return None\n \n def __getitem__(self, index) -> LatentKeyframeImport:\n return self.keyframes[index]\n \n def is_empty(self) -> bool:\n return len(self.keyframes) == 0\n\n def clone(self) -> 'LatentKeyframeGroupImport':\n cloned = LatentKeyframeGroupImport()\n for tk in self.keyframes:\n cloned.add(tk)\n return cloned"
},
{
"identifier": "StrengthInterpolationImport",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "class StrengthInterpolationImport:\n LINEAR = \"linear\"\n EASE_IN = \"ease-in\"\n EASE_OUT = \"ease-out\"\n EASE_IN_OUT = \"ease-in-out\"\n NONE = \"none\""
},
{
"identifier": "logger",
"path": "imports/AdvancedControlNet/logger.py",
"snippet": "class ColoredFormatter(logging.Formatter):\n COLORS = {\n \"DEBUG\": \"\\033[0;36m\", # CYAN\n \"INFO\": \"\\033[0;32m\", # GREEN\n \"WARNING\": \"\\033[0;33m\", # YELLOW\n \"ERROR\": \"\\033[0;31m\", # RED\n \"CRITICAL\": \"\\033[0;37;41m\", # WHITE ON RED\n \"RESET\": \"\\033[0m\", # RESET COLOR\n }\n def format(self, record):"
}
] | from typing import Union
from collections.abc import Iterable
from .control import LatentKeyframeImport, LatentKeyframeGroupImport
from .control import StrengthInterpolationImport as SI
from .logger import logger
import numpy as np | 934 |
class LatentKeyframeNodeImport:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"batch_index": ("INT", {"default": 0, "min": -1000, "max": 1000, "step": 1}),
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ),
},
"optional": {
"prev_latent_kf": ("LATENT_KEYFRAME", ),
}
}
RETURN_NAMES = ("LATENT_KF", )
RETURN_TYPES = ("LATENT_KEYFRAME", )
FUNCTION = "load_keyframe"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/keyframes"
def load_keyframe(self,
batch_index: int,
strength: float,
prev_latent_kf: LatentKeyframeGroupImport=None,
prev_latent_keyframe: LatentKeyframeGroupImport=None, # old name
):
prev_latent_keyframe = prev_latent_keyframe if prev_latent_keyframe else prev_latent_kf
if not prev_latent_keyframe:
prev_latent_keyframe = LatentKeyframeGroupImport()
else:
prev_latent_keyframe = prev_latent_keyframe.clone()
| keyframe = LatentKeyframeImport(batch_index, strength) | 0 | 2023-11-11 01:26:26+00:00 | 2k |
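The clone-then-add flow in load_keyframe relies on LatentKeyframeGroupImport replacing any keyframe that shares a batch_index and keeping the list sorted. A stripped-down, runnable illustration of that dedupe-and-sort behaviour, with simplified class names but the same logic as the quoted snippet:

class Keyframe:
    def __init__(self, batch_index, strength):
        self.batch_index = batch_index
        self.strength = strength

class KeyframeGroup:
    def __init__(self):
        self.keyframes = []
    def add(self, kf):
        for i, existing in enumerate(self.keyframes):
            if existing.batch_index == kf.batch_index:
                self.keyframes[i] = kf  # replace the entry at the same index
                break
        else:
            self.keyframes.append(kf)
        self.keyframes.sort(key=lambda k: k.batch_index)

group = KeyframeGroup()
group.add(Keyframe(4, 1.0))
group.add(Keyframe(0, 0.5))
group.add(Keyframe(4, 0.2))  # replaces the earlier batch_index-4 keyframe
print([(k.batch_index, k.strength) for k in group.keyframes])  # [(0, 0.5), (4, 0.2)]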
innovatorved/subtitle | app/core/app.py | [
{
"identifier": "model_names",
"path": "app/models/models.py",
"snippet": "def download_model(model_name):\ndef download_file(url, filepath):"
},
{
"identifier": "check_models_exist",
"path": "app/utils/checks.py",
"snippet": "def check_models_exist(name: str):\n try:\n if model_names[name] in os.listdir(os.path.join(os.getcwd(), \"models\")):\n print(\"Model {} exists\".format(name))\n else:\n print(\"Model {} does not exist\".format(name))\n download_model(key)\n return True\n except Exception as exc:\n print(\"Error in check_models_exist: {}\".format(str(exc)))\n return False"
},
{
"identifier": "generate_vtt_file",
"path": "app/utils/utils.py",
"snippet": "def generate_vtt_file(path: str = None, model=\"ggml-tiny.bin\"):\n \"\"\"./whisper -m models/ggml-tiny.en.bin -f Rev.mp3 out.wav -nt --output-vtt\"\"\"\n try:\n if path is None or not chack_file_exist(path):\n raise Exception(\"PATH Error!\")\n rand = uuid.uuid4()\n output_audio_path: str = f\"data/{rand}.wav\"\n vtt_file_path: str = f\"data/{rand}.wav.vtt\"\n command: str = f\"./binary/whisper -t {NO_OF_THREADS} -p {NO_OF_PROCESSORS} -m models/{model} -f {path} {output_audio_path} -nt --output-vtt\"\n execute_command(command)\n return [rand, output_audio_path, vtt_file_path]\n except Exception as exc:\n logging.error(exc)\n raise Exception(exc.__str__())"
},
{
"identifier": "merge_video_and_vtt",
"path": "app/utils/utils.py",
"snippet": "def merge_video_and_vtt(video_path, vtt_path, output_path):\n try:\n if not chack_file_exist(video_path):\n raise Exception(\"Video File Not Found!\")\n if not chack_file_exist(vtt_path):\n raise Exception(\"VTT File Not Found!\")\n\n # Load the input files\n video = ffmpeg.input(video_path)\n subtitles = ffmpeg.input(vtt_path)\n\n merged = ffmpeg.output(\n video,\n subtitles,\n output_path,\n vcodec=\"copy\",\n scodec=\"mov_text\",\n )\n\n ffmpeg.run(merged, overwrite_output=True)\n\n return True\n except Exception as exc:\n raise Exception(f\"An error occurred: {exc}\")"
}
] | import logging
from app.models import download_model, model_names
from app.utils.checks import check_models_exist
from app.utils import generate_vtt_file, merge_video_and_vtt | 675 |
# Configure logging
logger = logging.getLogger(__name__)
def process_video(file, model="base"):
"""
add_subtitle_in_video
@param file: video file path
@param model: model name
@return: [vtt_file_path , output_file]
"""
try:
if not check_models_exist(model):
download_model(model)
output_file = f"{file.split('.')[0]}_subtitled.{file.split('.')[1]}"
| process_id, output_audio_path, vtt_file_path = generate_vtt_file( | 2 | 2023-11-17 10:12:33+00:00 | 2k |
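generate_vtt_file (quoted in the context) shells out to a whisper.cpp-style binary and derives the .vtt path from a random wav name. A sketch of just the command assembly; the thread and processor counts are hypothetical stand-ins for the module's constants:

import uuid

NO_OF_THREADS, NO_OF_PROCESSORS = 4, 1  # hypothetical values for the constants
path, model = "data/input.mp3", "ggml-tiny.bin"

rand = uuid.uuid4()
output_audio_path = f"data/{rand}.wav"
vtt_file_path = f"data/{rand}.wav.vtt"  # whisper writes the vtt next to the wav
command = (
    f"./binary/whisper -t {NO_OF_THREADS} -p {NO_OF_PROCESSORS} "
    f"-m models/{model} -f {path} {output_audio_path} -nt --output-vtt"
)
print(command)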
x0rzavi/github-readme-terminal | gifos/utils/upload_imgbb.py | [
{
"identifier": "gifos_settings",
"path": "gifos/utils/load_config.py",
"snippet": "def load_toml(file_name: str) -> dict:\n def __update_config_with_env_vars(config, prefix=\"GIFOS\"):"
},
{
"identifier": "ImgbbImage",
"path": "gifos/utils/schemas/imagebb_image.py",
"snippet": "class ImgbbImage:\n \"\"\"A class to represent an image uploaded to ImgBB.\n\n This class represents an image uploaded to ImgBB.\n\n Attributes:\n id: A string that represents the image's ID on ImgBB.\n url: A string that represents the image's URL on ImgBB.\n delete_url: A string that represents the URL to delete the image from ImgBB.\n file_name: A string that represents the name of the image file.\n expiration: A string that represents the expiration time of the image.\n size: A string that represents the size of the image.\n mime: A string that represents the MIME type of the image.\n extension: A string that represents the extension of the image file.\n \"\"\"\n\n __slots__ = [\n \"id\",\n \"url\",\n \"delete_url\",\n \"file_name\",\n \"expiration\",\n \"size\",\n \"mime\",\n \"extension\",\n ]\n id: str\n url: str\n delete_url: str\n file_name: str\n expiration: str\n size: str\n mime: str\n extension: str"
}
] | from base64 import b64encode
from dotenv import load_dotenv
from gifos.utils.load_config import gifos_settings
from gifos.utils.schemas.imagebb_image import ImgbbImage
import os
import requests
import sys | 723 |
"""This module contains a function for uploading an image to ImgBB."""
load_dotenv()
IMGBB_API_KEY = os.getenv("IMGBB_API_KEY")
ENDPOINT = "https://api.imgbb.com/1/upload"
def upload_imgbb(file_name: str, expiration: int = None) -> ImgbbImage:
"""Upload an image to ImgBB.
This function uploads an image to ImgBB using the ImgBB API. The function reads the
image file, encodes it in base64, and sends a POST request to the ImgBB API. The
function uses the `IMGBB_API_KEY` environment variable for authentication and the
`ENDPOINT` constant for the API endpoint. If the `debug` configuration value is
True, the function sets the image expiration time to 10 minutes.
:param file_name: The name of the image file to upload.
:type file_name: str
:param expiration: The expiration time for the image in seconds. If the `debug`
configuration value is True, this parameter is ignored and the expiration time
is set to 10 minutes. The value must be between 60 and 15552000 (6 months) if
provided.
:type expiration: int, optional
:return: An `ImgbbImage` object containing the uploaded image's information if the
upload is successful, otherwise None.
:rtype: ImgbbImage or None
"""
if not IMGBB_API_KEY:
print("ERROR: Please provide IMGBB_API_KEY")
sys.exit(1)
| if gifos_settings.get("general", {}).get("debug"): | 0 | 2023-11-17 06:21:18+00:00 | 2k |
Zaloog/kanban-python | src/kanban_python/interface.py | [
{
"identifier": "cfg",
"path": "src/kanban_python/config.py",
"snippet": "class KanbanConfig:\n def __init__(self, path=CONFIG_FILE_PATH) -> None:\n def __repr__(self) -> str:\n def save(self):\n def config(self) -> configparser.ConfigParser:\n def active_board(self) -> str:\n def active_board(self, new_board):\n def kanban_boards(self) -> list:\n def kanban_boards_dict(self) -> dict:\n def kanban_boards_dict(self, board_name: str) -> dict:\n def active_board_path(self) -> str:\n def show_footer(self):\n def show_footer(self, visible):\n def col_min_width(self) -> int:\n def col_min_width(self, new_width: int) -> None:\n def kanban_columns_dict(self) -> dict:\n def kanban_columns_dict(self, updated_dict) -> dict:\n def vis_cols(self) -> list:\n def done_limit(self) -> int:\n def done_limit(self, new_limit: int) -> None:\n def scanned_files(self) -> list:\n def scanned_files(self, new_files_to_scan: str) -> None:\n def scanned_patterns(self) -> list:\n def scanned_patterns(self, new_patterns_to_scan: str) -> None:\ndef create_init_config(conf_path=CONFIG_PATH, data_path=DATA_PATH):\ndef delete_current_folder_board_from_config(\n cfg=cfg, curr_path: str = str(Path.cwd())\n) -> None:\ndef check_if_board_name_exists_in_config(boardname: str, cfg=cfg) -> bool:\ndef check_if_current_active_board_in_board_list(cfg=cfg) -> bool:\ndef delete_board_from_config(board_name, cfg=cfg) -> None:\ndef check_config_exists(path=CONFIG_FILE_PATH) -> bool:\ndef get_json_path(boardname: str):"
},
{
"identifier": "BOARD_CAPTION_STRING",
"path": "src/kanban_python/constants.py",
"snippet": "BOARD_CAPTION_STRING = \"Tasks have the following Structure:\\\n [[cyan]ID[/]] ([orange3]TAG[/]) [white]Task Title[/] |[red]Days Left[/]|\""
},
{
"identifier": "COLOR_DICT",
"path": "src/kanban_python/constants.py",
"snippet": "COLOR_DICT = {\n \"Ready\": \"[red]Ready[/]\",\n \"Doing\": \"[yellow]Doing[/]\",\n \"Done\": \"[green]Done[/]\",\n \"Deleted\": \"[deep_pink4]Deleted[/]\",\n \"Archived\": \"[dark_goldenrod]Archived[/]\",\n}"
},
{
"identifier": "CONFIG_FILE_PATH",
"path": "src/kanban_python/constants.py",
"snippet": "CONFIG_FILE_PATH = CONFIG_PATH / CONFIG_FILE_NAME"
},
{
"identifier": "FOOTER",
"path": "src/kanban_python/constants.py",
"snippet": "FOOTER = [FOOTER_FIRST, FOOTER_LAST]"
},
{
"identifier": "REPORT_COLORS",
"path": "src/kanban_python/constants.py",
"snippet": "REPORT_COLORS = [\"#161b22\", \"#0e4429\", \"#006d32\", \"#26a641\", \"#39d353\"]"
},
{
"identifier": "calculate_days_left_till_due",
"path": "src/kanban_python/utils.py",
"snippet": "def get_motivational_quote() -> str:\ndef current_time_to_str() -> str:\ndef calculate_time_delta_str(start_time_str: str, end_time_str: str) -> float:\ndef create_status_dict_for_rows(data: dict, vis_cols: list) -> dict:\ndef check_if_done_col_leq_X(cfg, data: dict) -> bool:\ndef check_if_there_are_visible_tasks_in_board(data: dict, vis_cols: list) -> bool:\ndef move_first_done_task_to_archive(data: dict):\ndef delete_json_file(db_path: str) -> None:\ndef check_board_name_valid(boardname: str):\ndef scan_files(path=Path.cwd(), endings: list = [\".py\"]):\n def recursive_search(path, file_list: list, progress):\ndef scan_for_todos(\n file_paths: list, rel_path=Path.cwd(), patterns: list = [\"#TODO\", \"# TODO\"]\n) -> list:\ndef split_todo_in_tag_and_title(todo: str, patterns: list):\ndef get_tag_id_choices(data_dict: dict, vis_cols: list) -> list:\ndef check_scanner_files_valid(files: str) -> bool:\ndef check_scanner_patterns_valid(patterns: str) -> bool:\ndef get_iso_calender_info(date_str: str):\ndef create_dict_for_report_view(completed_tasks: list):\ndef create_color_mapping(amount_list: list, max_val: int):\ndef create_report_document(boards_dict: dict):\ndef check_due_date_format(date_str: str) -> bool:\ndef due_date_datetime_to_date(date_datetime: str) -> str:\ndef due_date_date_to_datetime(date_str: str) -> str:\ndef calculate_days_left_till_due(due_date: str):"
}
] | import calendar
from datetime import datetime
from itertools import zip_longest
from rich.prompt import Confirm, IntPrompt, Prompt
from rich.table import Table
from .config import cfg
from .constants import (
BOARD_CAPTION_STRING,
COLOR_DICT,
CONFIG_FILE_PATH,
FOOTER,
REPORT_COLORS,
)
from .utils import (
calculate_days_left_till_due,
calculate_time_delta_str,
check_due_date_format,
console,
create_color_mapping,
create_dict_for_report_view,
create_status_dict_for_rows,
current_time_to_str,
due_date_date_to_datetime,
due_date_datetime_to_date,
) | 1,429 |
# Board
#####################################################################################
def create_table(data: dict) -> Table:
status_dict = create_status_dict_for_rows(data=data, vis_cols=cfg.vis_cols)
table_name = cfg.active_board
table = Table(
title=f"[blue]Active Board: {table_name}[/]",
highlight=True,
show_header=True,
show_footer=True if cfg.show_footer == "True" else False,
caption=BOARD_CAPTION_STRING,
)
for i, category in enumerate([COLOR_DICT.get(col, col) for col in cfg.vis_cols]):
table.add_column(
header=category + f"\t({len(status_dict[cfg.vis_cols[i]])} Task/s)",
header_style="bold",
justify="left",
overflow="fold",
footer=FOOTER[0]
if i == 0
else FOOTER[1]
if i == len(cfg.vis_cols) - 1
else "",
min_width=cfg.col_min_width,
)
for row_tasks in zip_longest(*status_dict.values()):
table.add_row(*row_tasks)
return table
# Board Action selection
def input_ask_for_action():
| console.print( | 6 | 2023-11-11 14:43:55+00:00 | 2k |
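The zip_longest call in create_table pads shorter columns with None, which rich renders as empty cells, so ragged kanban columns still line up row by row. A self-contained miniature of the same pattern:

from itertools import zip_longest
from rich.console import Console
from rich.table import Table

columns = {
    "Ready": ["[1] write docs"],
    "Doing": ["[2] fix bug", "[3] review PR"],
    "Done": [],
}

table = Table(title="Demo Board")
for name, tasks in columns.items():
    table.add_column(f"{name}\t({len(tasks)} Task/s)", min_width=20)
for row in zip_longest(*columns.values()):
    table.add_row(*row)  # None entries render as empty cells

Console().print(table)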
AMAAI-Lab/mustango | audioldm/latent_diffusion/ddim.py | [
{
"identifier": "make_ddim_sampling_parameters",
"path": "audioldm/latent_diffusion/util.py",
"snippet": "def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):\n # select alphas for computing the variance schedule\n alphas = alphacums[ddim_timesteps]\n alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())\n\n # according the the formula provided in https://arxiv.org/abs/2010.02502\n sigmas = eta * np.sqrt(\n (1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)\n )\n if verbose:\n print(\n f\"Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}\"\n )\n print(\n f\"For the chosen value of eta, which is {eta}, \"\n f\"this results in the following sigma_t schedule for ddim sampler {sigmas}\"\n )\n return sigmas, alphas, alphas_prev"
},
{
"identifier": "make_ddim_timesteps",
"path": "audioldm/latent_diffusion/util.py",
"snippet": "def make_ddim_timesteps(\n ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True\n):\n if ddim_discr_method == \"uniform\":\n c = num_ddpm_timesteps // num_ddim_timesteps\n ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))\n elif ddim_discr_method == \"quad\":\n ddim_timesteps = (\n (np.linspace(0, np.sqrt(num_ddpm_timesteps * 0.8), num_ddim_timesteps)) ** 2\n ).astype(int)\n else:\n raise NotImplementedError(\n f'There is no ddim discretization method called \"{ddim_discr_method}\"'\n )\n\n # assert ddim_timesteps.shape[0] == num_ddim_timesteps\n # add one to get the final alpha values right (the ones from first scale to data during sampling)\n steps_out = ddim_timesteps + 1\n if verbose:\n print(f\"Selected timesteps for ddim sampler: {steps_out}\")\n return steps_out"
},
{
"identifier": "noise_like",
"path": "audioldm/latent_diffusion/util.py",
"snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()"
},
{
"identifier": "extract_into_tensor",
"path": "audioldm/latent_diffusion/util.py",
"snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t).contiguous()\n return out.reshape(b, *((1,) * (len(x_shape) - 1))).contiguous()"
}
] | import torch
import numpy as np
from tqdm import tqdm
from audioldm.latent_diffusion.util import (
make_ddim_sampling_parameters,
make_ddim_timesteps,
noise_like,
extract_into_tensor,
) | 1,266 | """SAMPLING ONLY."""
class DDIMSampler(object):
def __init__(self, model, schedule="linear", **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
if attr.device != torch.device("cuda"):
attr = attr.to(torch.device("cuda"))
setattr(self, name, attr)
def make_schedule(
self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.0, verbose=True
):
self.ddim_timesteps = make_ddim_timesteps(
ddim_discr_method=ddim_discretize,
num_ddim_timesteps=ddim_num_steps,
num_ddpm_timesteps=self.ddpm_num_timesteps,
verbose=verbose,
)
alphas_cumprod = self.model.alphas_cumprod
assert (
alphas_cumprod.shape[0] == self.ddpm_num_timesteps
), "alphas have to be defined for each timestep"
to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
self.register_buffer("betas", to_torch(self.model.betas))
self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod))
self.register_buffer(
"alphas_cumprod_prev", to_torch(self.model.alphas_cumprod_prev)
)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer(
"sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod.cpu()))
)
self.register_buffer(
"sqrt_one_minus_alphas_cumprod",
to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),
)
self.register_buffer(
"log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod.cpu()))
)
self.register_buffer(
"sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))
)
self.register_buffer(
"sqrt_recipm1_alphas_cumprod",
to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),
)
# ddim sampling parameters
| """SAMPLING ONLY."""
class DDIMSampler(object):
def __init__(self, model, schedule="linear", **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
if attr.device != torch.device("cuda"):
attr = attr.to(torch.device("cuda"))
setattr(self, name, attr)
def make_schedule(
self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.0, verbose=True
):
self.ddim_timesteps = make_ddim_timesteps(
ddim_discr_method=ddim_discretize,
num_ddim_timesteps=ddim_num_steps,
num_ddpm_timesteps=self.ddpm_num_timesteps,
verbose=verbose,
)
alphas_cumprod = self.model.alphas_cumprod
assert (
alphas_cumprod.shape[0] == self.ddpm_num_timesteps
), "alphas have to be defined for each timestep"
to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
self.register_buffer("betas", to_torch(self.model.betas))
self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod))
self.register_buffer(
"alphas_cumprod_prev", to_torch(self.model.alphas_cumprod_prev)
)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer(
"sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod.cpu()))
)
self.register_buffer(
"sqrt_one_minus_alphas_cumprod",
to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),
)
self.register_buffer(
"log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod.cpu()))
)
self.register_buffer(
"sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))
)
self.register_buffer(
"sqrt_recipm1_alphas_cumprod",
to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),
)
# ddim sampling parameters | ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters( | 0 | 2023-11-14 23:29:31+00:00 | 2k |
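The sigma schedule registered here follows Eq. (16) of the DDIM paper (https://arxiv.org/abs/2010.02502): sigma_t = eta * sqrt((1 - a_{t-1}) / (1 - a_t)) * sqrt(1 - a_t / a_{t-1}). A toy numpy check with a fabricated alpha-cumprod curve and a uniform timestep subset:

import numpy as np

alphacums = np.linspace(0.9999, 0.01, 1000)  # fabricated cumulative-alpha schedule
ddim_timesteps = np.arange(0, 1000, 100) + 1  # "uniform" discretization, c = 100
eta = 1.0  # eta = 0 would zero out sigmas, i.e. fully deterministic DDIM

alphas = alphacums[ddim_timesteps]
alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
print(sigmas.round(4))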
lxmusics/lx-music-api-server-python | modules/kg/search.py | [
{
"identifier": "Httpx",
"path": "common/Httpx.py",
"snippet": "def is_valid_utf8(text):\ndef is_plain_text(text):\ndef convert_dict_to_form_string(dic):\ndef log_plaintext(text):\ndef request(url, options = {}):\n def _json():\ndef checkcn():\n def __init__(self, status, content, headers):\n def json(self):\nasync def convert_to_requests_response(aiohttp_response):\nasync def AsyncRequest(url, options = {}):\nclass ClientResponse:"
},
{
"identifier": "utils",
"path": "common/utils.py",
"snippet": "def createBase64Encode(data_bytes):\ndef createHexEncode(data_bytes):\ndef createBase64Decode(data):\ndef createHexDecode(data):\ndef handleInflateRawSync(data):\ndef require(module):\ndef addToGlobalNamespace(key, data):\ndef filterFileName(filename):\ndef createMD5(s: str):\ndef readFile(path, mode = \"text\"):\ndef unique_list(list_in):\ndef encodeURIComponent(component):\ndef decodeURIComponent(component):\ndef encodeURI(uri):\ndef decodeURI(uri):\ndef sortDict(dictionary):\ndef mergeDict(dict1, dict2):\n def __init__(self, d):\n def __setattr__(self, key, value):\n def to_dict(self):\n def __getattr__(self, UNUSED):\ndef dump_xml(data):\ndef load_xml(data):\ndef sizeFormat(size):\ndef timeLengthFormat(t):\ndef timestamp_format(t):\nclass CreateObject(dict):"
},
{
"identifier": "FailedException",
"path": "common/exceptions.py",
"snippet": "class FailedException(Exception):\n # 此错误用于处理代理API请求失败的情况\n pass"
},
{
"identifier": "buildRequestParams",
"path": "modules/kg/utils.py",
"snippet": "def buildRequestParams(dictionary):\n joined_str = '&'.join([f'{k}={v}' for k, v in dictionary.items()])\n return joined_str"
}
] | from common import Httpx
from common import utils
from common.exceptions import FailedException
from .utils import buildRequestParams | 1,189 | # ----------------------------------------
# - mode: python -
# - author: helloplhm-qwq -
# - name: search.py -
# - project: lx-music-api-server -
# - license: MIT -
# ----------------------------------------
# This file is part of the "lx-music-api-server" project.
def formatSubResult(l):
res = []
for songinfo in l:
fileinfo = {}
if (songinfo['FileSize'] != 0):
fileinfo['128k'] = {
'hash': songinfo['FileHash'],
'size': utils.sizeFormat(songinfo['FileSize']),
}
if (songinfo['HQFileSize'] != 0):
fileinfo['320k'] = {
'hash': songinfo['HQFileHash'],
'size': utils.sizeFormat(songinfo['HQFileSize']),
}
if (songinfo['SQFileSize'] != 0):
fileinfo['flac'] = {
'hash': songinfo['SQFileHash'],
'size': utils.sizeFormat(songinfo['SQFileSize']),
}
if (songinfo['ResFileSize'] != 0):
fileinfo['flac24bit'] = {
'hash': songinfo['ResFileHash'],
'size': utils.sizeFormat(songinfo['ResFileSize']),
}
res.append({
'name': songinfo['SongName'],
'name_ori': songinfo['OriSongName'],
'name_extra': songinfo['SongName'].replace(songinfo['OriSongName'], ''),
'singer': songinfo['SingerName'],
'singer_list': [{'name': i['name'], 'id': i['id']} for i in songinfo['Singers']],
'isoriginal': True if (songinfo['IsOriginal'] == 1) else False,
'tag': songinfo.get('TagContent') if songinfo.get('TagContent') else '',
'format_length': utils.timeLengthFormat(songinfo['Duration']),
'length': songinfo['Duration'],
'hash': songinfo['FileHash'],
'file_info': fileinfo,
'songmid': songinfo['Audioid'],
'album_id': songinfo['AlbumID'],
'album': songinfo['AlbumName'],
'language': songinfo['trans_param'].get('language') if songinfo['trans_param'] else '',
'cover': songinfo['Image'].format(size = 1080),
'sizable_cover': songinfo['Image'],
'mvid': songinfo['MvHash'],
})
return res
async def getSongSearchResult(query, page, size):
req = await Httpx.AsyncRequest(utils.encodeURI(f'https://songsearch.kugou.com/song_search_v2?' + buildRequestParams({
"keyword": query,
"page": page,
"pagesize": size,
"userid": 0,
"clientver": "",
"platform": "WebFilter",
"filter": 2,
"iscorrection": 1,
"privilege_filter": 0
})), {
"headers": {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.142.86 Safari/537.36",
"Referer": "https://www.kugou.com",
}
})
body = req.json()
if (body['status'] != 1):
| raise FailedException('歌曲搜索失败') | 2 | 2023-11-10 13:16:30+00:00 | 2k |
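buildRequestParams from the context is a plain key=value join rather than a urlencode, which is why the result is then passed through utils.encodeURI. A standalone check of the query string it produces, mirroring the quoted one-liner:

def build_request_params(params: dict) -> str:
    # mirrors modules/kg/utils.buildRequestParams
    return '&'.join(f'{k}={v}' for k, v in params.items())

query = build_request_params({"keyword": "hello", "page": 1, "pagesize": 20})
print("https://songsearch.kugou.com/song_search_v2?" + query)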
ai-forever/Kandinsky-3 | kandinsky3/model/nn.py | [
{
"identifier": "exist",
"path": "kandinsky3/model/utils.py",
"snippet": "def exist(item):\n return item is not None"
},
{
"identifier": "set_default_layer",
"path": "kandinsky3/model/utils.py",
"snippet": "def set_default_layer(condition, layer_1, args_1=[], kwargs_1={}, layer_2=Identity, args_2=[], kwargs_2={}):\n if condition:\n return layer_1(*args_1, **kwargs_1)\n else:\n return layer_2(*args_2, **kwargs_2)"
}
] | import math
import torch
from torch import nn, einsum
from einops import rearrange, repeat
from .utils import exist, set_default_layer | 757 |
class Identity(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
@staticmethod
def forward(x, *args, **kwargs):
return x
class SinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device=x.device) * -emb)
emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j')
return torch.cat((emb.sin(), emb.cos()), dim=-1)
class ConditionalGroupNorm(nn.Module):
def __init__(self, groups, normalized_shape, context_dim):
super().__init__()
self.norm = nn.GroupNorm(groups, normalized_shape, affine=False)
self.context_mlp = nn.Sequential(
nn.SiLU(),
nn.Linear(context_dim, 2 * normalized_shape)
)
self.context_mlp[1].weight.data.zero_()
self.context_mlp[1].bias.data.zero_()
def forward(self, x, context):
context = self.context_mlp(context)
ndims = ' 1' * len(x.shape[2:])
context = rearrange(context, f'b c -> b c{ndims}')
scale, shift = context.chunk(2, dim=1)
x = self.norm(x) * (scale + 1.) + shift
return x
class Attention(nn.Module):
def __init__(self, in_channels, out_channels, context_dim, head_dim=64):
super().__init__()
assert out_channels % head_dim == 0
self.num_heads = out_channels // head_dim
self.scale = head_dim ** -0.5
self.to_query = nn.Linear(in_channels, out_channels, bias=False)
self.to_key = nn.Linear(context_dim, out_channels, bias=False)
self.to_value = nn.Linear(context_dim, out_channels, bias=False)
self.output_layer = nn.Linear(out_channels, out_channels, bias=False)
def forward(self, x, context, context_mask=None):
query = rearrange(self.to_query(x), 'b n (h d) -> b h n d', h=self.num_heads)
key = rearrange(self.to_key(context), 'b n (h d) -> b h n d', h=self.num_heads)
value = rearrange(self.to_value(context), 'b n (h d) -> b h n d', h=self.num_heads)
attention_matrix = einsum('b h i d, b h j d -> b h i j', query, key) * self.scale
| if exist(context_mask): | 0 | 2023-11-13 10:16:04+00:00 | 2k |
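SinusoidalPosEmb above is the standard transformer-style timestep embedding: half the channels carry sin, half carry cos, over log-spaced frequencies. A plain-torch check (no einops) for an assumed dim of 8:

import math
import torch

dim = 8
t = torch.tensor([0.0, 1.0, 2.0])  # toy timesteps

half = dim // 2
freqs = torch.exp(torch.arange(half) * -(math.log(10000) / (half - 1)))
args = t[:, None] * freqs[None, :]
emb = torch.cat((args.sin(), args.cos()), dim=-1)
print(emb.shape)  # torch.Size([3, 8])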
spfrommer/torchexplorer | torchexplorer/render/structs.py | [
{
"identifier": "Tooltip",
"path": "torchexplorer/components/tooltip.py",
"snippet": "class Tooltip:\n \"\"\"The tooltip that pops up next to a Module.\"\"\"\n\n def __init__(self, title: str, keys: list[str], vals: list[str]):\n self.title = title\n self.keys = keys\n self.vals = vals\n \n @classmethod\n def create_io(cls, tracker: SizeTracker) -> 'Tooltip':\n name = tracker.type.split('.')[-1]\n keys, vals = ['size'], [str(tracker.size).replace('None', dash)]\n return Tooltip(name, keys, vals)\n \n @classmethod\n def create_moduleinvocation(\n cls, module: Module, parent_module: Module, invocation_id: InvocationId\n ) -> 'Tooltip':\n\n name_in_parent = cls._get_name_in_parent(module, parent_module)\n\n io_shape_keys, io_shape_vals = cls._get_io_shape_keyvals(module, invocation_id)\n extra_repr_keys, extra_repr_vals = cls._get_extra_repr_keyvals(module)\n\n keys = io_shape_keys + extra_repr_keys\n vals = io_shape_vals + extra_repr_vals\n\n assert len(keys) == len(vals)\n\n return Tooltip(name_in_parent, keys, vals)\n \n @classmethod\n def create_attach(cls, module: Module) -> 'Tooltip':\n return cls.create_io(module.torchexplorer_metadata.input_sizes[0][0])\n \n @classmethod\n def _get_name_in_parent(cls, module: Module, parent_module: Module) -> str:\n name_in_parent = ''\n for name, m in parent_module.named_children():\n if m == module:\n name_in_parent = name\n break\n \n if isinstance(m, ModuleList):\n for i, mm in enumerate(m):\n if mm == module:\n name_in_parent = f'{name}[{i}]'\n break\n \n if isinstance(m, ModuleDict):\n for k, mm in m.items():\n if mm == module:\n name_in_parent = f'{name}[{k}]'\n break\n \n return name_in_parent\n\n @classmethod\n def _get_io_shape_keyvals(\n cls, module: Module, invocation_id: InvocationId\n ) -> tuple[list[str], list[str]]:\n\n metadata = module.torchexplorer_metadata \n\n keys, vals = [], []\n\n one_input = len(metadata.input_sizes[invocation_id]) == 1\n for i, input_tracker in enumerate(metadata.input_sizes[invocation_id]):\n keys.append('in_size' if one_input else f'in{i}_size')\n vals.append(str(input_tracker.size).replace('None', dash))\n \n one_output = len(metadata.output_sizes[invocation_id]) == 1\n for i, output_tracker in enumerate(metadata.output_sizes[invocation_id]):\n keys.append('out_size' if one_output else f'out{i}_size')\n vals.append(str(output_tracker.size).replace('None', dash))\n\n return keys, vals\n \n @classmethod\n def _get_extra_repr_keyvals(cls, module: Module) -> tuple[list[str], list[str]]:\n try:\n keys, vals = [], []\n extra_rep = module.extra_repr()\n pairs = re.split(r',\\s*(?![^()]*\\))(?![^[]]*\\])', extra_rep)\n for pair in pairs:\n if pair == '':\n continue\n k, v = pair.split('=') if ('=' in pair) else (dash, pair)\n keys.append(k.strip())\n vals.append(v.strip())\n except Exception:\n keys, vals = [], []\n \n return keys, vals"
},
{
"identifier": "ModuleInvocationHistograms",
"path": "torchexplorer/core.py",
"snippet": "class ModuleInvocationHistograms:\n \"\"\"The histograms associated to a particular InvocationId on a module.\"\"\"\n input_hists: list[IncrementalHistogram] = field(default_factory=lambda: [])\n output_hists: list[IncrementalHistogram] = field(default_factory=lambda: [])"
},
{
"identifier": "ModuleSharedHistograms",
"path": "torchexplorer/core.py",
"snippet": "class ModuleSharedHistograms:\n \"\"\"The histograms are shared across all InvocationId on a module.\"\"\"\n param_hists: dict[ParamName, IncrementalHistogram] = dict_field()\n param_grad_hists: dict[ParamName, IncrementalHistogram] = dict_field()"
}
] | from typing import Optional
from dataclasses import dataclass, field
from torchexplorer.components.tooltip import Tooltip
from torchexplorer.core import (
ModuleInvocationHistograms, ModuleSharedHistograms
) | 1,250 | from __future__ import annotations
@dataclass
class EdgeLayout:
path_points: list[list[float]]
arrowhead_points: list[list[float]]
downstream_input_index: Optional[int]
upstream_output_index: Optional[int]
@dataclass
class TooltipLayout:
tooltip: Tooltip
# Coordinates in parent of the layout this tooltip belongs to
bottom_left_corner: list[float] = field(default_factory=lambda: [0, 0])
top_right_corner: list[float] = field(default_factory=lambda: [0, 0])
# Either a specific module invocation or for IO
@dataclass
class NodeLayout:
display_name: Optional[str] = None
tooltip: Optional[TooltipLayout] = None
invocation_hists: Optional[ModuleInvocationHistograms] = None
invocation_grad_hists: Optional[ModuleInvocationHistograms] = None
| shared_hists: Optional[ModuleSharedHistograms] = None | 2 | 2023-11-13 05:56:04+00:00 | 2k |
namin/llm-verified-with-monte-carlo-tree-search | huggingface_generate.py | [
{
"identifier": "STOP_WORD",
"path": "lang_config.py",
"snippet": "STOP_WORD = \"\\n\""
},
{
"identifier": "BASE_MODEL_NAME",
"path": "model_config.py",
"snippet": "BASE_MODEL_NAME = args.base_model_name"
},
{
"identifier": "PEFT_MODEL_PATH",
"path": "model_config.py",
"snippet": "PEFT_MODEL_PATH = args.peft_model_path"
},
{
"identifier": "PPO_MODEL_PATH",
"path": "model_config.py",
"snippet": "PPO_MODEL_PATH = args.ppo_model_path"
},
{
"identifier": "CUSTOM_STOP",
"path": "model_config.py",
"snippet": "CUSTOM_STOP = args.custom_stop"
},
{
"identifier": "SAME_FOR_MANY_SAMPLES",
"path": "model_config.py",
"snippet": "SAME_FOR_MANY_SAMPLES = args.same_for_many_samples"
},
{
"identifier": "BEAM_SEARCH",
"path": "model_config.py",
"snippet": "BEAM_SEARCH = args.beam_search"
},
{
"identifier": "MODEL_ARG_TOP_K",
"path": "model_config.py",
"snippet": "MODEL_ARG_TOP_K = args.model_arg_topk"
},
{
"identifier": "MODEL_ARG_TOP_P",
"path": "model_config.py",
"snippet": "MODEL_ARG_TOP_P = args.model_arg_topp"
},
{
"identifier": "MODEL_ARG_TEMP",
"path": "model_config.py",
"snippet": "MODEL_ARG_TEMP = args.model_arg_temp"
}
] | import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig, AutoTokenizer
from trl import AutoModelForCausalLMWithValueHead
from peft import PeftModel
from lang_config import STOP_WORD
from model_config import BASE_MODEL_NAME, PEFT_MODEL_PATH, PPO_MODEL_PATH, CUSTOM_STOP, SAME_FOR_MANY_SAMPLES, BEAM_SEARCH, MODEL_ARG_TOP_K, MODEL_ARG_TOP_P, MODEL_ARG_TEMP
from typing import List | 812 |
def load_model(
base_model_name: str = BASE_MODEL_NAME,
ppo_model_path: str = PPO_MODEL_PATH,
peft_model_path: str = PEFT_MODEL_PATH,
) -> (AutoModelForCausalLM, PeftModel, AutoTokenizer):
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.float16,
)
if ppo_model_path is None:
base_model = AutoModelForCausalLM.from_pretrained(
base_model_name,
quantization_config=bnb_config,
device_map="auto",
trust_remote_code=True,
use_auth_token=True,
)
tokenizer = AutoTokenizer.from_pretrained(
base_model_name, trust_remote_code=True
)
else:
base_model = AutoModelForCausalLMWithValueHead.from_pretrained(
ppo_model_path, quantization_config=bnb_config
)
tokenizer = AutoTokenizer.from_pretrained(ppo_model_path)
tokenizer.pad_token = tokenizer.eos_token
model = (
PeftModel.from_pretrained(base_model, peft_model_path)
if peft_model_path
else base_model
)
return (base_model, model, tokenizer)
def stop_words_ids(tokenizer: AutoTokenizer) -> List[int]:
# Hack: we want the stop word as it is encoded glued to another word.
stop_word_id = tokenizer.encode("hello" + STOP_WORD, add_special_tokens=False)[-1]
quote_word_id = tokenizer.encode("```", add_special_tokens=False)[-1]
return [stop_word_id, quote_word_id]
def get_model_generation_token_args(
tokenizer: AutoTokenizer, custom_stop: bool = CUSTOM_STOP
):
return dict(
min_length=5,
max_new_tokens=100,
eos_token_id=stop_words_ids(tokenizer)
if custom_stop
else tokenizer.eos_token_id,
pad_token_id=tokenizer.eos_token_id,
)
def get_model_generation_search_args(
num: int,
beam_search: bool = BEAM_SEARCH
):
if beam_search:
return dict(
num_beams=num,
num_beam_groups=num,
diversity_penalty=0.9,
)
else:
return dict(
top_k=MODEL_ARG_TOP_K if MODEL_ARG_TOP_K is not None else 50 if num>1 and not SAME_FOR_MANY_SAMPLES else 7,
| top_p=MODEL_ARG_TOP_P if MODEL_ARG_TOP_P is not None else 0.9, | 8 | 2023-11-11 19:56:04+00:00 | 2k |
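The "glued" encoding hack in stop_words_ids exists because many BPE tokenizers assign a different id to a token at the start of a sequence than mid-sequence, and generation only ever sees the mid-sequence form. A hedged demo; "gpt2" is just an arbitrary checkpoint for illustration:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # any BPE checkpoint shows the idea

STOP_WORD = "\n"
glued = tokenizer.encode("hello" + STOP_WORD, add_special_tokens=False)
alone = tokenizer.encode(STOP_WORD, add_special_tokens=False)
print(glued[-1], alone)  # the last glued id is the one that appears mid-generation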
BraveGroup/Drive-WM | src/diffusers/utils/constants.py | [
{
"identifier": "dep_version_check",
"path": "src/diffusers/dependency_versions_check.py",
"snippet": "def dep_version_check(pkg, hint=None):\n require_version(deps[pkg], hint)"
},
{
"identifier": "ENV_VARS_TRUE_VALUES",
"path": "src/diffusers/utils/import_utils.py",
"snippet": "ENV_VARS_TRUE_VALUES = {\"1\", \"ON\", \"YES\", \"TRUE\"}"
},
{
"identifier": "is_peft_available",
"path": "src/diffusers/utils/import_utils.py",
"snippet": "def is_peft_available():\n return _peft_available"
},
{
"identifier": "is_transformers_available",
"path": "src/diffusers/utils/import_utils.py",
"snippet": "def is_transformers_available():\n return _transformers_available"
}
] | import importlib
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
from packaging import version
from ..dependency_versions_check import dep_version_check
from .import_utils import ENV_VARS_TRUE_VALUES, is_peft_available, is_transformers_available | 670 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
default_cache_path = HUGGINGFACE_HUB_CACHE
MIN_PEFT_VERSION = "0.6.0"
MIN_TRANSFORMERS_VERSION = "4.34.0"
_CHECK_PEFT = os.environ.get("_CHECK_PEFT", "1") in ENV_VARS_TRUE_VALUES
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
# Below should be `True` if the current version of `peft` and `transformers` are compatible with
# PEFT backend. Will automatically fall back to PEFT backend if the correct versions of the libraries are
# available.
# For PEFT it has to be greater than or equal to 0.6.0 and for transformers it has to be greater than or equal to 4.34.0.
_required_peft_version = is_peft_available() and version.parse(
version.parse(importlib.metadata.version("peft")).base_version
) >= version.parse(MIN_PEFT_VERSION)
| # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
default_cache_path = HUGGINGFACE_HUB_CACHE
MIN_PEFT_VERSION = "0.6.0"
MIN_TRANSFORMERS_VERSION = "4.34.0"
_CHECK_PEFT = os.environ.get("_CHECK_PEFT", "1") in ENV_VARS_TRUE_VALUES
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
# Below should be `True` if the current version of `peft` and `transformers` are compatible with
# PEFT backend. Will automatically fall back to PEFT backend if the correct versions of the libraries are
# available.
# For PEFT it has to be greater than or equal to 0.6.0 and for transformers it has to be greater than or equal to 4.34.0.
_required_peft_version = is_peft_available() and version.parse(
version.parse(importlib.metadata.version("peft")).base_version
) >= version.parse(MIN_PEFT_VERSION) | _required_transformers_version = is_transformers_available() and version.parse( | 3 | 2023-11-18 01:40:55+00:00 | 2k |
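Note on the record above: the _required_peft_version gate is a reusable pattern: compare the installed package's base version (dev/post suffixes stripped) against a minimum. A self-contained restatement; the helper name is mine, not from diffusers:

import importlib.metadata

from packaging import version

def meets_minimum(package: str, minimum: str) -> bool:
    try:
        installed = importlib.metadata.version(package)
    except importlib.metadata.PackageNotFoundError:
        return False
    base = version.parse(installed).base_version  # e.g. "0.6.0" from "0.6.0.dev0"
    return version.parse(base) >= version.parse(minimum)

# meets_minimum("peft", "0.6.0") mirrors the record's _required_peft_version check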
basnijholt/unidep | unidep/_pytest_plugin.py | [
{
"identifier": "find_requirements_files",
"path": "unidep/_dependencies_parsing.py",
"snippet": "def find_requirements_files(\n base_dir: str | Path = \".\",\n depth: int = 1,\n *,\n verbose: bool = False,\n) -> list[Path]:\n \"\"\"Scan a directory for `requirements.yaml` and `pyproject.toml` files.\"\"\"\n base_path = Path(base_dir)\n found_files = []\n\n # Define a helper function to recursively scan directories\n def _scan_dir(path: Path, current_depth: int) -> None:\n if verbose:\n print(f\"🔍 Scanning in `{path}` at depth {current_depth}\")\n if current_depth > depth:\n return\n for child in path.iterdir():\n if child.is_dir():\n _scan_dir(child, current_depth + 1)\n elif child.name == \"requirements.yaml\":\n found_files.append(child)\n if verbose:\n print(f'🔍 Found `\"requirements.yaml\"` at `{child}`')\n elif child.name == \"pyproject.toml\" and unidep_configured_in_toml(child):\n if verbose:\n print(f'🔍 Found `\"pyproject.toml\"` with dependencies at `{child}`')\n found_files.append(child)\n\n _scan_dir(base_path, 0)\n return sorted(found_files)"
},
{
"identifier": "parse_local_dependencies",
"path": "unidep/_dependencies_parsing.py",
"snippet": "def parse_local_dependencies(\n *paths: Path,\n check_pip_installable: bool = True,\n verbose: bool = False,\n) -> dict[Path, list[Path]]:\n \"\"\"Extract local project dependencies from a list of `requirements.yaml` or `pyproject.toml` files.\n\n Works by loading the specified `local_dependencies` list.\n \"\"\" # noqa: E501\n dependencies: dict[str, set[str]] = defaultdict(set)\n\n for p in paths:\n if verbose:\n print(f\"🔗 Analyzing dependencies in `{p}`\")\n base_path = p.resolve().parent\n _extract_local_dependencies(\n path=p,\n base_path=base_path,\n processed=set(),\n dependencies=dependencies,\n check_pip_installable=check_pip_installable,\n verbose=verbose,\n )\n\n return {\n Path(k): sorted({Path(v) for v in v_set})\n for k, v_set in sorted(dependencies.items())\n }"
}
] | import os
import sys
import pytest
from pathlib import Path
from typing import TYPE_CHECKING
from unidep._dependencies_parsing import (
find_requirements_files,
parse_local_dependencies,
)
from git import Repo | 937 | """unidep - Unified Conda and Pip requirements management.
Pytest plugin for running only tests of changed files.
WARNING: Still experimental and not documented.
"""
from __future__ import annotations
if TYPE_CHECKING:
def pytest_addoption(parser: pytest.Parser) -> None: # pragma: no cover
"""Add options to the pytest command line."""
parser.addoption(
"--run-affected",
action="store_true",
default=False,
help="Run only tests from affected packages",
)
parser.addoption(
"--branch",
action="store",
default="origin/main",
help="Branch to compare with for finding affected tests",
)
parser.addoption(
"--repo-root",
action="store",
default=".",
type=Path,
help="Root of the repository",
)
def pytest_collection_modifyitems(
config: pytest.Config,
items: list[pytest.Item],
) -> None: # pragma: no cover
"""Filter tests based on the --run-affected option."""
if not config.getoption("--run-affected"):
return
try:
except ImportError:
print(
"🛑 You need to install `gitpython` to use the `--run-affected` option."
" Run `pip install gitpython` to install it.",
)
sys.exit(1)
compare_branch = config.getoption("--branch")
repo_root = Path(config.getoption("--repo-root")).absolute()
repo = Repo(repo_root)
| """unidep - Unified Conda and Pip requirements management.
Pytest plugin for running only tests of changed files.
WARNING: Still experimental and not documented.
"""
from __future__ import annotations
if TYPE_CHECKING:
def pytest_addoption(parser: pytest.Parser) -> None: # pragma: no cover
"""Add options to the pytest command line."""
parser.addoption(
"--run-affected",
action="store_true",
default=False,
help="Run only tests from affected packages",
)
parser.addoption(
"--branch",
action="store",
default="origin/main",
help="Branch to compare with for finding affected tests",
)
parser.addoption(
"--repo-root",
action="store",
default=".",
type=Path,
help="Root of the repository",
)
def pytest_collection_modifyitems(
config: pytest.Config,
items: list[pytest.Item],
) -> None: # pragma: no cover
"""Filter tests based on the --run-affected option."""
if not config.getoption("--run-affected"):
return
try:
except ImportError:
print(
"🛑 You need to install `gitpython` to use the `--run-affected` option."
" Run `pip install gitpython` to install it.",
)
sys.exit(1)
compare_branch = config.getoption("--branch")
repo_root = Path(config.getoption("--repo-root")).absolute()
repo = Repo(repo_root) | found_files = find_requirements_files(repo_root) | 0 | 2023-11-16 04:23:01+00:00 | 2k |
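Note on the record above: it stops right after Repo(repo_root) is built; a plausible next step for an affected-tests plugin is diffing against the comparison branch to collect changed files. A hedged sketch of that step, not unidep's actual implementation:

from pathlib import Path

from git import Repo  # gitpython

def changed_paths(repo_root: Path, compare_branch: str = "origin/main") -> set:
    repo = Repo(repo_root)
    # names are reported relative to the repository root
    names = repo.git.diff("--name-only", compare_branch)
    return {repo_root / line for line in names.splitlines() if line}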
BAAI-DCAI/SegVol | segment_anything_volumetric/modeling/image_encoder.py | [
{
"identifier": "LayerNorm2d",
"path": "segment_anything_volumetric/modeling/common.py",
"snippet": "class LayerNorm2d(nn.Module):\n def __init__(self, num_channels: int, eps: float = 1e-6) -> None:\n super().__init__()\n self.weight = nn.Parameter(torch.ones(num_channels))\n self.bias = nn.Parameter(torch.zeros(num_channels))\n self.eps = eps\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n u = x.mean(1, keepdim=True)\n s = (x - u).pow(2).mean(1, keepdim=True)\n x = (x - u) / torch.sqrt(s + self.eps)\n x = self.weight[:, None, None] * x + self.bias[:, None, None]\n return x"
},
{
"identifier": "MLPBlock",
"path": "segment_anything_volumetric/modeling/common.py",
"snippet": "class MLPBlock(nn.Module):\n def __init__(\n self,\n embedding_dim: int,\n mlp_dim: int,\n act: Type[nn.Module] = nn.GELU,\n ) -> None:\n super().__init__()\n self.lin1 = nn.Linear(embedding_dim, mlp_dim)\n self.lin2 = nn.Linear(mlp_dim, embedding_dim)\n self.act = act()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.lin2(self.act(self.lin1(x)))"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Type
from .common import LayerNorm2d, MLPBlock
from monai.networks.blocks import PatchEmbed | 1,205 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
class ImageEncoderViT(nn.Module):
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 1,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.0,
out_chans: int = 256,
qkv_bias: bool = True,
norm_layer: Type[nn.Module] = nn.LayerNorm,
act_layer: Type[nn.Module] = nn.GELU,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
global_attn_indexes: Tuple[int, ...] = (),
) -> None:
"""
Args:
img_size (int): Input image size.
patch_size (int): Patch size.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
depth (int): Depth of ViT.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_abs_pos (bool): If True, use absolute positional embeddings.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks.
global_attn_indexes (list): Indexes for blocks using global attention.
"""
super().__init__()
self.img_size = img_size
# self.patch_embed = PatchEmbed(
# kernel_size=(patch_size, patch_size),
# stride=(patch_size, patch_size),
# in_chans=in_chans,
# embed_dim=embed_dim,
# )
self.patch_embed = PatchEmbed(
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
spatial_dims=3,
)
self.pos_embed: Optional[nn.Parameter] = None
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(
torch.zeros(1, img_size // patch_size, img_size // patch_size, img_size // patch_size, embed_dim)
)
self.blocks = nn.ModuleList()
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=(img_size // patch_size, img_size // patch_size),
)
self.blocks.append(block)
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
out_chans,
kernel_size=1,
bias=False,
),
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
class ImageEncoderViT(nn.Module):
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 1,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.0,
out_chans: int = 256,
qkv_bias: bool = True,
norm_layer: Type[nn.Module] = nn.LayerNorm,
act_layer: Type[nn.Module] = nn.GELU,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
global_attn_indexes: Tuple[int, ...] = (),
) -> None:
"""
Args:
img_size (int): Input image size.
patch_size (int): Patch size.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
depth (int): Depth of ViT.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_abs_pos (bool): If True, use absolute positional embeddings.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks.
global_attn_indexes (list): Indexes for blocks using global attention.
"""
super().__init__()
self.img_size = img_size
# self.patch_embed = PatchEmbed(
# kernel_size=(patch_size, patch_size),
# stride=(patch_size, patch_size),
# in_chans=in_chans,
# embed_dim=embed_dim,
# )
self.patch_embed = PatchEmbed(
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
spatial_dims=3,
)
self.pos_embed: Optional[nn.Parameter] = None
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(
torch.zeros(1, img_size // patch_size, img_size // patch_size, img_size // patch_size, embed_dim)
)
self.blocks = nn.ModuleList()
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=(img_size // patch_size, img_size // patch_size),
)
self.blocks.append(block)
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
out_chans,
kernel_size=1,
bias=False,
), | LayerNorm2d(out_chans), | 0 | 2023-11-10 08:25:37+00:00 | 2k |
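Note on the record above: a quick sanity check on the 3D patch-embedding shapes. With the defaults, pos_embed reserves one embedding per patch token on a cubic grid:

img_size, patch_size, embed_dim = 1024, 16, 768
grid = img_size // patch_size  # 64 patch tokens per spatial axis
pos_embed_shape = (1, grid, grid, grid, embed_dim)
print(pos_embed_shape)  # (1, 64, 64, 64, 768), matching self.pos_embed above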
xk-huang/segment-caption-anything | scripts/tools/utils/git_utils/tsv_io.py | [
{
"identifier": "qd_tqdm",
"path": "scripts/tools/utils/git_utils/common.py",
"snippet": "def qd_tqdm(*args, **kwargs):\n desc = kwargs.get(\"desc\", \"\")\n import inspect\n\n frame = inspect.currentframe()\n frames = inspect.getouterframes(frame)\n frame = frames[1].frame\n line_number = frame.f_lineno\n fname = op.basename(frame.f_code.co_filename)\n message = \"{}:{}\".format(fname, line_number)\n\n if \"desc\" in kwargs:\n kwargs[\"desc\"] = message + \" \" + desc\n else:\n kwargs[\"desc\"] = message\n\n if \"mininterval\" not in kwargs:\n # every 2 seconds; default is 0.1 second which is too frequent\n kwargs[\"mininterval\"] = 2\n\n return tqdm(*args, **kwargs)"
},
{
"identifier": "dict_update_path_value",
"path": "scripts/tools/utils/git_utils/common.py",
"snippet": "def dict_update_path_value(d, p, v):\n ps = p.split(\"$\")\n while True:\n if len(ps) == 1:\n d[ps[0]] = v\n break\n else:\n if ps[0] not in d:\n d[ps[0]] = {}\n d = d[ps[0]]\n ps = ps[1:]"
},
{
"identifier": "dict_get_path_value",
"path": "scripts/tools/utils/git_utils/common.py",
"snippet": "def dict_get_path_value(d, p, with_type=False):\n ps = p.split(\"$\")\n cur_dict = d\n while True:\n if len(ps) > 0:\n k = dict_parse_key(ps[0], with_type)\n if isinstance(cur_dict, (tuple, list)):\n cur_dict = cur_dict[int(k)]\n else:\n cur_dict = cur_dict[k]\n ps = ps[1:]\n else:\n return cur_dict"
},
{
"identifier": "get_all_path",
"path": "scripts/tools/utils/git_utils/common.py",
"snippet": "def get_all_path(d, with_type=False, leaf_only=True, with_list=True):\n assert not with_type, \"will not support\"\n all_path = []\n\n if isinstance(d, dict):\n for k, v in d.items():\n all_sub_path = get_all_path(\n v, with_type, leaf_only=leaf_only, with_list=with_list\n )\n all_path.extend([k + \"$\" + p for p in all_sub_path])\n if not leaf_only or len(all_sub_path) == 0:\n all_path.append(k)\n elif (isinstance(d, tuple) or isinstance(d, list)) and with_list:\n for i, _v in enumerate(d):\n all_sub_path = get_all_path(\n _v,\n with_type,\n leaf_only=leaf_only,\n with_list=with_list,\n )\n all_path.extend([\"{}$\".format(i) + p for p in all_sub_path])\n if not leaf_only or len(all_sub_path) == 0:\n all_path.append(\"{}\".format(i))\n return all_path"
},
{
"identifier": "load_from_yaml_str",
"path": "scripts/tools/utils/git_utils/common.py",
"snippet": "def load_from_yaml_str(s):\n return yaml.load(s, Loader=yaml.UnsafeLoader)"
}
] | import numpy as np
import shutil
import mmap
import time
import logging
import types
import os
import os.path as op
import subprocess
import tempfile
import hashlib
import logging
import struct
from .common import qd_tqdm as tqdm
from .common import (
dict_update_path_value,
dict_get_path_value,
get_all_path,
load_from_yaml_str,
)
from azfuse import File
from contextlib import contextmanager
from datasets.utils.filelock import FileLock
from urllib.parse import urlparse, urlunparse
from pathos.multiprocessing import ProcessingPool as Pool | 1,305 |
# NOTE(xiaoke): Modified. Try to use azfuse.File if possible.
try:
except ImportError:
File = types.SimpleNamespace()
File.open = open
File.get_file_size = lambda x: os.stat(x).st_size
logger = logging.getLogger(__name__)
def concat_files(ins, out):
File.prepare(ins)
with File.open(out, "wb") as fp_out:
for i, f in enumerate(ins):
logging.info("concatenating {}/{} - {}".format(i, len(ins), f))
with File.open(f, "rb") as fp_in:
shutil.copyfileobj(fp_in, fp_out, 1024 * 1024 * 10)
def concat_tsv_files(tsvs, out_tsv):
if len(tsvs) == 1 and tsvs[0] == out_tsv:
return
File.prepare(tsvs)
concat_files(tsvs, out_tsv)
sizes = [File.get_file_size(t) for t in tsvs]
sizes = np.cumsum(sizes)
sizes = [0] + sizes[:-1].tolist()
concate_lineidx_8b(sizes, tsvs, out_tsv)
def get_tmp_folder():
folder = os.environ.get("GIT_TMP_FOLDER", "/tmp")
return folder
def parallel_map(func, all_task, num_worker=16):
if num_worker > 0:
with Pool(num_worker) as m:
result = m.map(func, all_task)
return result
else:
result = []
for t in all_task:
result.append(func(t))
return result
def ensure_remove_file(d):
if op.isfile(d) or op.islink(d):
try:
os.remove(d)
except:
pass
def concate_lineidx_8b(sizes, tsvs, out_tsv):
File.prepare(tsvs)
folder = get_tmp_folder()
def row_processor_8b(row):
offset, in_tsv, out_tsv = row
|
# NOTE(xiaoke): Modified. Try to use azfuse.File if possible.
try:
except ImportError:
File = types.SimpleNamespace()
File.open = open
File.get_file_size = lambda x: os.stat(x).st_size
logger = logging.getLogger(__name__)
def concat_files(ins, out):
File.prepare(ins)
with File.open(out, "wb") as fp_out:
for i, f in enumerate(ins):
logging.info("concatenating {}/{} - {}".format(i, len(ins), f))
with File.open(f, "rb") as fp_in:
shutil.copyfileobj(fp_in, fp_out, 1024 * 1024 * 10)
def concat_tsv_files(tsvs, out_tsv):
if len(tsvs) == 1 and tsvs[0] == out_tsv:
return
File.prepare(tsvs)
concat_files(tsvs, out_tsv)
sizes = [File.get_file_size(t) for t in tsvs]
sizes = np.cumsum(sizes)
sizes = [0] + sizes[:-1].tolist()
concate_lineidx_8b(sizes, tsvs, out_tsv)
def get_tmp_folder():
folder = os.environ.get("GIT_TMP_FOLDER", "/tmp")
return folder
def parallel_map(func, all_task, num_worker=16):
if num_worker > 0:
with Pool(num_worker) as m:
result = m.map(func, all_task)
return result
else:
result = []
for t in all_task:
result.append(func(t))
return result
def ensure_remove_file(d):
if op.isfile(d) or op.islink(d):
try:
os.remove(d)
except:
pass
def concate_lineidx_8b(sizes, tsvs, out_tsv):
File.prepare(tsvs)
folder = get_tmp_folder()
def row_processor_8b(row):
offset, in_tsv, out_tsv = row | fbar = tqdm(unit_scale=True) | 1 | 2023-11-17 14:10:41+00:00 | 2k |
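Note on the record above: it imports struct and builds ".lineidx.8b"-style indexes; assuming the common convention of one little-endian int64 byte offset per row, shifting an index while concatenating TSVs is fixed-width arithmetic. A sketch under that assumption (the helper name is mine):

import struct

def shift_lineidx_8b(data: bytes, base: int) -> bytes:
    # reinterpret the blob as n int64 offsets, add the concatenation base, repack
    n = len(data) // 8
    offsets = struct.unpack(f"<{n}q", data)
    return struct.pack(f"<{n}q", *(off + base for off in offsets))

shifted = shift_lineidx_8b(struct.pack("<2q", 0, 100), 1024)
print(struct.unpack("<2q", shifted))  # (1024, 1124)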
fjzzq2002/is-my-problem-new | src/scrapper/codeforces.py | [
{
"identifier": "read_problems",
"path": "src/utils.py",
"snippet": "def read_problems(filename):\n # read as a json\n with open(filename) as f:\n problems = json.load(f)\n return [x for x in problems if len(x[\"statement\"].strip()) >= 5]"
},
{
"identifier": "dump_json_safe",
"path": "src/utils.py",
"snippet": "def dump_json_safe(obj, filename):\n import tempfile\n\n with tempfile.NamedTemporaryFile(mode=\"w\", delete=False) as f:\n json.dump(obj, f)\n shutil.move(f.name, filename)"
},
{
"identifier": "get_text",
"path": "src/utils.py",
"snippet": "def get_text(tag: bs4.Tag) -> str:\n _inline_elements = {\n \"a\",\n \"span\",\n \"em\",\n \"strong\",\n \"u\",\n \"i\",\n \"font\",\n \"mark\",\n \"label\",\n \"s\",\n \"sub\",\n \"sup\",\n \"tt\",\n \"bdo\",\n \"button\",\n \"cite\",\n \"del\",\n \"b\",\n \"a\",\n \"font\",\n }\n\n def _get_text(tag: bs4.Tag) -> typing.Generator:\n for child in tag.children:\n if isinstance(child, bs4.Tag):\n # if the tag is a block type tag then yield new lines before after\n is_block_element = child.name not in _inline_elements\n if is_block_element:\n yield \"\\n\"\n yield from [\"\\n\"] if child.name == \"br\" else _get_text(child)\n if is_block_element:\n yield \"\\n\"\n elif isinstance(child, bs4.NavigableString):\n yield child.string\n\n return \"\".join(_get_text(tag))"
}
] | from ..utils import read_problems, dump_json_safe, get_text
from bs4 import BeautifulSoup
from tqdm.auto import tqdm
import json
import os
import requests
import time
import random | 992 |
scrapped_problems = []
try:
scrapped_problems = read_problems("problems/codeforces.json")
print(f"Recalled {len(scrapped_problems)} scrapped problems")
except:
print("Cannot find scrapped problems")
scrapped_uids = set(p["uid"] for p in scrapped_problems)
codeforces_endpoint = "https://codeforces.com/api/problemset.problems"
# get list of problems
list_problems = requests.get(codeforces_endpoint).json()["result"]["problems"]
# the website is down, read problems.txt instead
# with open('problems.txt') as f:
# list_problems = json.load(f)['result']['problems']
print("# problems:", len(list_problems))
# a scrapper for codeforces
def scrap_problem(contestId, index, rating, tags, uid):
url = f"https://codeforces.com/contest/{contestId}/problem/{index}"
response = requests.get(url, timeout=30)
soup = BeautifulSoup(response.content, "html.parser")
statement = soup.find(class_="problem-statement")
try:
statement.find(class_="header").decompose()
except:
pass
statement_body = statement.find("div")
statement_body = get_text(statement_body)
# \r -> \n, remove duplicate \n, strip
statement_body = (
statement_body.replace("\r", "\n")
.replace("\n\n", "\n")
.replace("$$$", "$")
.strip()
)
problem = {
"uid": uid,
"url": url,
"tags": tags,
# 'raw': str(response.content),
"statement": statement_body,
"contestId": contestId,
"index": index,
"rating": rating,
}
return problem
for problem in tqdm(list_problems):
contestId, index, rating, tags = (
problem["contestId"],
problem["index"],
problem.get("rating", -1),
problem["tags"],
)
uid = f"Codeforces{contestId}{index}"
if uid in scrapped_uids:
continue
print(f"Scrapping {uid}")
result = None
try:
result = scrap_problem(contestId, index, rating, tags, uid)
except Exception as e:
print("Error while scrapping:", e)
if result is not None:
scrapped_problems.append(result)
time.sleep(0.1)
# save to file every 10 problems
if random.random() < 0.1:
|
scrapped_problems = []
try:
scrapped_problems = read_problems("problems/codeforces.json")
print(f"Recalled {len(scrapped_problems)} scrapped problems")
except:
print("Cannot find scrapped problems")
scrapped_uids = set(p["uid"] for p in scrapped_problems)
codeforces_endpoint = "https://codeforces.com/api/problemset.problems"
# get list of problems
list_problems = requests.get(codeforces_endpoint).json()["result"]["problems"]
# the website is down, read problems.txt instead
# with open('problems.txt') as f:
# list_problems = json.load(f)['result']['problems']
print("# problems:", len(list_problems))
# a scrapper for codeforces
def scrap_problem(contestId, index, rating, tags, uid):
url = f"https://codeforces.com/contest/{contestId}/problem/{index}"
response = requests.get(url, timeout=30)
soup = BeautifulSoup(response.content, "html.parser")
statement = soup.find(class_="problem-statement")
try:
statement.find(class_="header").decompose()
except:
pass
statement_body = statement.find("div")
statement_body = get_text(statement_body)
# \r -> \n, remove duplicate \n, strip
statement_body = (
statement_body.replace("\r", "\n")
.replace("\n\n", "\n")
.replace("$$$", "$")
.strip()
)
problem = {
"uid": uid,
"url": url,
"tags": tags,
# 'raw': str(response.content),
"statement": statement_body,
"contestId": contestId,
"index": index,
"rating": rating,
}
return problem
for problem in tqdm(list_problems):
contestId, index, rating, tags = (
problem["contestId"],
problem["index"],
problem.get("rating", -1),
problem["tags"],
)
uid = f"Codeforces{contestId}{index}"
if uid in scrapped_uids:
continue
print(f"Scrapping {uid}")
result = None
try:
result = scrap_problem(contestId, index, rating, tags, uid)
except Exception as e:
print("Error while scrapping:", e)
if result is not None:
scrapped_problems.append(result)
time.sleep(0.1)
# save to file every 10 problems
if random.random() < 0.1: | dump_json_safe(scrapped_problems, "problems/codeforces.json") | 1 | 2023-11-15 07:58:49+00:00 | 2k |
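Note on the record above: the closing `random.random() < 0.1` guard amortizes disk writes to roughly one save per ten scraped problems. A deterministic variant with the same atomic write-then-move trick as the record's dump_json_safe; all names here are illustrative:

import json
import shutil
import tempfile

def dump_json_atomic(obj, path: str) -> None:
    with tempfile.NamedTemporaryFile("w", delete=False) as f:
        json.dump(obj, f)
    shutil.move(f.name, path)  # rename into place so readers never see a partial file

results = []
for i, item in enumerate(range(25)):  # stand-in work items
    results.append(item * item)
    if (i + 1) % 10 == 0:  # save every tenth item instead of with probability 0.1
        dump_json_atomic(results, "checkpoint.json")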
p0p4k/pflowtts_pytorch | pflow/utils/generate_data_statistics.py | [
{
"identifier": "TextMelDataModule",
"path": "pflow/data/text_mel_datamodule.py",
"snippet": "class TextMelDataModule(LightningDataModule):\n def __init__( # pylint: disable=unused-argument\n self,\n name,\n train_filelist_path,\n valid_filelist_path,\n batch_size,\n num_workers,\n pin_memory,\n cleaners,\n add_blank,\n n_spks,\n n_fft,\n n_feats,\n sample_rate,\n hop_length,\n win_length,\n f_min,\n f_max,\n data_statistics,\n seed,\n ):\n super().__init__()\n\n # this line allows to access init params with 'self.hparams' attribute\n # also ensures init params will be stored in ckpt\n self.save_hyperparameters(logger=False)\n\n def setup(self, stage: Optional[str] = None): # pylint: disable=unused-argument\n \"\"\"Load data. Set variables: `self.data_train`, `self.data_val`, `self.data_test`.\n\n This method is called by lightning with both `trainer.fit()` and `trainer.test()`, so be\n careful not to execute things like random split twice!\n \"\"\"\n # load and split datasets only if not loaded already\n\n self.trainset = TextMelDataset( # pylint: disable=attribute-defined-outside-init\n self.hparams.train_filelist_path,\n self.hparams.n_spks,\n self.hparams.cleaners,\n self.hparams.add_blank,\n self.hparams.n_fft,\n self.hparams.n_feats,\n self.hparams.sample_rate,\n self.hparams.hop_length,\n self.hparams.win_length,\n self.hparams.f_min,\n self.hparams.f_max,\n self.hparams.data_statistics,\n self.hparams.seed,\n )\n self.validset = TextMelDataset( # pylint: disable=attribute-defined-outside-init\n self.hparams.valid_filelist_path,\n self.hparams.n_spks,\n self.hparams.cleaners,\n self.hparams.add_blank,\n self.hparams.n_fft,\n self.hparams.n_feats,\n self.hparams.sample_rate,\n self.hparams.hop_length,\n self.hparams.win_length,\n self.hparams.f_min,\n self.hparams.f_max,\n self.hparams.data_statistics,\n self.hparams.seed,\n )\n\n def train_dataloader(self):\n return DataLoader(\n dataset=self.trainset,\n batch_size=self.hparams.batch_size,\n num_workers=self.hparams.num_workers,\n pin_memory=self.hparams.pin_memory,\n shuffle=True,\n collate_fn=TextMelBatchCollate(self.hparams.n_spks),\n )\n\n def val_dataloader(self):\n return DataLoader(\n dataset=self.validset,\n batch_size=self.hparams.batch_size,\n num_workers=self.hparams.num_workers,\n pin_memory=self.hparams.pin_memory,\n shuffle=False,\n collate_fn=TextMelBatchCollate(self.hparams.n_spks),\n )\n\n def teardown(self, stage: Optional[str] = None):\n \"\"\"Clean up after fit or test.\"\"\"\n pass # pylint: disable=unnecessary-pass\n\n def state_dict(self): # pylint: disable=no-self-use\n \"\"\"Extra things to save to checkpoint.\"\"\"\n return {}\n\n def load_state_dict(self, state_dict: Dict[str, Any]):\n \"\"\"Things to do when loading checkpoint.\"\"\"\n pass # pylint: disable=unnecessary-pass"
},
{
"identifier": "pylogger",
"path": "pflow/utils/logging_utils.py",
"snippet": "def log_hyperparameters(object_dict: Dict[str, Any]) -> None:"
}
] | import os
import sys
import argparse
import json
import sys
import rootutils
import torch
from pathlib import Path
from hydra import compose, initialize
from omegaconf import open_dict
from tqdm.auto import tqdm
from pflow.data.text_mel_datamodule import TextMelDataModule
from pflow.utils.logging_utils import pylogger | 992 | r"""
The file creates a pickle file where the values needed for loading the dataset are stored, and the model can load it
when needed.
Parameters from hparam.py will be used
"""
sys.path.append(os.path.join(os.path.dirname(__file__), "../.."))
| r"""
The file creates a pickle file where the values needed for loading of dataset is stored and the model can load it
when needed.
Parameters from hparam.py will be used
"""
sys.path.append(os.path.join(os.path.dirname(__file__), "../.."))
| log = pylogger.get_pylogger(__name__) | 1 | 2023-11-11 16:08:17+00:00 | 2k |
theroyallab/tabbyAPI | start.py | [
{
"identifier": "convert_args_to_dict",
"path": "args.py",
"snippet": "def convert_args_to_dict(args: argparse.Namespace, parser: argparse.ArgumentParser):\n \"\"\"Broad conversion of surface level arg groups to dictionaries\"\"\"\n\n arg_groups = {}\n for group in parser._action_groups:\n group_dict = {}\n for arg in group._group_actions:\n value = getattr(args, arg.dest, None)\n if value is not None:\n group_dict[arg.dest] = value\n\n arg_groups[group.title] = group_dict\n\n return arg_groups"
},
{
"identifier": "init_argparser",
"path": "args.py",
"snippet": "def init_argparser():\n \"\"\"Creates an argument parser that any function can use\"\"\"\n\n parser = argparse.ArgumentParser(\n epilog=\"These args are only for a subset of the config. \"\n + \"Please edit config.yml for all options!\"\n )\n add_network_args(parser)\n add_model_args(parser)\n add_logging_args(parser)\n add_config_args(parser)\n\n return parser"
}
] | import argparse
import os
import pathlib
import subprocess
from args import convert_args_to_dict, init_argparser
from main import entrypoint | 698 | """Utility to automatically upgrade and start the API"""
def get_requirements_file():
"""Fetches the appropriate requirements file depending on the GPU"""
requirements_name = "requirements-nowheel"
ROCM_PATH = os.environ.get("ROCM_PATH")
CUDA_PATH = os.environ.get("CUDA_PATH")
# TODO: Check if the user has an AMD gpu on windows
if ROCM_PATH:
requirements_name = "requirements-amd"
# Also override env vars for ROCm support on non-supported GPUs
os.environ["ROCM_PATH"] = "/opt/rocm"
os.environ["HSA_OVERRIDE_GFX_VERSION"] = "10.3.0"
os.environ["HCC_AMDGPU_TARGET"] = "gfx1030"
elif CUDA_PATH:
cuda_version = pathlib.Path(CUDA_PATH).name
if "12" in cuda_version:
requirements_name = "requirements"
elif "11" in cuda_version:
requirements_name = "requirements-cu118"
return requirements_name
def add_start_args(parser: argparse.ArgumentParser):
"""Add start script args to the provided parser"""
start_group = parser.add_argument_group("start")
start_group.add_argument(
"-iu",
"--ignore-upgrade",
action="store_true",
help="Ignore requirements upgrade",
)
start_group.add_argument(
"-nw",
"--nowheel",
action="store_true",
help="Don't upgrade wheel dependencies (exllamav2, torch)",
)
if __name__ == "__main__":
subprocess.run(["pip", "-V"])
# Create an argparser and add extra startup script args
parser = init_argparser()
add_start_args(parser)
args = parser.parse_args()
if args.ignore_upgrade:
print("Ignoring pip dependency upgrade due to user request.")
else:
requirements_file = (
"requirements-nowheel" if args.nowheel else get_requirements_file()
)
subprocess.run(["pip", "install", "-U", "-r", f"{requirements_file}.txt"])
# Import entrypoint after installing all requirements
| """Utility to automatically upgrade and start the API"""
def get_requirements_file():
"""Fetches the appropriate requirements file depending on the GPU"""
requirements_name = "requirements-nowheel"
ROCM_PATH = os.environ.get("ROCM_PATH")
CUDA_PATH = os.environ.get("CUDA_PATH")
# TODO: Check if the user has an AMD gpu on windows
if ROCM_PATH:
requirements_name = "requirements-amd"
# Also override env vars for ROCm support on non-supported GPUs
os.environ["ROCM_PATH"] = "/opt/rocm"
os.environ["HSA_OVERRIDE_GFX_VERSION"] = "10.3.0"
os.environ["HCC_AMDGPU_TARGET"] = "gfx1030"
elif CUDA_PATH:
cuda_version = pathlib.Path(CUDA_PATH).name
if "12" in cuda_version:
requirements_name = "requirements"
elif "11" in cuda_version:
requirements_name = "requirements-cu118"
return requirements_name
def add_start_args(parser: argparse.ArgumentParser):
"""Add start script args to the provided parser"""
start_group = parser.add_argument_group("start")
start_group.add_argument(
"-iu",
"--ignore-upgrade",
action="store_true",
help="Ignore requirements upgrade",
)
start_group.add_argument(
"-nw",
"--nowheel",
action="store_true",
help="Don't upgrade wheel dependencies (exllamav2, torch)",
)
if __name__ == "__main__":
subprocess.run(["pip", "-V"])
# Create an argparser and add extra startup script args
parser = init_argparser()
add_start_args(parser)
args = parser.parse_args()
if args.ignore_upgrade:
print("Ignoring pip dependency upgrade due to user request.")
else:
requirements_file = (
"requirements-nowheel" if args.nowheel else get_requirements_file()
)
subprocess.run(["pip", "install", "-U", "-r", f"{requirements_file}.txt"])
# Import entrypoint after installing all requirements
| entrypoint(convert_args_to_dict(args, parser)) | 0 | 2023-11-10 05:54:02+00:00 | 2k |
zorazrw/filco | measure_ctxs.py | [
{
"identifier": "calc_cxmi_score",
"path": "cxmi.py",
"snippet": "def calc_cxmi_score(\n model: AutoModelForSeq2SeqLM,\n tokenizer: AutoTokenizer,\n answer: str,\n base_input: str,\n ctx_input: str,\n apply_sigmoid: bool = False,\n) -> float:\n \"\"\"Compute the CXMI score.\"\"\"\n base_probs = get_output_probs(model, tokenizer, base_input, answer)\n ctx_probs = get_output_probs(model, tokenizer, ctx_input, answer)\n diff = sent_wise_diff(base_scores=base_probs, ctx_scores=ctx_probs)\n if apply_sigmoid:\n diff = sigmoid(diff)\n return diff"
},
{
"identifier": "get_example_inputs",
"path": "cxmi.py",
"snippet": "def get_example_inputs(\n question: str,\n context: str,\n answers: list[str],\n question_prefix: str = \"question\",\n context_prefix: str = \"context\",\n) -> dict:\n \"\"\"Get example inputs for the generation model.\"\"\"\n base_input = get_input_text(\n question,\n context=None,\n question_prefix=question_prefix,\n context_prefix=context_prefix,\n )\n ctx_input = get_input_text(\n question,\n context=context,\n question_prefix=question_prefix,\n context_prefix=context_prefix,\n )\n return {\n \"base_input\": base_input,\n \"ctx_input\": ctx_input,\n \"answers\": answers,\n }"
},
{
"identifier": "calc_unigram_f1",
"path": "eval.py",
"snippet": "def calc_unigram_f1(text: str, answers: list[str], field: str = \"f1\") -> float:\n \"\"\"Calculate unigram f1 score between the text and reference answers.\"\"\"\n norm_pred = normalize_text(text)\n norm_answers = [normalize_text(ans) for ans in answers]\n common_tokens = [\n Counter(norm_pred) & Counter(norm_ans) for norm_ans in norm_answers\n ]\n num_same = [sum(common.values()) for common in common_tokens]\n\n score_list = []\n for i, num in enumerate(num_same):\n if num == 0:\n score_list.append(0.0)\n else:\n p = 1.0 * num / len(norm_pred)\n r = 1.0 * num / len(norm_answers[i])\n f1 = 2 * p * r / (p + r)\n if field == \"precision\":\n score_list.append(p)\n elif field == \"recall\":\n score_list.append(r)\n elif field == \"f1\":\n score_list.append(f1)\n else:\n raise ValueError(f\"Unknown field: {field}\")\n return max(score_list)"
},
{
"identifier": "has_answer",
"path": "eval.py",
"snippet": "def has_answer(text: str, answers: list[str]) -> float:\n \"\"\"Check if text contains any of the answers.\"\"\"\n return float(any([(ans.lower() in text.lower()) for ans in answers]))"
},
{
"identifier": "load_dataset",
"path": "utils.py",
"snippet": "def load_dataset(path: str) -> list[dict]:\n \"\"\"Load dataset from JSON or JSONL file.\"\"\"\n if path.endswith(\".json\"):\n return json.load(open(path, \"r\"))\n elif path.endswith(\".jsonl\"):\n return [json.loads(line.strip()) for line in open(path, \"r\")]\n else:\n extension = path.split(\".\")[-1]\n raise ValueError(f\"File extension [{extension}] not valid.\")"
},
{
"identifier": "write_dataset",
"path": "utils.py",
"snippet": "def write_dataset(path: str, dataset: list[dict]):\n \"\"\"Write dataset to JSON or JSONL file.\"\"\"\n if path.endswith(\".json\"):\n json.dump(dataset, open(path, \"w\"))\n elif path.endswith(\".jsonl\"):\n with open(path, \"w\") as fw:\n for res_dict in dataset:\n fw.write(json.dumps(res_dict) + \"\\n\")\n else:\n extension = path.split(\".\")[-1]\n raise ValueError(f\"File extension [{extension}] not valid.\")"
}
] | import argparse
import torch
from nltk.tokenize import sent_tokenize
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from cxmi import calc_cxmi_score, get_example_inputs
from eval import calc_unigram_f1, has_answer
from utils import load_dataset, write_dataset | 1,119 | """Calculate Scores of Individual Sentences in Retrieved Passages."""
def calc_cxmi(
text: str,
question: str,
answers: list[str],
tokenizer: AutoTokenizer,
model: AutoModelForSeq2SeqLM,
) -> float:
"""Calculate CXMI score for a context text."""
proc_inputs = get_example_inputs(
question=args.prefix + question,
context=text,
answers=answers,
)
cxmi_score = calc_cxmi_score(
model=model,
tokenizer=tokenizer,
answer=proc_inputs["answers"][0],
base_input=proc_inputs["base_input"],
ctx_input=proc_inputs["ctx_input"],
apply_sigmoid=True,
)
return cxmi_score
def main():
"""Run the main context measuring function."""
# load dataset
| """Calculate Scores of Individual Sentences in Retrieved Passages."""
def calc_cxmi(
text: str,
question: str,
answers: list[str],
tokenizer: AutoTokenizer,
model: AutoModelForSeq2SeqLM,
) -> float:
"""Calculate CXMI score for a context text."""
proc_inputs = get_example_inputs(
question=args.prefix + question,
context=text,
answers=answers,
)
cxmi_score = calc_cxmi_score(
model=model,
tokenizer=tokenizer,
answer=proc_inputs["answers"][0],
base_input=proc_inputs["base_input"],
ctx_input=proc_inputs["ctx_input"],
apply_sigmoid=True,
)
return cxmi_score
def main():
"""Run the main context measuring function."""
# load dataset | dataset = load_dataset(args.dataset_path) | 4 | 2023-11-14 21:18:30+00:00 | 2k |
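Note on the record above: the quantity behind calc_cxmi_score contrasts the answer's token log-probabilities with and without the retrieved context, optionally squashed through a sigmoid. A compact restatement; the mean reduction is an assumption, the repo's sent_wise_diff defines the real one:

import math

def cxmi(base_logprobs: list, ctx_logprobs: list, apply_sigmoid: bool = True) -> float:
    # positive when the context raises the answer's probability
    diff = sum(c - b for b, c in zip(base_logprobs, ctx_logprobs)) / len(base_logprobs)
    return 1 / (1 + math.exp(-diff)) if apply_sigmoid else diff

print(cxmi([-2.0, -1.5], [-1.0, -0.5]))  # helpful context -> score above 0.5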
ShipBit/wingman-ai | gui/views/context_view.py | [
{
"identifier": "ContextSwitcher",
"path": "gui/sections/context_switcher.py",
"snippet": "class ContextSwitcher(ctk.CTkFrame):\n# class ContextSwitcher(ctk.CTkScrollableFrame):\n def __init__(self, master, **kwargs):\n super().__init__(master, **kwargs)\n self.grid_columnconfigure(0, weight=1)\n self.master = master\n self.contexts = master.core.config_manager.contexts\n self.context_buttons = {}\n self.active_context = \"\"\n\n self.spacer = ctk.CTkLabel(self, text=\"\")\n self.spacer.grid(row=0, column=0)\n\n for i, context_name in enumerate(self.contexts):\n context_button = IconButton(self,\n icon=f\"context-icon_{CONTEXT_COLORS[i % len(CONTEXT_COLORS)]}\" if context_name else \"context-icon\",\n themed=False,\n command=lambda c=context_name: self.activate_context(c))\n context_button.grid(row=i+1, column=0, padx=14, pady=14)\n self.context_buttons[context_name] = context_button\n if not context_name:\n self.__set_context_button_state(\"\", False)\n\n\n def __set_context_button_state(self, context, active=True):\n context_button = self.context_buttons.get(context)\n if context_button:\n context_button.configure(state=\"normal\" if active else \"disabled\",\n fg_color=\"transparent\" if active else (\"grey60\", \"grey40\"))\n\n\n def activate_context(self, context):\n self.__set_context_button_state(self.active_context, True)\n\n if self.master:\n update_context = getattr(self.master, \"update_context\", None)\n if callable(update_context):\n update_context(context)\n\n self.active_context = context\n self.__set_context_button_state(self.active_context, False)"
},
{
"identifier": "ContextRunner",
"path": "gui/sections/context_runner.py",
"snippet": "class ContextRunner(ctk.CTkFrame):\n def __init__(self, master, context=\"\", **kwargs):\n super().__init__(master, **kwargs)\n\n self.core = master.core\n self.core.load_context(context)\n self.status_var = ctk.StringVar(self, \"Inactive\", \"status\")\n tower = self.core.tower\n auto_run = self.core.config_manager.gui_config.get(\"auto-run\", \"off\") == \"on\"\n\n self.grid_columnconfigure(0, weight=1)\n self.grid_rowconfigure(3, weight=1)\n\n context_title = (\n context.title().replace(\"_\", \" \").strip() if context else \"Default\"\n )\n self.title = ctk.CTkLabel(\n self,\n text=context_title,\n font=(\"TkHeadingFont\", 20, \"bold\"),\n text_color=\"#EB154D\",\n )\n self.title.grid(row=0, column=0, padx=20, pady=10, sticky=\"w\")\n\n # TODO: Make this a component\n self.status = ctk.CTkLabel(\n self,\n textvariable=self.status_var,\n anchor=\"w\",\n fg_color=(\"grey70\", \"grey30\"),\n corner_radius=10,\n width=65,\n pady=3,\n )\n self.status.grid(row=0, column=0, padx=20, pady=10, sticky=\"e\")\n self.status_icon_active = Icon(\"state_active\", 16, False)\n self.status_icon_inactive = Icon(\"state_inactive\", 16, False)\n self.status_led = ctk.CTkLabel(\n self, image=self.status_icon_inactive, text=\"\", fg_color=\"transparent\"\n )\n self.status_led.grid(row=0, column=0, padx=95, pady=10, sticky=\"e\")\n\n wingmen = []\n if tower:\n wingmen = tower.get_wingmen()\n self.wingmen_list = WingmenList(self, wingmen=wingmen)\n self.wingmen_list.grid(row=1, column=0, padx=20, pady=10, sticky=\"ew\")\n\n broken_wingmen = []\n if tower:\n broken_wingmen = tower.get_broken_wingmen()\n if len(broken_wingmen) > 0:\n self.broken_wingmen_list = WingmenList(\n self, wingmen=broken_wingmen, broken=True\n )\n self.broken_wingmen_list.grid(\n row=2, column=0, padx=20, pady=10, sticky=\"ew\"\n )\n\n self.terminal = ctk.CTkTextbox(self)\n self.terminal.grid(row=3, column=0, padx=20, pady=10, sticky=\"nesw\")\n self.terminal.configure(state=\"disabled\", wrap=\"word\")\n printr.set_output(\"main\", self.terminal)\n if len(wingmen) and not auto_run:\n printr.print(\n f\"Press 'Run' to start your wingm{'e' if len(wingmen) > 1 else 'a'}n!\"\n )\n\n self.button = ctk.CTkButton(\n self,\n text=\"Run\",\n command=self.toggle_listener,\n height=45,\n font=(\"TkHeadingFont\", 22, \"bold\"),\n )\n self.button.grid(row=4, column=0, padx=20, pady=10, sticky=\"ew\")\n if not tower:\n printr.print_err(\n f\"Could not load context.\\nPlease check your context configuration for `{context_title}`.\"\n )\n self.button.configure(state=\"disabled\")\n elif len(wingmen) <= 0:\n printr.print_warn(f\"No runnable Wingman found for `{context_title}`.\")\n self.button.configure(state=\"disabled\")\n elif auto_run:\n self.toggle_listener()\n\n\n def toggle_listener(self):\n if self.core.active:\n self.core.deactivate()\n self.status_var.set(\"Inactive\")\n self.status_led.configure(image=self.status_icon_inactive)\n self.button.configure(text=\"Run\")\n printr.print(\n \"Your Wingman is now inactive.\\nPress 'Run' to start listening again.\"\n )\n else:\n self.core.activate()\n self.status_var.set(\"Active\")\n self.status_led.configure(image=self.status_icon_active)\n self.button.configure(text=\"Stop\")\n printr.print(\n \"Your Wingman is now listening for commands.\\nPress 'Stop' to stop listening.\"\n )"
}
] | import customtkinter as ctk
from gui.sections.context_switcher import ContextSwitcher
from gui.sections.context_runner import ContextRunner | 1,580 |
class ContextView(ctk.CTkFrame):
def __init__(self, master, **kwargs):
super().__init__(master, **kwargs)
self.core = master.core
self.grid_columnconfigure(1, weight=1)
self.grid_rowconfigure(0, weight=1)
|
class ContextView(ctk.CTkFrame):
def __init__(self, master, **kwargs):
super().__init__(master, **kwargs)
self.core = master.core
self.grid_columnconfigure(1, weight=1)
self.grid_rowconfigure(0, weight=1)
| self.context_switcher = ContextSwitcher(self, width=88, corner_radius=0) | 0 | 2023-11-15 09:36:06+00:00 | 2k |
OliverMao/FlaskAutoApiBuilder | demo/app.py | [
{
"identifier": "Faab",
"path": "Faab/Faab.py",
"snippet": "class Faab(Flask):\n _startup_message_printed = False\n models = []\n db_config = object()\n need_register_bp = []\n\n def __init__(self, **options):\n # 初始化函数,接收一个字符串类型的参数import_name\n super().__init__(**options)\n\n def add_models(self, model: list):\n # 添加模型函数,接收一个列表类型的参数model\n self.models = model\n\n def add_db_config(self, db_config: object):\n # 添加数据库配置函数,接收一个对象类型的参数db_config\n self.db_config = db_config\n\n def add_blueprints(self, blueprint: list):\n # 添加蓝图函数,接收一个列表类型的参数blueprint\n self.need_register_bp = blueprint\n\n def faab_ready(self):\n create_app(app=self, models=self.models, db_config=self.db_config, url_prefix=\"/api\",\n blueprints=self.need_register_bp)\n CORS(self, resources=r'/*')\n self._print_startup_message()\n\n def run(\n self,\n host: str | None = None,\n port: int | None = None,\n debug: bool | None = None,\n load_dotenv: bool = True,\n **options: t.Any,\n ) -> None:\n super().run(host, port, debug, load_dotenv, **options)\n\n def _print_startup_message(self):\n if not getattr(self, '_startup_message_printed', False):\n print(\"\\033[1;32m * Faab Version:\", version)\n print('''\n ███████╗ █████╗ █████╗ ██████╗ ██╗ ██╗ ██████╗ ██████╗ ██████╗ ██╗████████╗ ██████╗███╗ ██╗ \n ██╔════╝██╔══██╗██╔══██╗██╔══██╗ ╚██╗ ██╔╝██╔═══██╗██╔═══██╗██╔══██╗██║╚══██╔══╝██╔════╝████╗ ██║ \n █████╗ ███████║███████║██████╔╝█████╗╚████╔╝ ██║ ██║██║ ██║██████╔╝██║ ██║ ██║ ██╔██╗ ██║ \n ██╔══╝ ██╔══██║██╔══██║██╔══██╗╚════╝ ╚██╔╝ ██║ ██║██║ ██║██╔══██╗██║ ██║ ██║ ██║╚██╗██║ \n ██║ ██║ ██║██║ ██║██████╔╝ ██║ ╚██████╔╝╚██████╔╝██████╔╝██║ ██║██╗╚██████╗██║ ╚████║\n ''')\n\n self._startup_message_printed = True"
},
{
"identifier": "jwt_authentication",
"path": "Faab/FaabJWT.py",
"snippet": "def jwt_authentication():\n \"\"\"\n 1. Get the token from the Authorization request header\n 2. Check whether it starts with 'Bearer '\n 3. Validate it with the jwt module\n 4. Check the result; on success, extract the token payload and store it on the g object\n \"\"\"\n auth = request.headers.get('Authorization')\n\n if auth and auth.startswith('Bearer '):\n \"extract token: indices 0-6 are taken by 'Bearer' and the space, keep everything from index 7\"\n token = auth[7:]\n \"validate token\"\n g.username = None\n try:\n \"check the validation result\"\n payload = jwt.decode(token, SALT, algorithms=['HS256'])\n \"copy the payload info onto the g object\"\n g.username = payload.get('username')\n except exceptions.ExpiredSignatureError: # 'token has expired'\n g.username = -1\n except jwt.DecodeError: # 'token authentication failed'\n g.username = -2\n except jwt.InvalidTokenError: # 'invalid token'\n g.username = -3\n # print(g.username)"
}
] | from Faab import Faab
from Faab.FaabJWT import jwt_authentication
from blueprints.test import test_bp
from blueprints.test.model import Users
import factory as fac | 1,300 | # Faab Project Demo
class DBConfig(object):
# Basic configuration
user = 'faab'
host = 'localhost'
password = 'faab'
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://%s:%s@%s:3306/%s' % (user, password, host, 'faab')
SQLALCHEMY_BINDS = {
'test': 'mysql+pymysql://%s:%s@%s:3306/%s' % (user, password, host, 'test')
}
SECRET_KEY = 'session_key'
models = [
[
{
"model": Users,
"bp": test_bp,
"url_prefix": "Users"
}
]
]
app = Faab(import_name=__name__, static_url_path='/s')
app.add_models(models)
app.add_db_config(DBConfig)
fac.register(app)
app.faab_ready()
application = app # uWSGI requires a module-level `application` object at startup
@app.before_request
def auth():
| # Faab Project Demo
class DBConfig(object):
# Basic configuration
user = 'faab'
host = 'localhost'
password = 'faab'
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://%s:%s@%s:3306/%s' % (user, password, host, 'faab')
SQLALCHEMY_BINDS = {
'test': 'mysql+pymysql://%s:%s@%s:3306/%s' % (user, password, host, 'test')
}
SECRET_KEY = 'session_key'
models = [
[
{
"model": Users,
"bp": test_bp,
"url_prefix": "Users"
}
]
]
app = Faab(import_name=__name__, static_url_path='/s')
app.add_models(models)
app.add_db_config(DBConfig)
fac.register(app)
app.faab_ready()
application = app # uWSGI requires a module-level `application` object at startup
@app.before_request
def auth(): | jwt_authentication() | 1 | 2023-11-10 09:25:44+00:00 | 2k |
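Note on the record above: after the before_request hook runs, handlers can branch on the sentinel values jwt_authentication leaves on flask.g (-1 expired, -2 decode failure, -3 invalid token, None when no Bearer header). An illustrative guard, not part of the Faab framework:

from flask import g, jsonify

def require_user():
    # returns an error response for the caller to short-circuit with, else None
    if g.username is None or g.username in (-1, -2, -3):
        return jsonify({"error": "unauthorized"}), 401
    return None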
leeyuentuen/polestar_api | custom_components/polestar_api/pypolestar/polestar.py | [
{
"identifier": "PolestarAuth",
"path": "custom_components/polestar_api/pypolestar/auth.py",
"snippet": "class PolestarAuth:\n \"\"\"base class for Polestar authentication.\"\"\"\n\n def __init__(self, username: str, password: str) -> None:\n \"\"\"Initialize the Polestar authentication.\"\"\"\n self.username = username\n self.password = password\n self.access_token = None\n self.refresh_token = None\n self.token_expiry = None\n self.latest_call_code = None\n self._client_session = httpx.AsyncClient()\n\n async def get_token(self, refresh=False) -> None:\n \"\"\"Get the token from Polestar.\"\"\"\n headers = {\"Content-Type\": \"application/json\"}\n operationName = \"getAuthToken\"\n if not refresh:\n code = await self._get_code()\n if code is None:\n return\n params = {\n \"query\": \"query getAuthToken($code: String!) { getAuthToken(code: $code) { id_token access_token refresh_token expires_in }}\",\n \"operationName\": operationName,\n \"variables\": json.dumps({\"code\": code}),\n }\n else:\n if self.refresh_token is None:\n return\n token = self.refresh_token\n operationName = \"refreshAuthToken\"\n headers[\"Authorization\"] = f\"Bearer {self.access_token}\"\n\n params = {\n \"query\": \"query refreshAuthToken($token: String!) { refreshAuthToken(token: $token) { id_token access_token refresh_token expires_in }}\",\n \"operationName\": operationName,\n \"variables\": json.dumps({\"token\": token}),\n }\n result = await self._client_session.get(\"https://pc-api.polestar.com/eu-north-1/auth/\", params=params, headers=headers, timeout=HTTPX_TIMEOUT)\n self.latest_call_code = result.status_code\n resultData = result.json()\n if result.status_code != 200 or (\"errors\" in resultData and len(resultData[\"errors\"])):\n _LOGGER.error(result)\n raise PolestarAuthException(\"Error getting token\", result.status_code)\n _LOGGER.debug(resultData)\n\n if resultData['data']:\n self.access_token = resultData['data'][operationName]['access_token']\n self.refresh_token = resultData['data'][operationName]['refresh_token']\n self.token_expiry = datetime.now(\n ) + timedelta(seconds=resultData['data'][operationName]['expires_in'])\n # ID Token\n\n _LOGGER.debug(f\"Response {self.access_token}\")\n\n async def _get_code(self) -> None:\n query_params = await self._get_resume_path()\n\n # check if code is in query_params\n if query_params.get('code'):\n return query_params.get('code')\n\n # get the resumePath\n if query_params.get('resumePath'):\n resumePath = query_params.get('resumePath')\n\n if resumePath is None:\n return\n\n params = {\n 'client_id': 'polmystar'\n }\n data = {\n 'pf.username': self.username,\n 'pf.pass': self.password\n }\n result = await self._client_session.post(\n f\"https://polestarid.eu.polestar.com/as/{resumePath}/resume/as/authorization.ping\",\n params=params,\n data=data\n )\n self.latest_call_code = result.status_code\n if result.status_code != 302:\n raise PolestarAuthException(\"Error getting code\", result.status_code)\n\n # get the realUrl\n url = result.url\n code = result.next_request.url.params.get('code')\n\n # sign-in-callback\n result = await self._client_session.get(result.next_request.url, timeout=HTTPX_TIMEOUT)\n self.latest_call_code = result.status_code\n\n if result.status_code != 200:\n _LOGGER.error(result)\n raise PolestarAuthException(\"Error getting code callback\", result.status_code)\n\n # url encode the code\n result = await self._client_session.get(url)\n self.latest_call_code = result.status_code\n\n return code\n\n async def _get_resume_path(self):\n \"\"\"Get Resume Path from Polestar.\"\"\"\n params = {\n \"response_type\": \"code\",\n \"client_id\": \"polmystar\",\n \"redirect_uri\": \"https://www.polestar.com/sign-in-callback\"\n }\n result = await self._client_session.get(\"https://polestarid.eu.polestar.com/as/authorization.oauth2\", params=params, timeout=HTTPX_TIMEOUT)\n if result.status_code in (303, 302):\n return result.next_request.url.params\n\n _LOGGER.error(result.text)\n raise PolestarAuthException(\"Error getting resume path \", result.status_code)"
},
{
"identifier": "BATTERY_DATA",
"path": "custom_components/polestar_api/pypolestar/const.py",
"snippet": "BATTERY_DATA = \"getBatteryData\""
},
{
"identifier": "CACHE_TIME",
"path": "custom_components/polestar_api/pypolestar/const.py",
"snippet": "CACHE_TIME = 30"
},
{
"identifier": "CAR_INFO_DATA",
"path": "custom_components/polestar_api/pypolestar/const.py",
"snippet": "CAR_INFO_DATA = \"getConsumerCarsV2\""
},
{
"identifier": "ODO_METER_DATA",
"path": "custom_components/polestar_api/pypolestar/const.py",
"snippet": "ODO_METER_DATA = \"getOdometerData\""
},
{
"identifier": "PolestarApiException",
"path": "custom_components/polestar_api/pypolestar/exception.py",
"snippet": "class PolestarApiException(Exception):\n \"\"\"Base class for exceptions in this module.\"\"\""
},
{
"identifier": "PolestarAuthException",
"path": "custom_components/polestar_api/pypolestar/exception.py",
"snippet": "class PolestarAuthException(Exception):\n \"\"\"Base class for exceptions in Auth module.\"\"\"\n\n error_code: int = None\n message: str = None\n\n def __init__(self, message, error_code) -> None:\n \"\"\"Initialize the Polestar API.\"\"\"\n super().__init__(message)\n self.error_code = error_code"
},
{
"identifier": "PolestarNoDataException",
"path": "custom_components/polestar_api/pypolestar/exception.py",
"snippet": "class PolestarNoDataException(Exception):\n \"\"\"Exception for no data.\"\"\""
},
{
"identifier": "PolestarNotAuthorizedException",
"path": "custom_components/polestar_api/pypolestar/exception.py",
"snippet": "class PolestarNotAuthorizedException(Exception):\n \"\"\"Exception for unauthorized call.\"\"\""
}
] | from datetime import datetime, timedelta
from .auth import PolestarAuth
from .const import BATTERY_DATA, CACHE_TIME, CAR_INFO_DATA, ODO_METER_DATA
from .exception import (
PolestarApiException,
PolestarAuthException,
PolestarNoDataException,
PolestarNotAuthorizedException,
)
import logging
import httpx | 1,584 | """Asynchronous Python client for the Polestar API."""
_LOGGER = logging.getLogger(__name__)
class PolestarApi:
"""Main class for handling connections with the Polestar API."""
def __init__(self, username: str, password: str) -> None:
"""Initialize the Polestar API."""
| """Asynchronous Python client for the Polestar API."""
_LOGGER = logging.getLogger(__name__)
class PolestarApi:
"""Main class for handling connections with the Polestar API."""
def __init__(self, username: str, password: str) -> None:
"""Initialize the Polestar API.""" | self.auth = PolestarAuth(username, password) | 0 | 2023-11-17 21:24:36+00:00 | 2k |
dubverse-ai/MahaTTS | maha_tts/models/autoregressive.py | [
{
"identifier": "config",
"path": "maha_tts/config.py",
"snippet": "class config:\n \n semantic_model_centroids = 10000 + 1\n seed_value = 3407\n\n # Text to Semantic\n t2s_position = 4096\n langs = ['english','tamil', 'telugu', 'punjabi', 'marathi', 'hindi', 'gujarati', 'bengali', 'assamese']\n lang_index = {i:j for j,i in enumerate(langs)}\n # Semantic to acoustic\n sa_timesteps_max = 1000\n\n #Acoustic Properties\n CLIP_LENGTH = 500\n MAX_WAV_VALUE=32768.0\n filter_length=1024\n hop_length=256 #256\n window = 'hann'\n win_length=1024\n n_mel_channels=80\n sampling_rate=22050\n mel_fmin=0.0\n mel_fmax=8000.0"
},
{
"identifier": "labels",
"path": "maha_tts/text/symbols.py",
"snippet": ""
},
{
"identifier": "GST",
"path": "maha_tts/models/modules.py",
"snippet": "class GST(nn.Module):\n def __init__(self,model_channels=512,num_heads=8,in_channels=80,k=2):\n super(GST,self).__init__()\n self.model_channels=model_channels\n self.num_heads=num_heads\n\n self.reference_encoder=nn.Sequential(\n nn.Conv1d(in_channels,model_channels,3,padding=1,stride=2),\n nn.Conv1d(model_channels, model_channels*k,3,padding=1,stride=2),\n AttentionBlock(model_channels*k, num_heads, relative_pos_embeddings=True, do_checkpoint=False),\n AttentionBlock(model_channels*k, num_heads, relative_pos_embeddings=True, do_checkpoint=False),\n AttentionBlock(model_channels*k, num_heads, relative_pos_embeddings=True, do_checkpoint=False),\n AttentionBlock(model_channels*k, num_heads, relative_pos_embeddings=True, do_checkpoint=False),\n AttentionBlock(model_channels*k, num_heads, relative_pos_embeddings=True, do_checkpoint=False)\n )\n\n def forward(self,x):\n x=self.reference_encoder(x)\n return x"
}
] | import os,sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import functools
from typing import Any
from torch.utils.data import Dataset,DataLoader
from transformers import GPT2Tokenizer,GPT2Config, GPT2Model, GPT2LMHeadModel
from tqdm import tqdm
from maha_tts.config import config
from maha_tts.text.symbols import labels,code_labels,text_labels,text_labels_en
from maha_tts.models.modules import GST | 919 | '''
Inspiration taken from https://github.com/neonbjb/tortoise-tts/blob/main/tortoise/models/autoregressive.py
'''
def null_position_embeddings(range, dim):
return torch.zeros((range.shape[0], range.shape[1], dim), device=range.device)
class TS_model(nn.Module):
def __init__(self,n_embed = 512, n_layer = 16, n_head = 8, n_positions = 2048, name='Smolie-in'):
super(TS_model,self).__init__()
self.vocab_size=len(labels)
self.n_positions=n_positions
self.n_embed=n_embed
self.n_layer=n_layer
self.n_head=n_head
self.name=name
self.config = GPT2Config(vocab_size=self.vocab_size,n_positions=self.n_positions,n_embd=self.n_embed,n_layer=self.n_layer,n_head=self.n_head)
self.gpt = GPT2Model(self.config)
del self.gpt.wpe
self.gpt.wpe = functools.partial(null_position_embeddings, dim=self.n_embed)
# Built-in token embeddings are unused.
del self.gpt.wte
self.GST = GST(model_channels=self.n_embed,num_heads=self.n_head,in_channels=config.n_mel_channels,k=1)
if self.name == 'Smolie-en':
self.text_head = nn.Linear(self.n_embed,len(text_labels_en))
else:
| '''
Inspiration taken from https://github.com/neonbjb/tortoise-tts/blob/main/tortoise/models/autoregressive.py
'''
def null_position_embeddings(range, dim):
return torch.zeros((range.shape[0], range.shape[1], dim), device=range.device)
class TS_model(nn.Module):
def __init__(self,n_embed = 512, n_layer = 16, n_head = 8, n_positions = 2048, name='Smolie-in'):
super(TS_model,self).__init__()
self.vocab_size=len(labels)
self.n_positions=n_positions
self.n_embed=n_embed
self.n_layer=n_layer
self.n_head=n_head
self.name=name
self.config = GPT2Config(vocab_size=self.vocab_size,n_positions=self.n_positions,n_embd=self.n_embed,n_layer=self.n_layer,n_head=self.n_head)
self.gpt = GPT2Model(self.config)
del self.gpt.wpe
self.gpt.wpe = functools.partial(null_position_embeddings, dim=self.n_embed)
# Built-in token embeddings are unused.
del self.gpt.wte
self.GST = GST(model_channels=self.n_embed,num_heads=self.n_head,in_channels=config.n_mel_channels,k=1)
if self.name == 'Smolie-en':
self.text_head = nn.Linear(self.n_embed,len(text_labels_en))
else: | self.text_head = nn.Linear(self.n_embed,len(text_labels)) | 1 | 2023-11-16 09:44:54+00:00 | 2k |
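
The TS_model above deletes GPT-2's learned token and position embeddings and swaps in null_position_embeddings via functools.partial. The standalone check below (torch only) confirms the partial behaves like a wpe module that always returns zeros of the right shape.

# Minimal check of the zero-position-embedding trick used by TS_model.
import functools

import torch

def null_position_embeddings(range, dim):
    return torch.zeros((range.shape[0], range.shape[1], dim), device=range.device)

wpe = functools.partial(null_position_embeddings, dim=512)
position_ids = torch.zeros(2, 16, dtype=torch.long)  # (batch, seq_len)
out = wpe(position_ids)
print(out.shape, out.abs().sum().item())  # torch.Size([2, 16, 512]) 0.0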
WCGKING/KINGUSERBOT | Branded/plugins/gdelete.py | [
{
"identifier": "is_gdel_user",
"path": "Branded/modules/data.py",
"snippet": "async def is_gdel_user(user_id: int) -> bool:\n user = await gdeldb.find_one({\"user_id\": user_id})\n if not user:\n return False\n return True"
},
{
"identifier": "get_gdel_user",
"path": "Branded/modules/data.py",
"snippet": "async def get_gdel_user() -> list:\n results = []\n async for user in gdeldb.find({\"user_id\": {\"$gt\": 0}}):\n user_id = user[\"user_id\"]\n results.append(user_id)\n return results"
},
{
"identifier": "get_gdel_count",
"path": "Branded/modules/data.py",
"snippet": "async def get_gdel_count() -> int:\n users = gdeldb.find({\"user_id\": {\"$gt\": 0}})\n users = await users.to_list(length=100000)\n return len(users)"
},
{
"identifier": "add_gdel_user",
"path": "Branded/modules/data.py",
"snippet": "async def add_gdel_user(user_id: int):\n is_gdel = await is_gdel_user(user_id)\n if is_gdel:\n return\n return await gdeldb.insert_one({\"user_id\": user_id})"
},
{
"identifier": "del_gdel_user",
"path": "Branded/modules/data.py",
"snippet": "async def del_gdel_user(user_id: int):\n is_gdel = await is_gdel_user(user_id)\n if not is_gdel:\n return\n return await gdeldb.delete_one({\"user_id\": user_id})"
}
] | import asyncio
from pyrogram import *
from pyrogram.types import Message
from .. import *
from ..modules.data import (is_gdel_user,
get_gdel_user, get_gdel_count,
add_gdel_user, del_gdel_user) | 643 |
@app.on_message(commandx(["gdl", "gdel", "gdelete"]) & SUPUSER)
async def add_gdelete_user(client, message: Message):
if not message.reply_to_message:
if len(message.command) != 2:
return await message.reply_text("Reply to a user's message or give username/user_id.")
user = message.text.split(None, 1)[1]
user = await app.get_users(user)
user_id = user.id
mention = user.mention
else:
user_id = message.reply_to_message.from_user.id
mention = message.reply_to_message.from_user.mention
if user_id == message.from_user.id:
return await message.reply_text("You want to add Global Delete yourself? How Fool!")
elif user_id == SUPUSER:
return await message.reply_text("Should i activate Global Delete on myself? Lol")
elif user_id in SUDOERS:
return await message.reply_text("You want add Global Delete on sudo user?")
is_gdel = await is_gdel_user(user_id)
if is_gdel:
return await message.reply_text("{0} is already affected by **Global Delete**".format(mention))
if user_id not in GDELSUB:
GDELSUB.add(user_id)
|
@app.on_message(commandx(["gdl", "gdel", "gdelete"]) & SUPUSER)
async def add_gdelete_user(client, message: Message):
if not message.reply_to_message:
if len(message.command) != 2:
return await message.reply_text("Reply to a user's message or give username/user_id.")
user = message.text.split(None, 1)[1]
user = await app.get_users(user)
user_id = user.id
mention = user.mention
else:
user_id = message.reply_to_message.from_user.id
mention = message.reply_to_message.from_user.mention
if user_id == message.from_user.id:
return await message.reply_text("You want to add Global Delete yourself? How Fool!")
elif user_id == SUPUSER:
return await message.reply_text("Should i activate Global Delete on myself? Lol")
elif user_id in SUDOERS:
return await message.reply_text("You want add Global Delete on sudo user?")
is_gdel = await is_gdel_user(user_id)
if is_gdel:
return await message.reply_text("{0} is already affected by **Global Delete**".format(mention))
if user_id not in GDELSUB:
GDELSUB.add(user_id) | await add_gdel_user(user_id) | 3 | 2023-11-14 13:24:26+00:00 | 2k |
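
A hedged sketch of the check-then-add cycle the handler above performs; gdeldb is assumed to be an async (Motor-style) MongoDB collection, as the awaited find_one/insert_one calls in the context snippets suggest, and the absolute package path Branded.modules.data is inferred from the record's relative import.

# Illustrative only; requires the bot's MongoDB connection to be configured.
import asyncio

from Branded.modules.data import add_gdel_user, get_gdel_count, is_gdel_user

async def demo(user_id: int) -> None:
    if not await is_gdel_user(user_id):
        await add_gdel_user(user_id)
    print(await get_gdel_count())

# asyncio.run(demo(12345))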
kudelskisecurity/fuzzomatic | fuzzomatic/docparse.py | [
{
"identifier": "score_functions",
"path": "fuzzomatic/approaches/functions.py",
"snippet": "def score_functions(functions):\n interesting_function_names = [\"parse\", \"load\", \"read\", \"str\", \"eval\"]\n # order functions by most interesting first\n ordered_functions = []\n for f in functions:\n function_name = f[1]\n args = f[2]\n priority = 0\n\n is_name_interesting = False\n for pattern in interesting_function_names:\n if pattern in function_name:\n is_name_interesting = True\n\n if len(args) == 1:\n arg_type = args[0]\n\n if arg_type == \"&str\":\n priority = 100\n elif arg_type == \"&[u8]\":\n priority = 100\n elif arg_type == \"String\":\n priority = 100\n elif arg_type == \"bool\":\n priority = 0\n elif arg_type == \"unknown\":\n priority = 10\n elif type(arg_type) == tuple and arg_type[0] == \"&array\":\n priority = 100\n elif is_name_interesting:\n priority = 100\n\n if args[0] == \"self\":\n priority = -15\n elif args[0] == \"self\":\n # functions with \"self\" as first argument\n priority = -50\n else:\n priority = 50\n elif len(args) > 1:\n known_types = 0\n for arg in args:\n if arg != \"unknown\":\n known_types += 1\n if known_types == len(args):\n priority = 30\n if \"&str\" in args or \"&[u8]\" in args or \"String\" in args:\n priority = 75\n if any(type(arg) == tuple and arg[0] == \"&array\" for arg in args):\n priority = 75\n else:\n # functions with multiple arguments where not all types are known\n priority = -10\n\n if args[0] == \"self\":\n # functions with \"self\" as first argument\n priority = -50\n else:\n # skip functions with no arguments\n priority = -100\n\n # give low priority to functions that are likely to load something by filename\n if \"file\" in function_name and arg_type == \"&str\":\n priority = 0\n\n augmented_function = [*f, priority]\n ordered_functions.append(augmented_function)\n ordered_functions = sorted(ordered_functions, key=lambda x: x[3], reverse=True)\n return ordered_functions"
},
{
"identifier": "parse_cargo_doc_json",
"path": "fuzzomatic/tools/cargo_doc.py",
"snippet": "def parse_cargo_doc_json(path):\n with open(path) as f:\n jso = json.loads(f.read())\n\n # get functions that take only one parameter and that are public\n root = jso[\"root\"]\n index = jso[\"index\"]\n root_elem = index[root]\n root_inner_items = root_elem[\"inner\"][\"module\"][\"items\"]\n\n functions = []\n\n for elem in root_inner_items:\n path = []\n e = index[elem]\n funcs = parse_item(index, e, path)\n functions.extend(funcs)\n\n return functions"
}
] | import argparse
from fuzzomatic.approaches.functions import score_functions
from fuzzomatic.tools.cargo_doc import parse_cargo_doc_json | 906 | #!/usr/bin/env python3
def get_parser():
prog_name = "docparse"
parser = argparse.ArgumentParser(
prog=prog_name,
description="Parse cargo doc json and print public functions",
)
parser.add_argument(
"json_path",
help="Path to cargo doc json file",
)
return parser
def main():
parser = get_parser()
args = parser.parse_args()
functions = parse_cargo_doc_json(args.json_path)
| #!/usr/bin/env python3
def get_parser():
prog_name = "docparse"
parser = argparse.ArgumentParser(
prog=prog_name,
description="Parse cargo doc json and print public functions",
)
parser.add_argument(
"json_path",
help="Path to cargo doc json file",
)
return parser
def main():
parser = get_parser()
args = parser.parse_args()
functions = parse_cargo_doc_json(args.json_path) | ordered_functions = score_functions(functions) | 0 | 2023-11-14 09:52:59+00:00 | 2k |
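
To make the scoring pass concrete: the hedged demo below assumes fuzzomatic is importable and feeds score_functions entries shaped as (module_path, function_name, arg_types), which matches the f[1]/f[2] indexing in the snippet. Exact priorities are internal details, so the demo only relies on the documented behavior that a priority is appended and entries come back sorted most-fuzzable-first.

# Toy candidates for score_functions; all names here are made up.
from fuzzomatic.approaches.functions import score_functions

candidates = [
    ("demo::a", "no_args", []),
    ("demo::b", "parse_str", ["&str"]),
    ("demo::c", "mystery", ["unknown"]),
]
for path, name, args, priority in score_functions(candidates):
    print(priority, name)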
muyuworks/myla | myla/vectorstores/lancedb_vectorstore.py | [
{
"identifier": "Record",
"path": "myla/vectorstores/_base.py",
"snippet": "class Record(Dict):\n @staticmethod\n def values_to_text(record: Dict, props: List[str] = None, separator: str = '\\001'):\n if props and not isinstance(props, list):\n raise ValueError(\"props should be a list\")\n if props:\n o = itemgetter(*props)\n if len(props) == 1:\n v = [o(record)]\n else:\n v = list(o(record))\n else:\n v = list(record.values())\n vl = []\n for i in v:\n if not isinstance(i, str):\n vl.append(json.dumps(i, ensure_ascii=False))\n else:\n vl.append(i)\n v = vl\n return separator.join(v)"
},
{
"identifier": "VectorStore",
"path": "myla/vectorstores/_base.py",
"snippet": "class VectorStore(ABC):\n def __init__(self) -> None:\n pass\n\n @abstractmethod\n def create_collection(self, collection: str, schema: Dict[str, type] = None, mode=\"create\"):\n \"\"\"Create a new collection\"\"\"\n\n @abstractmethod\n def add(\n self,\n collection: str,\n records: List[Record],\n embeddings_columns: Optional[List[str]] = None,\n vectors: Optional[List[List[float]]] = None,\n **kwargs\n ):\n \"\"\"Add record to the vectorsotre\"\"\"\n\n @abstractmethod\n def delete(self, collection: str, query: str):\n \"\"\"Delete record from the vectorstore\"\"\"\n\n @abstractmethod\n def search(\n self,\n collection: str = None,\n query: str = None,\n vector: List = None,\n filter: Any = None,\n limit: int = 20,\n columns: Optional[List[str]] = None,\n with_vector: bool = False,\n with_distance: bool = False,\n **kwargs\n ) -> Optional[List[Record]]:\n \"\"\"Search records\"\"\"\n\n async def asearch(\n self,\n collection: str = None,\n query: str = None,\n vector: List = None,\n filter: Any = None,\n limit: int = 20,\n columns: Optional[List[str]] = None,\n with_vector: bool = False,\n with_distance: bool = False,\n **kwargs\n ):\n return await asyncio.get_running_loop().run_in_executor(\n None, partial(self.search, **kwargs), collection, query, vector, filter, limit, columns, with_vector, with_distance\n )"
},
{
"identifier": "Embeddings",
"path": "myla/vectorstores/_embeddings.py",
"snippet": "class Embeddings(ABC):\n\n @abstractmethod\n def embed_batch(self, texts: List[str], **kwargs) -> List[List[float]]:\n \"\"\"Embed text batch.\"\"\"\n\n def embed(self, text: str, **kwargs) -> List[float]:\n \"\"\"Embed text.\"\"\"\n return self.embed_batch(texts=[text], **kwargs)[0]\n\n async def aembed(self, text: str, **kwargs) -> List[float]:\n \"\"\"Asynchronous Embed text.\"\"\"\n return await asyncio.get_running_loop().run_in_executor(\n None, self.embed, text, **kwargs\n )\n\n async def aembed_batch(self, texts: [str], **kwargs) -> List[List[float]]:\n \"\"\"Asynchronous Embed text.\"\"\"\n return await asyncio.get_running_loop().run_in_executor(\n None, self.embed_batch, texts, **kwargs\n )"
}
] | from typing import Any, List, Optional, Dict
from ._base import Record, VectorStore
from ._embeddings import Embeddings
import pyarrow as pa
import lancedb as lancedb
import pyarrow as pa | 1,142 |
VECTOR_COLUMN_NAME = "_vector"
class LanceDB(VectorStore):
def __init__(self, db_uri, embeddings: Embeddings = None) -> None:
super().__init__()
try:
pa.__version__
except ImportError as exc:
raise ImportError(
"Could not import pyarrow python package. "
"Please install it with `pip install pyarrow`."
) from exc
try:
# disable diagnostics
lancedb.utils.CONFIG['diagnostics'] = False
except ImportError as exc:
raise ImportError(
"Could not import lancedb python package. "
"Please install it with `pip install lancedb`."
) from exc
self._db_uri = db_uri
self._embeddings = embeddings
self._db = lancedb.connect(self._db_uri)
self._tables = {}
def create_collection(self, collection: str, schema: Dict[str, type] = None, mode="create"):
if schema is None:
raise ValueError("Invalid schema to create LanceDB table.")
s = self._convert_schema(schema=schema)
self._db.create_table(collection, schema=s, mode=mode)
def add(
self,
collection: str,
|
VECTOR_COLUMN_NAME = "_vector"
class LanceDB(VectorStore):
def __init__(self, db_uri, embeddings: Embeddings = None) -> None:
super().__init__()
try:
pa.__version__
except ImportError as exc:
raise ImportError(
"Could not import pyarrow python package. "
"Please install it with `pip install pyarrow`."
) from exc
try:
# disable diagnostics
lancedb.utils.CONFIG['diagnostics'] = False
except ImportError as exc:
raise ImportError(
"Could not import lancedb python package. "
"Please install it with `pip install lancedb`."
) from exc
self._db_uri = db_uri
self._embeddings = embeddings
self._db = lancedb.connect(self._db_uri)
self._tables = {}
def create_collection(self, collection: str, schema: Dict[str, type] = None, mode="create"):
if schema is None:
raise ValueError("Invalid schema to create LanceDB table.")
s = self._convert_schema(schema=schema)
self._db.create_table(collection, schema=s, mode=mode)
def add(
self,
collection: str, | records: List[Record], | 0 | 2023-11-15 01:05:03+00:00 | 2k |
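
Before the record's add() signature continues, a quick hedged demo of the Record.values_to_text helper from this record's context snippet (requires the myla package): non-string values are JSON-encoded before joining, and the default separator is '\001'.

# Sketch of Record.values_to_text behavior.
from myla.vectorstores._base import Record

rec = {"title": "doc1", "tags": ["a", "b"]}
print(Record.values_to_text(rec, props=["title"]))   # doc1
print(Record.values_to_text(rec, separator=" | "))   # doc1 | ["a", "b"]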
OSU-NLP-Group/TableLlama | inference_row_pop.py | [
{
"identifier": "replace_llama_attn",
"path": "llama_attn_replace.py",
"snippet": "def replace_llama_attn(use_flash_attn=True, use_full=False):\n if use_flash_attn:\n cuda_major, cuda_minor = torch.cuda.get_device_capability()\n if cuda_major < 8:\n warnings.warn(\n \"Flash attention is only supported on A100 or H100 GPU during training due to head dim > 64 backward.\"\n \"ref: https://github.com/HazyResearch/flash-attention/issues/190#issuecomment-1523359593\"\n )\n transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask = (\n _prepare_decoder_attention_mask\n )\n transformers.models.llama.modeling_llama.LlamaAttention.forward = forward_flashattn_full if use_full else forward_flashattn\n else:\n transformers.models.llama.modeling_llama.LlamaAttention.forward = forward_noflashattn"
},
{
"identifier": "PROMPT_DICT",
"path": "supervised_fine_tune.py",
"snippet": "PROMPT_DICT = {\n \"prompt_input\": (\n \"Below is an instruction that describes a task, paired with an input that provides further context. \"\n \"Write a response that appropriately completes the request.\\n\\n\"\n \"### Instruction:\\n{instruction}\\n\\n### Input:\\n{input_seg}\\n\\n### Question:\\n{question}\\n\\n### Response:\"\n ),\n \"prompt_no_input\": (\n \"Below is an instruction that describes a task. \"\n \"Write a response that appropriately completes the request.\\n\\n\"\n \"### Instruction:\\n{instruction}\\n\\n### Response:\"\n ),\n}"
}
] | import os
import json
import sys
import math
import torch
import argparse
import transformers
from peft import PeftModel
from transformers import GenerationConfig
from llama_attn_replace import replace_llama_attn
from supervised_fine_tune import PROMPT_DICT
from tqdm import tqdm | 670 | # import textwrap
# from queue import Queue
# from threading import Thread
# import gradio as gr
def parse_config():
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--base_model', type=str, default="/data1/pretrained-models/llama-7b-hf")
parser.add_argument('--cache_dir', type=str, default="./cache")
parser.add_argument('--context_size', type=int, default=-1, help='context size during fine-tuning')
parser.add_argument('--flash_attn', type=bool, default=False, help='')
parser.add_argument('--temperature', type=float, default=0.6, help='')
parser.add_argument('--top_p', type=float, default=0.9, help='')
parser.add_argument('--max_gen_len', type=int, default=512, help='')
parser.add_argument('--input_data_file', type=str, default='input_data/', help='')
parser.add_argument('--output_data_file', type=str, default='output_data/', help='')
args = parser.parse_args()
return args
def generate_prompt(instruction, question, input_seg=None):
    if input_seg:
| # import textwrap
# from queue import Queue
# from threading import Thread
# import gradio as gr
def parse_config():
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--base_model', type=str, default="/data1/pretrained-models/llama-7b-hf")
parser.add_argument('--cache_dir', type=str, default="./cache")
parser.add_argument('--context_size', type=int, default=-1, help='context size during fine-tuning')
parser.add_argument('--flash_attn', type=bool, default=False, help='')
parser.add_argument('--temperature', type=float, default=0.6, help='')
parser.add_argument('--top_p', type=float, default=0.9, help='')
parser.add_argument('--max_gen_len', type=int, default=512, help='')
parser.add_argument('--input_data_file', type=str, default='input_data/', help='')
parser.add_argument('--output_data_file', type=str, default='output_data/', help='')
args = parser.parse_args()
return args
def generate_prompt(instruction, question, input_seg=None):
    if input_seg: | return PROMPT_DICT["prompt_input"].format(instruction=instruction, input_seg=input_seg, question=question) | 1 | 2023-11-16 02:54:08+00:00 | 2k
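
The prompt assembly above can be checked without a model: the template below is copied from the record's PROMPT_DICT context snippet, and the instruction/input/question values are made-up placeholders.

# Self-contained reproduction of the prompt_input template from PROMPT_DICT.
PROMPT_INPUT = (
    "Below is an instruction that describes a task, paired with an input that provides further context. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\n{instruction}\n\n### Input:\n{input_seg}\n\n### Question:\n{question}\n\n### Response:"
)
print(PROMPT_INPUT.format(
    instruction="Fill in the missing entity in the table row.",  # placeholder
    input_seg="[TAB] col: city | country | population",          # placeholder
    question="Which country does the last row belong to?",       # placeholder
))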
pytorch-labs/torchfix | tests/test_torchfix.py | [
{
"identifier": "TorchChecker",
"path": "torchfix/torchfix.py",
"snippet": "class TorchChecker:\n name = \"TorchFix\"\n version = __version__\n\n # The parameters need to have these exact names.\n # See https://flake8.pycqa.org/en/latest/plugin-development/plugin-parameters.html\n # `tree` is unused, but the plugin doesn't work without it.\n def __init__(self, tree, lines):\n # Filter out files that don't have \"torch\" string in them.\n # This avoids expensive parsing.\n MARKER = \"torch\" # this will catch import torch or functorch\n has_marker = False\n self.module = None\n for line in lines:\n if MARKER in line:\n has_marker = True\n break\n if has_marker:\n module = cst.parse_module(\"\".join(lines))\n self.module = cst.MetadataWrapper(module, unsafe_skip_copy=True)\n self.violations = []\n self.visitors = GET_ALL_VISITORS()\n\n def run(self):\n if self.module:\n self.module.visit_batched(self.visitors)\n for v in self.visitors:\n self.violations += v.violations\n for violation in self.violations:\n yield violation.flake8_result()\n\n @staticmethod\n def add_options(optmanager):\n optmanager.extend_default_ignore(DISABLED_BY_DEFAULT)"
},
{
"identifier": "TorchCodemod",
"path": "torchfix/torchfix.py",
"snippet": "class TorchCodemod(codemod.Codemod):\n def __init__(\n self,\n context: codemod.CodemodContext,\n config: Optional[TorchCodemodConfig] = None,\n ) -> None:\n super().__init__(context)\n self.config = config\n\n def transform_module_impl(self, module: cst.Module) -> cst.Module:\n # We use `unsafe_skip_copy`` here not only to save some time, but\n # because `deep_replace`` is identity-based and will not work on\n # the original module if the wrapper does a deep copy:\n # in that case we would need to use `wrapped_module.module`\n # instead of `module`.\n wrapped_module = cst.MetadataWrapper(module, unsafe_skip_copy=True)\n\n visitors = GET_ALL_VISITORS()\n violations = []\n needed_imports = []\n wrapped_module.visit_batched(visitors)\n for v in visitors:\n violations += v.violations\n needed_imports += v.needed_imports\n\n fixes_count = 0\n replacement_map = {}\n assert self.context.filename is not None\n for violation in violations:\n skip_violation = False\n if self.config is None or self.config.select != \"ALL\":\n for disabled_code in DISABLED_BY_DEFAULT:\n if violation.error_code.startswith(disabled_code):\n skip_violation = True\n break\n if skip_violation:\n continue\n\n if violation.replacement is not None:\n replacement_map[id(violation.node)] = violation.replacement\n fixes_count += 1\n try:\n path = Path(self.context.filename).relative_to(Path.cwd())\n except ValueError:\n # Not a subpath of a current dir, use absolute path\n path = Path(self.context.filename)\n print(f\"{path}{violation.codemod_result()}\")\n\n new_module = deep_multi_replace(module, replacement_map)\n\n add_imports_visitor = codemod.visitors.AddImportsVisitor(\n self.context, needed_imports\n )\n new_module = new_module.visit(add_imports_visitor)\n\n update_functorch_imports_visitor = _UpdateFunctorchImports()\n new_module = new_module.visit(update_functorch_imports_visitor)\n\n if fixes_count == 0 and not update_functorch_imports_visitor.changed:\n raise codemod.SkipFile(\"No changes\")\n\n return new_module"
},
{
"identifier": "TorchCodemodConfig",
"path": "torchfix/torchfix.py",
"snippet": "class TorchCodemodConfig:\n select: Optional[str] = None"
},
{
"identifier": "GET_ALL_VISITORS",
"path": "torchfix/torchfix.py",
"snippet": "def GET_ALL_VISITORS():\n return [\n TorchDeprecatedSymbolsVisitor(DEPRECATED_CONFIG_PATH),\n TorchRequireGradVisitor(),\n TorchSynchronizedDataLoaderVisitor(),\n TorchVisionDeprecatedPretrainedVisitor(),\n TorchVisionDeprecatedToTensorVisitor(),\n TorchUnsafeLoadVisitor(),\n TorchReentrantCheckpointVisitor(),\n ]"
}
] | from pathlib import Path
from torchfix.torchfix import (
TorchChecker,
TorchCodemod,
TorchCodemodConfig,
GET_ALL_VISITORS,
)
import logging
import libcst.codemod as codemod | 1,175 |
FIXTURES_PATH = Path(__file__).absolute().parent / "fixtures"
LOGGER = logging.getLogger(__name__)
def _checker_results(s):
checker = TorchChecker(None, s)
return [f"{line}:{col} {msg}" for line, col, msg, _ in checker.run()]
def _codemod_results(source_path):
with open(source_path) as source:
code = source.read()
|
FIXTURES_PATH = Path(__file__).absolute().parent / "fixtures"
LOGGER = logging.getLogger(__name__)
def _checker_results(s):
checker = TorchChecker(None, s)
return [f"{line}:{col} {msg}" for line, col, msg, _ in checker.run()]
def _codemod_results(source_path):
with open(source_path) as source:
code = source.read() | config = TorchCodemodConfig(select="ALL") | 2 | 2023-11-15 01:21:07+00:00 | 2k |
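
A hedged sketch of driving the plugin directly, mirroring the _checker_results helper above; it assumes torchfix is installed, and the torch.load call is the kind of pattern the TorchUnsafeLoadVisitor listed in GET_ALL_VISITORS is meant to flag, though the exact warning codes depend on the installed version.

# Direct use of TorchChecker, as in the test helper above.
from torchfix.torchfix import TorchChecker

lines = [
    "import torch\n",
    "state = torch.load('checkpoint.pt')\n",  # unsafe-load style pattern
]
checker = TorchChecker(None, lines)
for line, col, msg, _ in checker.run():
    print(f"{line}:{col} {msg}")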
FISHers6/CodeLearn-Agent | codelearn/tools/file_content_view.py | [
{
"identifier": "Project",
"path": "codelearn/project/project.py",
"snippet": "class Project:\n\n def __init__(self, id: str, local_dir: str, source_content: FileTree, repo_url: str = None, last_updated_time = None):\n \"\"\"\n :param name: 项目名称\n :param contents: 一个字典,其中键是文件路径,值是文件内容\n \"\"\"\n self.id = id\n self.local_dir = local_dir\n self.repo_url = repo_url\n self.contents = source_content\n self.last_updated_time = last_updated_time"
},
{
"identifier": "process_file_paths",
"path": "codelearn/utils/file_util.py",
"snippet": "def process_file_paths(file_paths: str) -> List[str]:\n # 用于存储处理后的路径\n processed_paths = []\n \n # 使用正则表达式来分割字符串,处理多种可能的分隔符\n paths = re.split(r'[ ,;]+', file_paths)\n \n for path in paths:\n # 删除路径两侧可能存在的多余空格\n path = path.strip()\n \n if not path:\n continue # 跳过空字符串\n \n # 将路径分割为组件\n path_components = re.split(r'[\\\\/]', path)\n \n # 使用 os.path.join 和 os.sep 来连接路径组件\n normalized_path = os.path.join(*path_components)\n \n # 将处理后的路径添加到结果列表中\n processed_paths.append(normalized_path)\n \n # 移除重复的路径\n processed_paths = list(set(processed_paths))\n \n return processed_paths"
}
] | import json
from typing import List, Optional
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.tools import BaseTool
from codelearn.project.project import Project
from codelearn.utils.file_util import process_file_paths | 649 |
class FileContentViewTool(BaseTool):
"""Tool to fetch and display detailed content of project files."""
name: str = "get_file_content"
description: str = (
"The 'get_file_content' tool fetches and displays detailed content of specified files within the project, including both source code and documentation. It's an important tool for users who need detailed from code source."
"Input a comma-separated list of file names (without folder or path names) to view. Incomplete paths are not accepted. For example swim-main/src/example.txt is a full path file, but 'src/example' is incomplete directory folder not allowed"
"Output is a dictionary with 'files' key containing a list of dictionaries for each file, "
"**Ensure you've requested the repository structure before asking for file contents.The requested file must exist in the project**"
"Useful for users diving deep into a project's codebase or documentation to understand its intricacies."
)
|
class FileContentViewTool(BaseTool):
"""Tool to fetch and display detailed content of project files."""
name: str = "get_file_content"
description: str = (
"The 'get_file_content' tool fetches and displays detailed content of specified files within the project, including both source code and documentation. It's an important tool for users who need detailed from code source."
"Input a comma-separated list of file names (without folder or path names) to view. Incomplete paths are not accepted. For example swim-main/src/example.txt is a full path file, but 'src/example' is incomplete directory folder not allowed"
"Output is a dictionary with 'files' key containing a list of dictionaries for each file, "
"**Ensure you've requested the repository structure before asking for file contents.The requested file must exist in the project**"
"Useful for users diving deep into a project's codebase or documentation to understand its intricacies."
) | project: Project | 0 | 2023-11-12 13:13:30+00:00 | 2k |
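
The path rules in that description are enforced by process_file_paths from the context snippet. A hedged behavior check, assuming the codelearn package is importable and a POSIX os.sep; result order is not guaranteed because a set is used internally.

# Separators are normalized and duplicates collapse.
from codelearn.utils.file_util import process_file_paths

paths = "swim-main\\src\\example.txt, swim-main/src/example.txt; docs/readme.md"
print(process_file_paths(paths))
# e.g. ['swim-main/src/example.txt', 'docs/readme.md'] on POSIX systems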
kaixinol/twitter_user_tweet_crawler | twitter_user_tweet_crawler/__main__.py | [
{
"identifier": "get_browser",
"path": "twitter_user_tweet_crawler/browser.py",
"snippet": "def get_browser(headless: bool = False) -> WebDriver:\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument('--blink-settings=imagesEnabled=false')\n chrome_options.add_argument('--disable-remote-fonts')\n chrome_options.add_argument('--disable-gpu')\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--window-size=1200x600\"')\n if headless:\n chrome_options.add_argument('--headless')\n driver = webdriver.Chrome(options=chrome_options)\n driver.__dict__['is_using'] = False\n return driver"
},
{
"identifier": "get_multiple_browsers",
"path": "twitter_user_tweet_crawler/browser.py",
"snippet": "def get_multiple_browsers(count: int, headless: bool = False) -> list[WebDriver]:\n return [get_browser(headless) for _ in range(count)]"
},
{
"identifier": "ThreadPool",
"path": "twitter_user_tweet_crawler/pool.py",
"snippet": "class ThreadPool:\n browser: list[WebDriver]\n jobs: list[Callable] = []\n pool: ThreadPoolExecutor\n\n def __init__(self, browser: list[WebDriver], pool: ThreadPoolExecutor):\n self.browser = browser\n self.pool = pool\n\n def check_and_work(self):\n if not self.jobs:\n return\n for i in self.browser:\n if not i.__dict__['is_using']:\n i: WebDriver\n i.__dict__['is_using'] = True\n job = self.jobs.pop(0)\n callback: Future = self.pool.submit(job, i)\n callback.add_done_callback(lambda future: self._on_job_complete(i, callback))\n return\n\n def _on_job_complete(self, index, future):\n elements = self.browser.index(index)\n try:\n future.result()\n # By default, `concurrent.futures` will silently log errors but will not raise them\n # Throw the error directly\n finally:\n if slow_mode:\n sleep(30)\n self.browser[elements].__dict__['is_using'] = False\n self.check_and_work()"
},
{
"identifier": "config",
"path": "twitter_user_tweet_crawler/util/config.py",
"snippet": "class Config:\n def load(self, setting: dict | str | Path):\n def save(self):\n def __getitem__(self, item):\ndef set_work_directory(path: Path):"
}
] | import concurrent.futures
import json
from pathlib import Path
from time import sleep
from urllib.parse import urlparse
from loguru import logger
from rich.prompt import Confirm
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.common.by import By
from .browser import get_browser, get_multiple_browsers
from .pool import ThreadPool
from .util.config import config, work_directory, set_work_directory
from .tweet import Tweet | 807 |
def main():
cookie: list[dict]
work_list: list[WebDriver]
driver: WebDriver
def read_config() -> list[dict]:
with open(work_directory / 'cookie.json', 'r') as f:
return json.load(f)
def write_config(data: list[dict]):
with open(work_directory / 'cookie.json', 'w') as f:
json.dump(data, f)
def set_cookie(browser: WebDriver):
for i in cookie:
browser.add_cookie(i)
def get_executor(count: int | None = None):
return concurrent.futures.ThreadPoolExecutor(max_workers=count)
def get_items_need_handle():
return driver.find_elements(*selector)
selector = (By.XPATH, '//*/div[2]/div/div[3]/a[@role="link"]')
|
def main():
cookie: list[dict]
work_list: list[WebDriver]
driver: WebDriver
def read_config() -> list[dict]:
with open(work_directory / 'cookie.json', 'r') as f:
return json.load(f)
def write_config(data: list[dict]):
with open(work_directory / 'cookie.json', 'w') as f:
json.dump(data, f)
def set_cookie(browser: WebDriver):
for i in cookie:
browser.add_cookie(i)
def get_executor(count: int | None = None):
return concurrent.futures.ThreadPoolExecutor(max_workers=count)
def get_items_need_handle():
return driver.find_elements(*selector)
selector = (By.XPATH, '//*/div[2]/div/div[3]/a[@role="link"]') | (Path(config.save) / 'res').mkdir(exist_ok=True, parents=True) | 3 | 2023-11-12 11:40:26+00:00 | 2k |
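
A minimal hedged sketch of the ThreadPool dispatch pattern from the context snippets; it needs selenium plus a local Chrome, so treat it as illustrative only. Note that ThreadPool.jobs is a class-level list in the snippet, so queued jobs are shared across instances.

# Each queued job receives an idle WebDriver when check_and_work dispatches.
from concurrent.futures import ThreadPoolExecutor

from twitter_user_tweet_crawler.browser import get_multiple_browsers
from twitter_user_tweet_crawler.pool import ThreadPool

browsers = get_multiple_browsers(2, headless=True)
pool = ThreadPool(browsers, ThreadPoolExecutor(max_workers=2))
pool.jobs.append(lambda driver: driver.get("https://example.com"))
pool.check_and_work()  # marks a driver busy, runs the job, then frees it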
kirill-vish/Beyond-INet | inference/modelvshuman/model_evaluator.py | [
{
"identifier": "load_model_transform",
"path": "utils/misc.py",
"snippet": "def load_model_transform(model_name, pretrained_dir, img_size=224):\n print(f\"Loading {model_name}\")\n checkpoint_path = None\n transform_val = None\n if model_name == \"deit3_21k\":\n model = models_deit.deit_base_patch16_LS(img_size=img_size)\n checkpoint_path = os.path.join(pretrained_dir,\n \"deit_3_base_224_21k.pth\")\n elif model_name == \"convnext_base_21k\":\n model = models_convnextv1.convnext_base()\n checkpoint_path = os.path.join(pretrained_dir,\n \"convnext_base_22k_1k_224.pth\")\n elif model_name == \"vit_clip\":\n model, _, transform_val = open_clip.create_model_and_transforms(\n 'ViT-B-16', pretrained='laion400m_e31', force_image_size=img_size)\n model = models_clip.CLIPModel(model=model, model_name='ViT-B-16')\n checkpoint_path = None\n elif model_name == \"convnext_clip\":\n model, _, transform_val = open_clip.create_model_and_transforms(\n 'convnext_base',\n pretrained='laion400m_s13b_b51k',\n force_image_size=img_size)\n model = models_clip.CLIPModel(model=model, model_name='convnext_base')\n checkpoint_path = None\n\n if checkpoint_path is not None:\n checkpoint = torch.load(checkpoint_path)\n state_dict = checkpoint['model']\n if img_size != 224 and model_name == 'deit3_21k':\n state_dict = interpolate_pos_embed(model, state_dict)\n msg = model.load_state_dict(state_dict, strict=False)\n print(msg)\n assert set(checkpoint['model'].keys()) == set(\n model.state_dict().keys())\n assert len(msg.missing_keys) == 0 and len(\n msg.unexpected_keys\n ) == 0, \"Some keys in the state dict do not match\"\n\n return model, transform_val"
},
{
"identifier": "evaluate",
"path": "inference/modelvshuman/evaluation/evaluate.py",
"snippet": "IMAGENET_LABEL_FILE = pjoin(c.CODE_DIR, \"evaluation\", \"imagenet_labels.txt\")\ndef print_performance_to_csv(model_name,\n dataset_name,\n performance,\n metric_name,\n data_parent_dir=c.PERFORMANCES_DIR):\ndef print_predictions_to_console(softmax_output,\n top_n=5,\n labels_path=IMAGENET_LABEL_FILE):\n def __init__(self, model_name, dataset, data_parent_dir=c.RAW_DATA_DIR):\n def create_session_csv(self, session):\n def print_batch_to_csv(self, object_response, batch_targets, paths):\nclass ResultPrinter():"
},
{
"identifier": "load_dataset",
"path": "inference/modelvshuman/utils.py",
"snippet": "def load_dataset(name, *args, **kwargs):\n default_kwargs = {\"batch_size\": 16, \"num_workers\": 4}\n kwargs = {**default_kwargs, **kwargs}\n logger.info(f\"Loading dataset {name}\")\n supported_datasets = dataset_module.list_datasets()\n module_name = supported_datasets.get(name, None)\n if module_name is None:\n raise NameError(\n f\"Dataset {name} is not supported, \"\n f\"please select from {list(supported_datasets.keys())}\")\n elif os.path.exists(join(c.DATASET_DIR, name)):\n return eval(f\"dataset_module.{module_name}\")(*args, **kwargs)\n elif try_download_dataset_from_github(name):\n return eval(f\"dataset_module.{module_name}\")(*args, **kwargs)\n else:\n raise NotImplementedError(\n f\"Dataset {name} not available for download, please obtain the dataset \"\n f\"yourself and save it to {c.DATASET_DIR}\")"
},
{
"identifier": "load_model",
"path": "inference/modelvshuman/utils.py",
"snippet": "def load_model(model_name, *args):\n if model_name in zoomodels.__dict__:\n model = eval(\"pytorch_model_zoo.model_pytorch\")(model_name, *args)\n framework = 'pytorch'\n else:\n model = eval(f\"pytorch_model_zoo.model_timm\")(model_name, *args)\n framework = 'pytorch'\n return model, framework"
}
] | import copy
import datetime
import logging
import os
import matplotlib as mpl
import torch
from torch.nn.functional import softmax
from tqdm import tqdm
from utils.misc import load_model_transform
from .evaluation import evaluate as e
from .utils import load_dataset, load_model | 1,409 |
logger = logging.getLogger(__name__)
MAX_NUM_MODELS_IN_CACHE = 3
mpl.rcParams['font.size'] = 22
def device():
return torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ModelEvaluator:
def _pytorch_evaluator(self, model_name, model, dataset, *args, **kwargs):
"""
Evaluate Model on the given dataset and return the accuracy.
Args:
model_name:
model:
dataset:
*args:
**kwargs:
"""
logging_info = f"Evaluating model {model_name} on dataset {dataset.name} using Pytorch Evaluator"
logger.info(logging_info)
print(logging_info)
for metric in dataset.metrics:
metric.reset()
with torch.no_grad():
result_writer = e.ResultPrinter(model_name=model_name,
dataset=dataset)
for images, target, paths in tqdm(dataset.loader):
images = images.to(device())
if "forward_batch" in dir(model):
logits = model.forward_batch(images)
softmax_output = model.softmax(logits)
else:
logits = model(images)
softmax_output = softmax(logits,
dim=1).detach().cpu().numpy()
if isinstance(target, torch.Tensor):
batch_targets = model.to_numpy(target)
else:
batch_targets = target
predictions = dataset.decision_mapping(softmax_output)
for metric in dataset.metrics:
metric.update(predictions, batch_targets, paths)
if kwargs["print_predictions"]:
result_writer.print_batch_to_csv(
object_response=predictions,
batch_targets=batch_targets,
paths=paths)
def _get_datasets(self, dataset_names, *args, **kwargs):
dataset_list = []
for dataset in dataset_names:
|
logger = logging.getLogger(__name__)
MAX_NUM_MODELS_IN_CACHE = 3
mpl.rcParams['font.size'] = 22
def device():
return torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ModelEvaluator:
def _pytorch_evaluator(self, model_name, model, dataset, *args, **kwargs):
"""
Evaluate Model on the given dataset and return the accuracy.
Args:
model_name:
model:
dataset:
*args:
**kwargs:
"""
logging_info = f"Evaluating model {model_name} on dataset {dataset.name} using Pytorch Evaluator"
logger.info(logging_info)
print(logging_info)
for metric in dataset.metrics:
metric.reset()
with torch.no_grad():
result_writer = e.ResultPrinter(model_name=model_name,
dataset=dataset)
for images, target, paths in tqdm(dataset.loader):
images = images.to(device())
if "forward_batch" in dir(model):
logits = model.forward_batch(images)
softmax_output = model.softmax(logits)
else:
logits = model(images)
softmax_output = softmax(logits,
dim=1).detach().cpu().numpy()
if isinstance(target, torch.Tensor):
batch_targets = model.to_numpy(target)
else:
batch_targets = target
predictions = dataset.decision_mapping(softmax_output)
for metric in dataset.metrics:
metric.update(predictions, batch_targets, paths)
if kwargs["print_predictions"]:
result_writer.print_batch_to_csv(
object_response=predictions,
batch_targets=batch_targets,
paths=paths)
def _get_datasets(self, dataset_names, *args, **kwargs):
dataset_list = []
for dataset in dataset_names: | dataset = load_dataset(dataset, *args, **kwargs) | 2 | 2023-11-15 22:22:06+00:00 | 2k |
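
The fallback branch above (softmax over logits when a model lacks forward_batch) is easy to sanity-check in isolation; the standalone snippet below requires only torch.

# Standalone check of the softmax fallback used in _pytorch_evaluator.
import torch
from torch.nn.functional import softmax

logits = torch.randn(3, 5)
probs = softmax(logits, dim=1).detach().cpu().numpy()
print(probs.shape, probs.sum(axis=1))  # (3, 5), each row sums to ~1.0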
shengliu66/ICV | utils/context_manager.py | [
{
"identifier": "ForwardTracer",
"path": "utils/forward_tracer.py",
"snippet": "class ForwardTracer:\n def __init__(self, model: PreTrainedModel, forward_trace: ForwardTrace, with_submodules: bool = False):\n self._model = model\n self._forward_trace = forward_trace\n self._with_submodules = with_submodules\n\n self._layers = get_layers(model)\n self._attn_layers = get_attention_layers(model)\n self._mlp_layers = get_mlp_layers(model)\n\n self._hooks = []\n\n def __enter__(self):\n self._register_forward_hooks()\n\n def __exit__(self, exc_type, exc_value, traceback):\n for hook in self._hooks:\n hook.remove()\n\n if exc_type is None:\n residual_stream = self._forward_trace.residual_stream\n\n if residual_stream.hidden[0] == []:\n residual_stream.hidden.pop(0)\n\n for key in residual_stream.__dataclass_fields__.keys():\n acts = getattr(residual_stream, key)\n # TODO: this is a hack, fix it\n if key != \"hidden\" and not self._with_submodules:\n continue\n\n nonempty_layer_acts = [layer_acts for layer_acts in acts if layer_acts != []][0]\n final_shape = torch.cat(nonempty_layer_acts, dim=0).shape\n\n for i, layer_acts in enumerate(acts):\n if layer_acts == []:\n acts[i] = torch.zeros(final_shape)\n else:\n acts[i] = torch.cat(layer_acts, dim=0)\n acts = torch.stack(acts).transpose(0, 1)\n setattr(residual_stream, key, acts)\n\n\n # if self._with_submodules:\n # self._forward_trace.attentions = torch.stack(self._forward_trace.attentions).transpose(0, 1)\n # else:\n self._forward_trace.attentions = None\n\n def _register_forward_hooks(self):\n model = self._model\n hooks = self._hooks\n\n residual_stream = self._forward_trace.residual_stream\n\n def store_activations(residual_stream: ResidualStream, acts_type: str, layer_num: int):\n def hook(model, inp, out):\n if isinstance(out, tuple):\n out = out[0]\n out = out.float().to(\"cpu\", non_blocking=True)\n\n acts = getattr(residual_stream, acts_type)\n while len(acts) < layer_num + 1:\n acts.append([])\n try:\n acts[layer_num].append(out)\n except IndexError:\n print(len(acts), layer_num)\n\n return hook\n\n def store_attentions(layer_num):\n def hook(model, inp, out):\n attention_maps = out[1]\n attention_maps = attention_maps.to(\"cpu\", non_blocking=True).float()\n self._forward_trace.attentions[layer_num] = attention_maps\n\n return hook\n\n embedding_hook = get_embedding_layer(self._model).register_forward_hook(\n store_activations(residual_stream, \"hidden\", 0)\n )\n hooks.append(embedding_hook)\n\n for i, layer in enumerate(self._layers):\n\n hidden_states_hook = layer.register_forward_hook(store_activations(residual_stream, \"hidden\", i + 1))\n hooks.append(hidden_states_hook)\n\n if self._with_submodules:\n for i, mlp_layer in enumerate(self._mlp_layers):\n mlp_res_hook = mlp_layer.register_forward_hook(store_activations(residual_stream, \"mlp\", i))\n hooks.append(mlp_res_hook)\n\n for i, attn_layer in enumerate(self._attn_layers):\n attn_res_hook = attn_layer.register_forward_hook(store_activations(residual_stream, \"attn\", i))\n hooks.append(attn_res_hook)\n # attn_attentions_hook = attn_layer.register_forward_hook(store_attentions(i))\n # hooks.append(attn_attentions_hook)"
},
{
"identifier": "ForwardTrace",
"path": "utils/forward_tracer.py",
"snippet": "class ForwardTrace:\n def __init__(self):\n self.residual_stream: Optional[ResidualStream] = ResidualStream(\n hidden=[],\n attn=[],\n mlp=[],\n )\n self.attentions: Optional[torch.Tensor] = None"
}
] | import os
from contextlib import AbstractContextManager, ExitStack
from typing import Iterable
from utils.forward_tracer import ForwardTracer, ForwardTrace | 1,209 |
class CombinedContextManager(AbstractContextManager):
def __init__(self, context_managers):
self.context_managers = context_managers
self.stack = None
def __enter__(self):
self.stack = ExitStack()
for cm in self.context_managers:
self.stack.enter_context(cm)
return self.stack
def __exit__(self, exc_type, exc_val, exc_tb):
if self.stack is not None:
self.stack.__exit__(exc_type, exc_val, exc_tb)
def modified_forward_context_manager(model, forward_modifiers=()):
context_manager = CombinedContextManager([*forward_modifiers])
return context_manager
def traced_forward_context_manager(model, with_submodules=False):
|
class CombinedContextManager(AbstractContextManager):
def __init__(self, context_managers):
self.context_managers = context_managers
self.stack = None
def __enter__(self):
self.stack = ExitStack()
for cm in self.context_managers:
self.stack.enter_context(cm)
return self.stack
def __exit__(self, exc_type, exc_val, exc_tb):
if self.stack is not None:
self.stack.__exit__(exc_type, exc_val, exc_tb)
def modified_forward_context_manager(model, forward_modifiers=()):
context_manager = CombinedContextManager([*forward_modifiers])
return context_manager
def traced_forward_context_manager(model, with_submodules=False): | forward_trace = ForwardTrace() | 1 | 2023-11-11 18:20:45+00:00 | 2k |
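
A heavily hedged usage sketch for the tracing helpers above: it assumes traced_forward_context_manager goes on to return the wrapper plus the ForwardTrace its first line creates (only that first line is part of the record), and that the model is a HuggingFace PreTrainedModel so ForwardTracer's layer lookups resolve. The tiny checkpoint name is just a convenient stand-in.

# Hypothetical call pattern; the (wrapper, trace) return shape is assumed.
import torch
from transformers import AutoModelForCausalLM

from utils.context_manager import traced_forward_context_manager  # repo-local module

model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")  # small stand-in
wrapper, trace = traced_forward_context_manager(model)
with wrapper:
    model(input_ids=torch.tensor([[1, 2, 3]]))
print(trace.residual_stream.hidden.shape)  # stacked per-layer hidden states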
Mohamad-Hussein/speech-assistant | src/model_inference.py | [
{
"identifier": "find_gpu_config",
"path": "src/funcs.py",
"snippet": "def find_gpu_config(logger):\n \"\"\"\n Finds the GPU config and returns the device, device name and torch_dtype\n based on GPU platform and availability.\n\n Args:\n logger (logging.Logger): Logger instance to log messages onto model.log (for Windows)\n\n Returns:\n device (str): Device type, either cuda:0, cpu, or ...\n device_name (str): Device name\n torch_dtype (torch.dtype): Data type for torch, float16 for GPU, float32 for CPU\n\n \"\"\"\n import torch\n from torch import cuda\n from torch import float16, float32\n\n logger.debug(\"Checking for GPU config\")\n\n # Assume, then check\n device = torch.device(\"cuda:0\" if cuda.is_available() else \"cpu\")\n torch_dtype = float16 if cuda.is_available() else float32\n device_name = \"\"\n\n # CUDA\n if cuda.is_available():\n # Debugging made easier\n device_name = cuda.get_device_name()\n logger.debug(\"GPU detected from cuda\")\n logger.info(f\"Device: {device}\")\n logger.info(f\"Device name: {cuda.get_device_name()}\")\n logger.info(f\"Device properties: {cuda.get_device_properties(device)}\")\n logger.info(f\"Device count: {cuda.device_count()}\")\n logger.info(f\"Device capability: {cuda.get_device_capability()}\")\n logger.info(f\"Current memory allocated: {cuda.mem_get_info()}\")\n\n # AMD\n else:\n try:\n import torch_directml as dml\n \n if dml.is_available():\n torch_dtype = float16\n device = dml.device()\n device_name = dml.device_name(dml.default_device())\n\n logger.debug(\"GPU detected from torch_directml\")\n logger.info(f\"Available: {dml.is_available()}\")\n logger.info(f\"Devices Available: {dml.device_count()}\")\n logger.info(f\"Device: {device}\")\n logger.info(f\"Default device: {dml.default_device()}\")\n logger.info(f\"Device name: {dml.device_name(0)}\")\n logger.info(f\"GPU memory: {dml.gpu_memory()}\")\n else:\n torch_dtype = float32\n\n logger.debug(\"No GPU detected, using cpu\")\n logger.warning(\n \"Attention, using the CPU is not recommended! Computation time will be long.\"\n )\n\n # Use CPU if directml is not installed\n except Exception:\n logger.debug(f\"Package directml not found\")\n torch_dtype = float32\n\n logger.debug(\"No GPU detected, using cpu\")\n logger.warning(\n \"Attention, using the CPU is not recommended! Computation time will be long.\"\n )\n\n logger.info(\n f\"GPU config -- device: {device}, device name: {device_name}, torch_dtype: {torch_dtype}\"\n )\n return device, device_name, torch_dtype"
},
{
"identifier": "process_text",
"path": "src/funcs.py",
"snippet": "def process_text(text: str):\n \"\"\"\n Processes the text to not type dictation\n in which the user has not said anything\n\n Args:\n text (str): The text to be processed\n\n Returns:\n text (str): The processed text\n \"\"\"\n processed = text\n\n if text.strip().lower() in \"you're not.\":\n processed = \"\"\n\n return processed"
},
{
"identifier": "type_writing",
"path": "src/funcs.py",
"snippet": "def type_writing(text):\n \"\"\"\n Types the text onto the screen.\n Downside is that it is slow and activates\n other hotkeys if you hold windows\n due to it being real keystrokes.\n\n Args:\n text (str): The text to be typed\n\n Returns:\n None\n \"\"\"\n typewrite(text)"
},
{
"identifier": "copy_writing",
"path": "src/funcs.py",
"snippet": "def copy_writing(text):\n \"\"\"\n Copies the text to the clipboard and writes it.\n\n Args:\n text (str): The text to be copied and written\n\n Returns:\n None\n \"\"\"\n copy(text)\n hotkey(\"ctrl\", \"v\")"
}
] | from sys import exit
from os.path import join
from time import sleep, time
from src.funcs import find_gpu_config, process_text
from src.funcs import type_writing, copy_writing
from transformers.pipelines import pipeline
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor
from optimum.bettertransformer import BetterTransformer
import logging | 1,341 |
# from optimum.onnxruntime import ORTModelForSpeechSeq2Seq
# from optimum.nvidia.pipelines import pipeline
# MODEL_ID = "openai/whisper-tiny.en" # ~400 MiB of GPU memory
MODEL_ID = "distil-whisper/distil-small.en" # ~500-700 MiB of GPU memory
# MODEL_ID = "distil-whisper/distil-medium.en" # ~900-1500 MiB of GPU memory
# MODEL_ID = "distil-whisper/distil-large-v2" # ~1700-2000 MiB of GPU memory
# MODEL_ID = "openai/whisper-large-v3" # ~4000 MiB of GPU memory
# MODEL_ID = "optimum/whisper-tiny.en" # ~400 MiB of GPU memory
# Choosing which way to write text.
WRITE = type_writing
def service(queue, event):
# Configure the logging settings
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s - %(levelname)s - %(message)s",
filename=join("logs", "model.log"),
filemode="w",
)
logger = logging.getLogger(__name__)
# Checking for GPU
|
# from optimum.onnxruntime import ORTModelForSpeechSeq2Seq
# from optimum.nvidia.pipelines import pipeline
# MODEL_ID = "openai/whisper-tiny.en" # ~400 MiB of GPU memory
MODEL_ID = "distil-whisper/distil-small.en" # ~500-700 MiB of GPU memory
# MODEL_ID = "distil-whisper/distil-medium.en" # ~900-1500 MiB of GPU memory
# MODEL_ID = "distil-whisper/distil-large-v2" # ~1700-2000 MiB of GPU memory
# MODEL_ID = "openai/whisper-large-v3" # ~4000 MiB of GPU memory
# MODEL_ID = "optimum/whisper-tiny.en" # ~400 MiB of GPU memory
# Choosing which way to write text.
WRITE = type_writing
def service(queue, event):
# Configure the logging settings
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s - %(levelname)s - %(message)s",
filename=join("logs", "model.log"),
filemode="w",
)
logger = logging.getLogger(__name__)
# Checking for GPU | device, device_name, torch_dtype = find_gpu_config(logger) | 0 | 2023-11-12 01:20:50+00:00 | 2k |
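
The GPU probe above is straightforward to exercise on its own; this sketch assumes the project's src package (and torch) are importable and simply prints what was detected.

# Probe the device exactly as service() does above.
import logging

from src.funcs import find_gpu_config

logging.basicConfig(level=logging.INFO)
device, device_name, torch_dtype = find_gpu_config(logging.getLogger("probe"))
print(device, device_name, torch_dtype)  # e.g. cuda:0, "NVIDIA ...", torch.float16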
Fraunhofer-SCAI/corr_shap | corr_shap/CorrExplainer.py | [
{
"identifier": "SamplingStrategy",
"path": "corr_shap/sampling/SamplingStrategy.py",
"snippet": "class SamplingStrategy:\n def __init__(self, explainer, **kwargs):\n \"\"\" Construct all necessary attributes for the SamplingStrategy object.\"\"\"\n self.data = explainer.data.data\n self.data_weights = explainer.data.weights\n self.data_weight_sum = np.sum(self.data_weights)\n self.N = explainer.N # num samples in self.data\n\n def sample(self, m):\n \"\"\"\n Return prepared sample data.\n These data have fixed features for those contained in subset (m=1) and normalized weights.\n\n :param m: given mask of subset\n :return: samples with fixed masked features and normalized weights\n \"\"\"\n x = self.x\n samples = self.data.copy()\n samples = self.set_masked_features_to_instance(m, x, samples)\n weights = self.normalize(self.data_weights)\n return samples, weights\n\n def normalize(self, weights):\n \"\"\" Normalize weights by their sum\"\"\"\n if self.data_weight_sum != 0:\n weights = weights/self.data_weight_sum\n return weights\n\n def set_masked_features_to_instance(self, m, x, samples):\n \"\"\"\n Set masked features for subset to given instance.\n\n :param m: given mask of subset\n :param x: given instance to be explained\n :param samples: background data that are the basis for the sample\n :return: samples with fixed masked features\n \"\"\"\n if isinstance(self.varyingFeatureGroups, (list,)):\n for j in range(self.varyingFeatureGroups.shape[0]):\n for k in self.varyingFeatureGroups[j]:\n if m[j] == 1.0:\n samples[:, k] = x[0, k]\n else:\n # for non-jagged numpy array we can significantly boost performance\n mask = m == 1.0\n groups = self.varyingFeatureGroups[mask]\n if len(groups.shape) == 2:\n for group in groups:\n samples[:, group] = x[0, group]\n else:\n # further performance optimization in case each group has a single feature\n evaluation_data = x[0, groups]\n samples[:, groups] = evaluation_data\n return samples\n\n def set_instance(self, instance):\n \"\"\" Set instance to x. \"\"\"\n self.x = instance.x.copy()\n\n\n def set_varying_feature_groups(self, varying_groups):\n \"\"\" Set indicies of varying feature groups.\"\"\"\n self.varyingFeatureGroups = varying_groups"
},
{
"identifier": "get_sampling_strategy",
"path": "corr_shap/sampling/sampling_factory.py",
"snippet": "def get_sampling_strategy(type, explainer, kwargs):\n \"\"\"Assign the sampling strategy method to the explainer based on the given type. \"\"\"\n sampling_strategies = {\"default\": SamplingStrategy, \"gauss\": GaussStrategy, \"copula\": CopulaStrategy,\n \"empirical\": EmpiricalStrategy, \"gauss+empirical\": GaussEmpiricalStrategy,\n \"copula+empirical\": CopulaEmpiricalStrategy}\n return sampling_strategies[type](explainer=explainer, **kwargs)"
}
] | from scipy.special import binom
from scipy import sparse
from shap.utils._legacy import convert_to_instance, match_instance_to_data, IdentityLink
from shap.explainers._explainer import Explainer
from shap.explainers._kernel import KernelExplainer
from shap.explainers._kernel import Kernel as KernelExplainer
from corr_shap.sampling.SamplingStrategy import SamplingStrategy
from corr_shap.sampling.sampling_factory import get_sampling_strategy
import numpy as np
import pandas as pd
import logging
import copy
import itertools
import typing
import warnings | 986 |
try:
    from shap.explainers._kernel import KernelExplainer
except ImportError:
    from shap.explainers._kernel import Kernel as KernelExplainer
log = logging.getLogger('corr_shap')
class CorrExplainer(KernelExplainer):
"""Uses the modified Kernel SHAP method to explain the output of any function.
The modifications (based on the paper 'Explaining individual predictions when features are dependent:
More accurate approximations to Shapley values' by Kjersti Aas, Martin Jullum and Anders Løland)
offer the possibility to include dependencies between features.
There are 3 different approaches, which are described in the following sampling strategies.
"""
|
try:
    from shap.explainers._kernel import KernelExplainer
except ImportError:
    from shap.explainers._kernel import Kernel as KernelExplainer
log = logging.getLogger('corr_shap')
class CorrExplainer(KernelExplainer):
"""Uses the modified Kernel SHAP method to explain the output of any function.
The modifications (based on the paper 'Explaining individual predictions when features are dependent:
More accurate approximations to Shapley values' by Kjersti Aas, Martin Jullum and Anders Løland)
offer the possibility to include dependencies between features.
There are 3 different approaches, which are described in the following sampling strategies.
"""
| def __init__(self, model, data, link=IdentityLink(), sampling: typing.Union[str, SamplingStrategy]="default", sampling_kwargs={}, **kwargs): | 0 | 2023-11-14 08:56:18+00:00 | 2k |
codereport/jello | jello.py | [
{
"identifier": "Grid",
"path": "grid.py",
"snippet": "class Grid:\n def __init__(self, n):\n self.n = n * 2\n self.grid = [[\" \"] * self.n, [\" \"] * self.n]\n\n def add_level(self):\n self.grid.append([\" \"] * self.n)\n self.grid.append([\" \"] * self.n)\n\n def add_subtree(self, level, start, end, s):\n if s in [\"W\", \"m\", \"d\"]:\n self.grid[level * 2 ][start] = VERT\n self.grid[level * 2 + 1][start] = s\n return\n if (level + 1) * 2 > len(self.grid):\n self.add_level()\n mid = (start + end) // 2\n self.grid[level * 2][start ] = START\n self.grid[level * 2][end ] = END\n self.grid[level * 2][start + 1:end ] = list(HORIZ * (end - start -1 ))\n self.grid[level * 2][(start + end) // 2] = MID\n self.grid[level * 2 + 1][mid - len(s) // 2:mid - len(s) // 2 + len(s)] = list(s)\n\n def fill_in_vertical_bars(self):\n for column in range(0, self.n):\n found_start_end = False\n for row in reversed(range(len(self.grid))):\n c = self.grid[row][column]\n if c in [START, END]:\n found_start_end = True\n elif found_start_end:\n if c == \" \":\n self.grid[row][column] = \"⋮\" # │ alternative\n else:\n found_start_end = False\n\n # combinator chain sequence\n def ccs(self):\n first_two = \"\".join(\"\".join(row).strip()[0:2] for row in self.grid)\n no_bars = \"\".join(c for c in first_two if c not in \"─└ ⋮┬│\")\n while \"h₁\" in no_bars:\n no_bars = no_bars.replace(\"h₁\", \"\")\n return no_bars\n\n def display(self, indent = 0):\n for row in self.grid:\n print(\" \" * indent + \"\".join(row))"
},
{
"identifier": "Chain",
"path": "utils.py",
"snippet": "class Chain(Enum):\n MONADIC = 1\n DYADIC = 2"
},
{
"identifier": "Quick",
"path": "utils.py",
"snippet": "class Quick(Enum):\n QUICK = 3\n EACH = 10\n FLIP = 50"
},
{
"identifier": "Separator",
"path": "utils.py",
"snippet": "class Separator(Enum):\n MONADIC = 20\n DYADIC = 21"
}
] | import subprocess
import algorithm
import arity_notation
import draw
import tokens
import utils
from colorama import Fore, init
from prompt_toolkit import prompt
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.history import FileHistory
from prompt_toolkit.shortcuts import CompleteStyle
from grid import Grid
from utils import Chain, Quick, Separator | 1,201 | #!/usr/bin/env python3
def clear_screen():
subprocess.call("clear", shell=True)
def run_jelly(expr: str, args: list[str]):
try:
command = ["jelly", "eun", expr, *args]
result = subprocess.run(command, text=True, capture_output=True, check=True)
output_text = result.stdout.strip()
draw.cprint(output_text, Fore.GREEN, True)
except subprocess.CalledProcessError as e:
# Print the stderr output for more information about the error
print(Fore.RED + f"Error: {e}")
print(Fore.RED + "stderr:", e.stderr)
completer = WordCompleter(
[k for k in sorted(
list(tokens.niladic.keys()) +
list(tokens.monadic.keys()) +
list(tokens.dyadic.keys()) +
list(tokens.quick.keys()) +
list(tokens.separators.keys())) if len(k) > 1])
history = FileHistory("jello_history.txt")
def is_nilad_array(s: str) -> bool:
return set(list(s)).issubset(list("0123456789,[]"))
def to_jelly(token: str) -> str:
if token in tokens.monadic: return tokens.monadic[token]
if token in tokens.dyadic: return tokens.dyadic[token]
if token in tokens.niladic: return tokens.niladic[token]
if token in tokens.quick: return tokens.quick[token]
if token in tokens.separators: return tokens.separators[token]
if is_nilad_array(token): return token
raise Exception(f"{token} is not a valid Jello keyword.")
def convert(expr: list[str]) -> str:
return "".join([to_jelly(t) for t in expr])
def keyword_arity(k: str) -> int | Quick | Separator:
if k in tokens.niladic: return 0
if k in tokens.monadic: return 1
if k in tokens.dyadic: return 2
if k == "each": return Quick.EACH
if k == "c": return Quick.FLIP
if k in tokens.quick: return Quick.QUICK
| #!/usr/bin/env python3
def clear_screen():
subprocess.call("clear", shell=True)
def run_jelly(expr: str, args: list[str]):
try:
command = ["jelly", "eun", expr, *args]
result = subprocess.run(command, text=True, capture_output=True, check=True)
output_text = result.stdout.strip()
draw.cprint(output_text, Fore.GREEN, True)
except subprocess.CalledProcessError as e:
# Print the stderr output for more information about the error
print(Fore.RED + f"Error: {e}")
print(Fore.RED + "stderr:", e.stderr)
completer = WordCompleter(
[k for k in sorted(
list(tokens.niladic.keys()) +
list(tokens.monadic.keys()) +
list(tokens.dyadic.keys()) +
list(tokens.quick.keys()) +
list(tokens.separators.keys())) if len(k) > 1])
history = FileHistory("jello_history.txt")
def is_nilad_array(s: str) -> bool:
return set(list(s)).issubset(list("0123456789,[]"))
def to_jelly(token: str) -> str:
if token in tokens.monadic: return tokens.monadic[token]
if token in tokens.dyadic: return tokens.dyadic[token]
if token in tokens.niladic: return tokens.niladic[token]
if token in tokens.quick: return tokens.quick[token]
if token in tokens.separators: return tokens.separators[token]
if is_nilad_array(token): return token
raise Exception(f"{token} is not a valid Jello keyword.")
def convert(expr: list[str]) -> str:
return "".join([to_jelly(t) for t in expr])
def keyword_arity(k: str) -> int | Quick | Separator:
if k in tokens.niladic: return 0
if k in tokens.monadic: return 1
if k in tokens.dyadic: return 2
if k == "each": return Quick.EACH
if k == "c": return Quick.FLIP
if k in tokens.quick: return Quick.QUICK | if k == ".": return Separator.MONADIC | 3 | 2023-11-18 17:34:06+00:00 | 2k |
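A short sketch of how the helpers above compose, assuming a hypothetical "sum" entry in `tokens.monadic` (the real token tables are not shown in this record):

words = ["1,2,3", "sum"]        # a nilad array followed by a monadic keyword
jelly_expr = convert(words)     # concatenates the Jelly glyphs via to_jelly
run_jelly(jelly_expr, args=[])  # shells out to the `jelly` CLI defined above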
mMrBun/Chat2BI | llms/chatglm3/code_interpreter.py | [
{
"identifier": "preprocess_text",
"path": "llms/chatglm3/conversation.py",
"snippet": "def preprocess_text(\n system: str | None,\n tools: list[dict] | None,\n history: list[Conversation],\n) -> str:\n if tools:\n tools = json.dumps(tools, indent=4, ensure_ascii=False)\n\n prompt = f\"{Role.SYSTEM}\\n\"\n prompt += system if not tools else TOOL_PROMPT\n if tools:\n tools = json.loads(tools)\n prompt += json.dumps(tools, ensure_ascii=False)\n for conversation in history:\n prompt += f'{conversation}'\n prompt += f'{Role.ASSISTANT}\\n'\n return prompt"
},
{
"identifier": "Conversation",
"path": "llms/chatglm3/conversation.py",
"snippet": "class Conversation:\n role: Role\n content: str\n tool: str | None = None\n image: Image | None = None\n\n def __str__(self) -> str:\n print(self.role, self.content, self.tool)\n match self.role:\n case Role.SYSTEM | Role.USER | Role.ASSISTANT | Role.OBSERVATION:\n return f'{self.role}\\n{self.content}'\n case Role.TOOL:\n return f'{self.role}{self.tool}\\n{self.content}'\n case Role.INTERPRETER:\n return f'{self.role}interpreter\\n{self.content}'\n\n # Human readable format\n def get_text(self) -> str:\n text = postprocess_text(self.content)\n match self.role.value:\n case Role.TOOL.value:\n text = f'Calling tool `{self.tool}`:\\n{text}'\n case Role.INTERPRETER.value:\n text = f'{text}'\n case Role.OBSERVATION.value:\n text = f'Observation:\\n```\\n{text}\\n```'\n return text"
},
{
"identifier": "Role",
"path": "llms/chatglm3/conversation.py",
"snippet": "class Role(Enum):\n SYSTEM = auto()\n USER = auto()\n ASSISTANT = auto()\n TOOL = auto()\n INTERPRETER = auto()\n OBSERVATION = auto()\n\n def __str__(self):\n match self:\n case Role.SYSTEM:\n return \"<|system|>\"\n case Role.USER:\n return \"<|user|>\"\n case Role.ASSISTANT | Role.TOOL | Role.INTERPRETER:\n return \"<|assistant|>\"\n case Role.OBSERVATION:\n return \"<|observation|>\""
},
{
"identifier": "extract_code",
"path": "core/build_tools/utils.py",
"snippet": "def extract_code(text: str):\n try:\n pattern = r'```([^\\n]*)\\n(.*?)```'\n matches = re.findall(pattern, text, re.DOTALL)\n return matches[-1][1]\n except Exception as e:\n return None"
}
] | from llms.chatglm3.conversation import preprocess_text, Conversation, Role
from core.build_tools.utils import extract_code | 822 |
# English gloss of the Chinese system prompt below: "You are an intelligent AI
# assistant named ChatGLM, connected to a computer that cannot access the
# internet. When solving tasks with Python you can run code and observe the
# results; if a run has errors, improve the code as much as possible. You can
# handle files the user uploads to the computer; the default storage path is
# /mnt/data/."
SYSTEM_PROMPT = ('你是一位智能AI助手,你叫ChatGLM,你连接着一台电脑,但请注意不能联网。在使用Python'
                 '解决任务时,你可以运行代码并得到结果,如果运行结果有错误,你需要尽可能对代码进行改进。你可以处理用户上传到电脑上的文件,文件默认存储路径是/mnt/data/。')
MAX_LENGTH = 8192
TRUNCATE_LENGTH = 1024
def is_valid_python(code: str) -> bool:
try:
|
# English gloss of the Chinese system prompt below: "You are an intelligent AI
# assistant named ChatGLM, connected to a computer that cannot access the
# internet. When solving tasks with Python you can run code and observe the
# results; if a run has errors, improve the code as much as possible. You can
# handle files the user uploads to the computer; the default storage path is
# /mnt/data/."
SYSTEM_PROMPT = ('你是一位智能AI助手,你叫ChatGLM,你连接着一台电脑,但请注意不能联网。在使用Python'
                 '解决任务时,你可以运行代码并得到结果,如果运行结果有错误,你需要尽可能对代码进行改进。你可以处理用户上传到电脑上的文件,文件默认存储路径是/mnt/data/。')
MAX_LENGTH = 8192
TRUNCATE_LENGTH = 1024
def is_valid_python(code: str) -> bool:
try: | code = extract_code(code) | 3 | 2023-11-15 11:49:50+00:00 | 2k |
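The record above cuts `is_valid_python` off at its `try:`; a plausible completion using the stdlib `ast` module (an assumption, not the repo's confirmed body) would be:

import ast

def is_valid_python_sketch(code: str) -> bool:
    try:
        code = extract_code(code)      # the recorded next line of this sample
        ast.parse(code)                # raises SyntaxError on invalid source
        return True
    except (SyntaxError, TypeError):   # extract_code may return None
        return False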
compphoto/Intrinsic | intrinsic/pipeline.py | [
{
"identifier": "base_resize",
"path": "intrinsic/ordinal_util.py",
"snippet": "def base_resize(img, base_size=384):\n \"\"\"TODO DESCRIPTION\n\n params:\n img (TODO): TODO\n base_size (int) optional: TODO (default 384)\n\n returns:\n net_input (TODO): TODO\n \"\"\"\n h, w, _ = img.shape\n\n max_dim = max(h, w)\n scale = base_size / max_dim\n\n new_h, new_w = scale * h, scale * w\n new_h, new_w = round_32(new_h), round_32(new_w)\n\n net_input = resize(img, (new_h, new_w, 3), anti_aliasing=True)\n return net_input"
},
{
"identifier": "equalize_predictions",
"path": "intrinsic/ordinal_util.py",
"snippet": "def equalize_predictions(img, base, full, p=0.5):\n \"\"\"TODO DESCRIPTION\n\n params:\n img (TODO): TODO\n base (TODO): TODO\n full (TODO): TODO\n p (int) optional: TODO (default 0.5)\n\n returns:\n base (TODO): TODO\n new_full (TODO): TODO\n \"\"\"\n h, w, _ = img.shape\n\n full_shd = (1. / full.clip(1e-5)) - 1.\n base_shd = (1. / base.clip(1e-5)) - 1.\n \n full_alb = get_brightness(img) / full_shd.clip(1e-5)\n base_alb = get_brightness(img) / base_shd.clip(1e-5)\n\n rand_msk = (np.random.randn(h, w) > p).astype(np.uint8)\n\n flat_full_alb = full_alb[rand_msk == 1]\n flat_base_alb = base_alb[rand_msk == 1]\n\n scale, _, _, _ = np.linalg.lstsq(flat_full_alb.reshape(-1, 1), flat_base_alb, rcond=None)\n\n new_full_alb = scale * full_alb\n new_full_shd = get_brightness(img) / new_full_alb.clip(1e-5)\n new_full = 1.0 / (1.0 + new_full_shd)\n\n return base, new_full"
}
] | import torch
import numpy as np
from skimage.transform import resize
from chrislib.resolution_util import optimal_resize
from chrislib.general import round_32, uninvert
from intrinsic.ordinal_util import base_resize, equalize_predictions | 1,440 |
def run_pipeline(
models,
img_arr,
output_ordinal=False,
resize_conf=0.0,
base_size=384,
maintain_size=False,
linear=False,
device='cuda',
lstsq_p=0.0,
inputs='all'):
"""Runs the complete pipeline for shading and albedo prediction
params:
models (dict): models dictionary returned by model_util.load_models()
img_arr (np.array): RGB input image as numpy array between 0-1
output_ordinal (bool) optional: whether or not to output intermediate ordinal estimations
(default False)
        resize_conf (float) optional: confidence to use for resizing (between 0-1); if None,
            maintain the original size (default 0.0)
base_size (int) optional: size of the base resolution estimation (default 384)
maintain_size (bool) optional: whether or not the results match the input image size
(default False)
linear (bool) optional: whether or not the input image is already linear (default False)
device (str) optional: string representing device to use for pipeline (default "cuda")
lstsq_p (float) optional: subsampling factor for computing least-squares fit
when matching the scale of base and full estimations (default 0.0)
inputs (str) optional: network inputs ("full", "base", "rgb", "all") the rgb image is
always included (default "all")
returns:
        results (dict): a result dictionary with albedo, shading and potentially ordinal estimations
"""
results = {}
orig_h, orig_w, _ = img_arr.shape
# if no confidence value set, just round original size to 32 for model input
if resize_conf is None:
img_arr = resize(img_arr, (round_32(orig_h), round_32(orig_w)), anti_aliasing=True)
    # if the confidence is an int, just rescale the image so that the large side
# of the image matches the specified integer value
elif isinstance(resize_conf, int):
scale = resize_conf / max(orig_h, orig_w)
img_arr = resize(
img_arr,
(round_32(orig_h * scale), round_32(orig_w * scale)),
anti_aliasing=True)
    # if the confidence is a float, use the optimal resize code from Miangoleh et al.
elif isinstance(resize_conf, float):
img_arr = optimal_resize(img_arr, conf=resize_conf)
fh, fw, _ = img_arr.shape
# if the image is in sRGB we do simple linearization using gamma=2.2
if not linear:
lin_img = img_arr ** 2.2
else:
lin_img = img_arr
with torch.no_grad():
# ordinal shading estimation --------------------------
# resize image for base and full estimations and send through ordinal net
base_input = base_resize(lin_img, base_size)
full_input = lin_img
base_input = torch.from_numpy(base_input).permute(2, 0, 1).to(device).float()
full_input = torch.from_numpy(full_input).permute(2, 0, 1).to(device).float()
base_out = models['ordinal_model'](base_input.unsqueeze(0)).squeeze(0)
full_out = models['ordinal_model'](full_input.unsqueeze(0)).squeeze(0)
# the ordinal estimations come out of the model with a channel dim
base_out = base_out.permute(1, 2, 0).cpu().numpy()
full_out = full_out.permute(1, 2, 0).cpu().numpy()
base_out = resize(base_out, (fh, fw))
# if we are using all inputs, we scale the input estimations using the base estimate
if inputs == 'all':
|
def run_pipeline(
models,
img_arr,
output_ordinal=False,
resize_conf=0.0,
base_size=384,
maintain_size=False,
linear=False,
device='cuda',
lstsq_p=0.0,
inputs='all'):
"""Runs the complete pipeline for shading and albedo prediction
params:
models (dict): models dictionary returned by model_util.load_models()
img_arr (np.array): RGB input image as numpy array between 0-1
output_ordinal (bool) optional: whether or not to output intermediate ordinal estimations
(default False)
        resize_conf (float) optional: confidence to use for resizing (between 0-1); if None,
            maintain the original size (default 0.0)
base_size (int) optional: size of the base resolution estimation (default 384)
maintain_size (bool) optional: whether or not the results match the input image size
(default False)
linear (bool) optional: whether or not the input image is already linear (default False)
device (str) optional: string representing device to use for pipeline (default "cuda")
lstsq_p (float) optional: subsampling factor for computing least-squares fit
when matching the scale of base and full estimations (default 0.0)
inputs (str) optional: network inputs ("full", "base", "rgb", "all") the rgb image is
always included (default "all")
returns:
        results (dict): a result dictionary with albedo, shading and potentially ordinal estimations
"""
results = {}
orig_h, orig_w, _ = img_arr.shape
# if no confidence value set, just round original size to 32 for model input
if resize_conf is None:
img_arr = resize(img_arr, (round_32(orig_h), round_32(orig_w)), anti_aliasing=True)
    # if the confidence is an int, just rescale the image so that the large side
# of the image matches the specified integer value
elif isinstance(resize_conf, int):
scale = resize_conf / max(orig_h, orig_w)
img_arr = resize(
img_arr,
(round_32(orig_h * scale), round_32(orig_w * scale)),
anti_aliasing=True)
    # if the confidence is a float, use the optimal resize code from Miangoleh et al.
elif isinstance(resize_conf, float):
img_arr = optimal_resize(img_arr, conf=resize_conf)
fh, fw, _ = img_arr.shape
# if the image is in sRGB we do simple linearization using gamma=2.2
if not linear:
lin_img = img_arr ** 2.2
else:
lin_img = img_arr
with torch.no_grad():
# ordinal shading estimation --------------------------
# resize image for base and full estimations and send through ordinal net
base_input = base_resize(lin_img, base_size)
full_input = lin_img
base_input = torch.from_numpy(base_input).permute(2, 0, 1).to(device).float()
full_input = torch.from_numpy(full_input).permute(2, 0, 1).to(device).float()
base_out = models['ordinal_model'](base_input.unsqueeze(0)).squeeze(0)
full_out = models['ordinal_model'](full_input.unsqueeze(0)).squeeze(0)
# the ordinal estimations come out of the model with a channel dim
base_out = base_out.permute(1, 2, 0).cpu().numpy()
full_out = full_out.permute(1, 2, 0).cpu().numpy()
base_out = resize(base_out, (fh, fw))
# if we are using all inputs, we scale the input estimations using the base estimate
if inputs == 'all': | ord_base, ord_full = equalize_predictions(lin_img, base_out, full_out, p=lstsq_p) | 1 | 2023-11-13 19:24:09+00:00 | 2k |
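An illustrative call of `run_pipeline` based only on its docstring; `load_models`, `load_image` and the result keys are assumptions:

models = load_models('paper_weights')     # hypothetical model_util loader, per the docstring
img_arr = load_image('photo.png')         # RGB numpy array in [0, 1], per the docstring
results = run_pipeline(models, img_arr, resize_conf=0.0, device='cuda')
albedo, shading = results['albedo'], results['shading']  # keys assumed from the docstring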
davep/tinboard | tinboard/widgets/tags.py | [
{
"identifier": "ClearTags",
"path": "tinboard/messages/tags.py",
"snippet": "class ClearTags(Message):\n \"\"\"Clear any tags being used to filter.\"\"\""
},
{
"identifier": "ShowAlsoTaggedWith",
"path": "tinboard/messages/tags.py",
"snippet": "class ShowAlsoTaggedWith(TagMessage):\n \"\"\"Message to say bookmarks also of this tag should be shown.\"\"\""
},
{
"identifier": "ShowTaggedWith",
"path": "tinboard/messages/tags.py",
"snippet": "class ShowTaggedWith(TagMessage):\n \"\"\"Message to say bookmarks of this tag should be shown.\"\"\""
},
{
"identifier": "OptionListEx",
"path": "tinboard/widgets/extended_option_list.py",
"snippet": "class OptionListEx(OptionList):\n \"\"\"The Textual `OptionList` with more navigation keys.\"\"\"\n\n BINDINGS = [\n Binding(\"s, j\", \"cursor_down\", show=False),\n Binding(\"w, k\", \"cursor_up\", show=False),\n ]\n\n def clear_options(self) -> Self:\n \"\"\"Workaround for https://github.com/Textualize/textual/issues/3714\"\"\"\n super().clear_options()\n self._clear_content_tracking()\n return self"
}
] | from typing_extensions import Final, Self
from textual import on
from textual.binding import Binding
from textual.events import Focus
from textual.reactive import var
from textual.widgets.option_list import Option, OptionDoesNotExist
from rich.console import RenderableType
from rich.emoji import Emoji
from rich.table import Table
from ..messages import ClearTags, ShowAlsoTaggedWith, ShowTaggedWith
from .extended_option_list import OptionListEx | 992 | """Defines a widget for picking tags."""
##############################################################################
# Backward compatibility.
from __future__ import annotations
##############################################################################
# Python imports.
##############################################################################
# Textual imports.
##############################################################################
# Rich imports.
##############################################################################
# Local imports.
##############################################################################
class Tags(OptionListEx):
"""A menu of tags."""
CONTEXT_HELP = """
## Tag list keys
The following keys are available in the list of tags:
| Key | Description |
| - | - |
| <kbd>Enter</kbd> | Show bookmarks with this tag in the bookmark list. |
| <kbd>+</kbd> | Add this tag to any tag filter active in the bookmark list. |
"""
DEFAULT_CSS = """
Tags {
&:focus {
border: blank;
}
&> .option-list--option {
padding: 0 1;
}
}
"""
BINDINGS = [
Binding("enter", "select", "Show tagged", show=True),
Binding("+", "also_tagged", "Show also tagged"),
]
def _prompt(self, tag: str, count: int) -> RenderableType:
"""A prompt for the given tag.
Args:
tag: The tag to build a prompt for.
count: The count for that tag.
Returns:
The prompt for the tag.
"""
prompt = Table.grid(expand=True)
prompt.add_column(ratio=1)
prompt.add_column(justify="right")
prompt.add_row(tag, f"[dim i]{count}[/]")
return prompt
def _sorted(self, tags: list[tuple[str, int]]) -> list[tuple[str, int]]:
"""Sort the tags.
Args:
tags: The tags to sort.
Returns:
The tags in the desired sort order.
"""
return tags
def show(self, tags: list[tuple[str, int]]) -> Self:
"""Show the given list of tags.
Args:
tags: The tags to show in the widget.
Returns:
Self.
"""
self.can_focus = bool(tags)
highlighted_tag = (
self.get_option_at_index(self.highlighted).id
if self.highlighted is not None
else None
)
try:
return self.clear_options().add_options(
[
Option(self._prompt(tag, count), id=tag)
for tag, count in self._sorted(tags)
]
)
finally:
if tags:
try:
self.highlighted = self.get_option_index(highlighted_tag or "")
except OptionDoesNotExist:
self.highlighted = 0
def _on_focus(self, _: Focus) -> None:
"""Highlight the first item on focus, if none highlighted."""
if self.option_count and self.highlighted is None:
self.highlighted = 0
@on(OptionListEx.OptionSelected)
def _show_tagged(self, event: OptionListEx.OptionSelected) -> None:
"""Request that bookmarks of a given tag are shown.
Args:
event: The event to handle.
"""
if event.option.id is not None:
| """Defines a widget for picking tags."""
##############################################################################
# Backward compatibility.
from __future__ import annotations
##############################################################################
# Python imports.
##############################################################################
# Textual imports.
##############################################################################
# Rich imports.
##############################################################################
# Local imports.
##############################################################################
class Tags(OptionListEx):
"""A menu of tags."""
CONTEXT_HELP = """
## Tag list keys
The following keys are available in the list of tags:
| Key | Description |
| - | - |
| <kbd>Enter</kbd> | Show bookmarks with this tag in the bookmark list. |
| <kbd>+</kbd> | Add this tag to any tag filter active in the bookmark list. |
"""
DEFAULT_CSS = """
Tags {
&:focus {
border: blank;
}
&> .option-list--option {
padding: 0 1;
}
}
"""
BINDINGS = [
Binding("enter", "select", "Show tagged", show=True),
Binding("+", "also_tagged", "Show also tagged"),
]
def _prompt(self, tag: str, count: int) -> RenderableType:
"""A prompt for the given tag.
Args:
tag: The tag to build a prompt for.
count: The count for that tag.
Returns:
The prompt for the tag.
"""
prompt = Table.grid(expand=True)
prompt.add_column(ratio=1)
prompt.add_column(justify="right")
prompt.add_row(tag, f"[dim i]{count}[/]")
return prompt
def _sorted(self, tags: list[tuple[str, int]]) -> list[tuple[str, int]]:
"""Sort the tags.
Args:
tags: The tags to sort.
Returns:
The tags in the desired sort order.
"""
return tags
def show(self, tags: list[tuple[str, int]]) -> Self:
"""Show the given list of tags.
Args:
tags: The tags to show in the widget.
Returns:
Self.
"""
self.can_focus = bool(tags)
highlighted_tag = (
self.get_option_at_index(self.highlighted).id
if self.highlighted is not None
else None
)
try:
return self.clear_options().add_options(
[
Option(self._prompt(tag, count), id=tag)
for tag, count in self._sorted(tags)
]
)
finally:
if tags:
try:
self.highlighted = self.get_option_index(highlighted_tag or "")
except OptionDoesNotExist:
self.highlighted = 0
def _on_focus(self, _: Focus) -> None:
"""Highlight the first item on focus, if none highlighted."""
if self.option_count and self.highlighted is None:
self.highlighted = 0
@on(OptionListEx.OptionSelected)
def _show_tagged(self, event: OptionListEx.OptionSelected) -> None:
"""Request that bookmarks of a given tag are shown.
Args:
event: The event to handle.
"""
if event.option.id is not None: | self.post_message(ShowTaggedWith(event.option.id)) | 2 | 2023-11-13 08:19:41+00:00 | 2k |
buptlihang/CVLM | evaluation/MME/evaluate.py | [
{
"identifier": "IMAGE_TOKEN_INDEX",
"path": "model/utils.py",
"snippet": "IMAGE_TOKEN_INDEX = -200"
},
{
"identifier": "DEFAULT_IMAGE_TOKEN",
"path": "model/utils.py",
"snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\""
},
{
"identifier": "DEFAULT_IM_START_TOKEN",
"path": "model/utils.py",
"snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\""
},
{
"identifier": "DEFAULT_IM_END_TOKEN",
"path": "model/utils.py",
"snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\""
},
{
"identifier": "build_conversation",
"path": "model/utils.py",
"snippet": "def build_conversation():\n conversation = Conversation(\n system=\n \"A chat between a curious human and an artificial intelligence assistant. \"\n \"The assistant gives helpful, detailed, and polite answers to the human's questions.\",\n roles=[\"USER\", \"ASSISTANT\"],\n version=\"TWO\",\n messages=[],\n offset=0,\n sep_style=SeparatorStyle.TWO,\n sep=\" \",\n sep2=\"</s>\",\n )\n return conversation"
},
{
"identifier": "load_pretrained_model",
"path": "model/utils.py",
"snippet": "def load_pretrained_model(model_path,\n load_8bit=False,\n load_4bit=False,\n device_map=\"auto\",\n device=\"cuda\"):\n kwargs = {\"device_map\": device_map}\n\n if load_8bit:\n kwargs['load_in_8bit'] = True\n elif load_4bit:\n kwargs['load_in_4bit'] = True\n kwargs['quantization_config'] = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_compute_dtype=torch.float16,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type='nf4')\n else:\n kwargs['torch_dtype'] = torch.float16\n\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model, output_loading_info = AutoModelForCausalLM.from_pretrained(\n model_path, output_loading_info=True, **kwargs)\n model.resize_token_embeddings(len(tokenizer))\n image_processor = model.model.image_processor\n\n if hasattr(model.config, \"max_sequence_length\"):\n context_len = model.config.max_sequence_length\n else:\n context_len = 2048\n\n return tokenizer, model, image_processor, context_len"
},
{
"identifier": "disable_torch_init",
"path": "model/utils.py",
"snippet": "def disable_torch_init():\n \"\"\"\n Disable the redundant torch default initialization to accelerate model creation.\n \"\"\"\n import torch\n setattr(torch.nn.Linear, \"reset_parameters\", lambda self: None)\n setattr(torch.nn.LayerNorm, \"reset_parameters\", lambda self: None)"
},
{
"identifier": "get_model_name_from_path",
"path": "model/utils.py",
"snippet": "def get_model_name_from_path(model_path):\n model_path = model_path.strip(\"/\")\n model_paths = model_path.split(\"/\")\n if model_paths[-1].startswith('checkpoint-'):\n return model_paths[-2] + \"_\" + model_paths[-1]\n else:\n return model_paths[-1]"
},
{
"identifier": "tokenizer_image_token",
"path": "model/utils.py",
"snippet": "def tokenizer_image_token(prompt,\n tokenizer,\n image_token_index=IMAGE_TOKEN_INDEX,\n return_tensors=None):\n prompt_chunks = [\n tokenizer(chunk).input_ids for chunk in prompt.split('<image>')\n ]\n\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep] * len(X))\n for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if len(prompt_chunks) > 0 and len(\n prompt_chunks[0]\n ) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n\n for x in insert_separator(prompt_chunks,\n [image_token_index] * (offset + 1)):\n input_ids.extend(x[offset:])\n\n if return_tensors is not None:\n if return_tensors == 'pt':\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f'Unsupported tensor type: {return_tensors}')\n return input_ids"
},
{
"identifier": "process_images",
"path": "model/utils.py",
"snippet": "def process_images(images, image_processor, model_cfg):\n new_images = []\n for image in images:\n image = expand2square(\n image, tuple(int(x * 255) for x in image_processor.image_mean))\n image = image_processor.preprocess(\n image, return_tensors='pt')['pixel_values'][0]\n new_images.append(image)\n if all(x.shape == new_images[0].shape for x in new_images):\n new_images = torch.stack(new_images, dim=0)\n return new_images"
}
] | import argparse
import torch
import os
import json
import math
from tqdm import tqdm
from model.utils import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from model.utils import build_conversation, load_pretrained_model, disable_torch_init, get_model_name_from_path
from model.utils import tokenizer_image_token, process_images
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from collections import defaultdict | 1,490 |
def split_list(lst, n):
"""Split a list into n (roughly) equal-sized chunks"""
    chunk_size = math.ceil(len(lst) / n)  # ceiling division so every element is covered
return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]
def get_chunk(lst, n, k):
chunks = split_list(lst, n)
return chunks[k]
def get_gt(data_path):
GT = {}
for category in os.listdir(data_path):
category_dir = os.path.join(data_path, category)
if not os.path.isdir(category_dir):
continue
if os.path.exists(os.path.join(category_dir, 'images')):
image_path = os.path.join(category_dir, 'images')
qa_path = os.path.join(category_dir, 'questions_answers_YN')
else:
image_path = qa_path = category_dir
assert os.path.isdir(image_path), image_path
assert os.path.isdir(qa_path), qa_path
for file in os.listdir(qa_path):
if not file.endswith('.txt'):
continue
for line in open(os.path.join(qa_path, file)):
question, answer = line.strip().split('\t')
GT[(category, file, question)] = answer
return GT
# Custom dataset class
class CustomDataset(Dataset):
def __init__(self, questions, image_folder, tokenizer, image_processor,
model_config):
self.questions = questions
self.image_folder = image_folder
self.tokenizer = tokenizer
self.image_processor = image_processor
self.model_config = model_config
def __getitem__(self, index):
line = self.questions[index]
image_file = line["image"]
qs = line["text"]
|
def split_list(lst, n):
"""Split a list into n (roughly) equal-sized chunks"""
    chunk_size = math.ceil(len(lst) / n)  # ceiling division so every element is covered
return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]
def get_chunk(lst, n, k):
chunks = split_list(lst, n)
return chunks[k]
def get_gt(data_path):
GT = {}
for category in os.listdir(data_path):
category_dir = os.path.join(data_path, category)
if not os.path.isdir(category_dir):
continue
if os.path.exists(os.path.join(category_dir, 'images')):
image_path = os.path.join(category_dir, 'images')
qa_path = os.path.join(category_dir, 'questions_answers_YN')
else:
image_path = qa_path = category_dir
assert os.path.isdir(image_path), image_path
assert os.path.isdir(qa_path), qa_path
for file in os.listdir(qa_path):
if not file.endswith('.txt'):
continue
for line in open(os.path.join(qa_path, file)):
question, answer = line.strip().split('\t')
GT[(category, file, question)] = answer
return GT
# Custom dataset class
class CustomDataset(Dataset):
def __init__(self, questions, image_folder, tokenizer, image_processor,
model_config):
self.questions = questions
self.image_folder = image_folder
self.tokenizer = tokenizer
self.image_processor = image_processor
self.model_config = model_config
def __getitem__(self, index):
line = self.questions[index]
image_file = line["image"]
qs = line["text"] | qs = DEFAULT_IMAGE_TOKEN + '\n' + qs | 1 | 2023-11-10 03:52:46+00:00 | 2k |
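A small sketch of what `tokenizer_image_token` (from the context snippet above) produces: it splits the prompt on "<image>" and splices IMAGE_TOKEN_INDEX (-200) between the tokenized chunks so the model can locate the image embedding; `tokenizer` is assumed to be a loaded HuggingFace tokenizer:

prompt = DEFAULT_IMAGE_TOKEN + "\n" + "Is there a cat in the image?"
input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt')
# input_ids is a 1-D LongTensor containing -200 at the image position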
vvvm23/TchAIkovsky | generate.py | [
{
"identifier": "get_pretrained_tokenizer",
"path": "data/tokenizer.py",
"snippet": "def get_pretrained_tokenizer(path: str = \"tokenizer.json\"):\n return miditok.REMI.from_pretrained(path)"
},
{
"identifier": "TchAIkovskyModel",
"path": "model/model.py",
"snippet": "class TchAIkovskyModel(eqx.Module):\n id_embeddings: eqx.Module\n pos_embeddings: eqx.Module\n decoder: eqx.Module\n norm_out: eqx.Module\n out_head: eqx.Module\n\n dtype: jnp.dtype = eqx.field(static=True)\n output_dtype: jnp.dtype = eqx.field(static=True)\n\n def __init__(\n self,\n dim: int,\n num_heads: int,\n num_layers: int,\n vocab_size: int,\n max_positions: int,\n head_dim: Optional[int] = None,\n dropout: float = 0.0,\n key: PRNGKey = None,\n dtype: jnp.dtype = jnp.float32,\n output_dtype: jnp.dtype = jnp.float32,\n ):\n self.dtype = dtype\n self.output_dtype = output_dtype\n id_embeddings_key, pos_embeddings_key, decoder_key, out_key = jax.random.split(key, 4)\n\n self.id_embeddings = eqx.nn.Embedding(vocab_size, dim, key=id_embeddings_key)\n self.pos_embeddings = eqx.nn.Embedding(max_positions, dim, key=pos_embeddings_key)\n\n self.decoder = Decoder(\n decoder_key,\n dim,\n num_heads,\n num_layers,\n head_dim=head_dim,\n dropout=dropout,\n dtype=dtype,\n )\n\n self.norm_out = eqx.nn.LayerNorm(dim)\n self.out_head = eqx.nn.Linear(dim, vocab_size, use_bias=True, key=out_key)\n\n def __call__(self, input_ids, position_ids, mask, key=None):\n causal_mask = make_causal_mask(input_ids)[0]\n mask = jnp.where(mask, causal_mask, 0)\n\n x = jax.vmap(self.id_embeddings)(input_ids) + jax.vmap(self.pos_embeddings)(position_ids)\n x = self.decoder(x, mask, key)\n\n x = jax.vmap(self.norm_out)(x)\n logits = jax.vmap(self.out_head)(x)\n logits = logits.astype(self.output_dtype)\n return logits"
},
{
"identifier": "seed_others",
"path": "utils.py",
"snippet": "def seed_others(seed):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)"
}
] | import json
import equinox as eqx
import jax
import jax.numpy as jnp
import numpy as np
import orbax.checkpoint as ocp
import tqdm
from argparse import ArgumentParser
from datetime import datetime
from pathlib import Path
from types import SimpleNamespace
from typing import Optional
from loguru import logger
from miditoolkit import MidiFile
from data.tokenizer import get_pretrained_tokenizer
from model import TchAIkovskyModel
from utils import seed_others | 1,331 |
def load_config(config_path):
with open(config_path, mode="r") as f:
data = f.read()
json_dict = json.loads(data)
return SimpleNamespace(**json_dict)
@eqx.filter_jit
@eqx.debug.assert_max_traces(max_traces=1)
def generate_step(model, inputs, length, key, temperature):
logits = model(**inputs)
logits = jnp.take(logits, length - 1, axis=0)
if temperature == 0.0:
# argmax sampling
return jnp.argmax(logits, axis=-1)
logits = logits / temperature
return jax.random.categorical(key, logits, axis=-1)
def generate_loop(
model,
initial_input,
temperature,
key,
max_to_generate: Optional[int] = None,
model_max_positions: int = 1024,
output_generated_only: bool = False,
) -> np.ndarray:
sample_idx = initial_input.shape[0]
if output_generated_only:
output = []
else:
output = initial_input.tolist()
if max_to_generate is None:
DEFAULT_MAX = 1000
max_to_generate = DEFAULT_MAX
input_length = sample_idx + max_to_generate
if input_length > model_max_positions - 1:
input_length = model_max_positions - 1
position_ids = np.arange(input_length)
mask = np.concatenate(
[
np.ones((sample_idx,), dtype=bool),
np.zeros((input_length - sample_idx,), dtype=bool),
],
axis=-1,
dtype=bool,
)
input_ids = np.pad(initial_input, ((0, input_length - sample_idx),))
# TODO: maybe replace with jax.lax.scan loop for faster generation
for _ in tqdm.trange(max_to_generate):
key, subkey = jax.random.split(key)
inputs = dict(input_ids=input_ids, position_ids=position_ids, mask=mask)
token = generate_step(model, inputs, np.array(sample_idx), subkey, temperature).item()
output.append(token)
if sample_idx < input_length:
input_ids[sample_idx] = token
mask[sample_idx] = True
else:
input_ids = np.concatenate([input_ids[1:], np.array([token])], axis=-1)
sample_idx = min(input_length - 1, sample_idx + 1)
return np.array(output)
# tokenizes initial prompt
def tokenize_prompt(midi, tokenizer):
return tokenizer(midi)
# loads prompt MIDI file
def file_prompt(path):
midi = MidiFile(path)
return midi
def main(args):
logger.info("Beginning generation script.")
key = jax.random.PRNGKey(args.seed)
logger.info(f"Using PRNG key {args.seed}")
seed_others(args.seed)
logger.info("Loading config.")
config = load_config(args.config)
logger.info(f"Loading tokenizer from '{args.tokenizer}'")
|
def load_config(config_path):
with open(config_path, mode="r") as f:
data = f.read()
json_dict = json.loads(data)
return SimpleNamespace(**json_dict)
@eqx.filter_jit
@eqx.debug.assert_max_traces(max_traces=1)
def generate_step(model, inputs, length, key, temperature):
logits = model(**inputs)
logits = jnp.take(logits, length - 1, axis=0)
if temperature == 0.0:
# argmax sampling
return jnp.argmax(logits, axis=-1)
logits = logits / temperature
return jax.random.categorical(key, logits, axis=-1)
def generate_loop(
model,
initial_input,
temperature,
key,
max_to_generate: Optional[int] = None,
model_max_positions: int = 1024,
output_generated_only: bool = False,
) -> np.ndarray:
sample_idx = initial_input.shape[0]
if output_generated_only:
output = []
else:
output = initial_input.tolist()
if max_to_generate is None:
DEFAULT_MAX = 1000
max_to_generate = DEFAULT_MAX
input_length = sample_idx + max_to_generate
if input_length > model_max_positions - 1:
input_length = model_max_positions - 1
position_ids = np.arange(input_length)
mask = np.concatenate(
[
np.ones((sample_idx,), dtype=bool),
np.zeros((input_length - sample_idx,), dtype=bool),
],
axis=-1,
dtype=bool,
)
input_ids = np.pad(initial_input, ((0, input_length - sample_idx),))
# TODO: maybe replace with jax.lax.scan loop for faster generation
for _ in tqdm.trange(max_to_generate):
key, subkey = jax.random.split(key)
inputs = dict(input_ids=input_ids, position_ids=position_ids, mask=mask)
token = generate_step(model, inputs, np.array(sample_idx), subkey, temperature).item()
output.append(token)
if sample_idx < input_length:
input_ids[sample_idx] = token
mask[sample_idx] = True
else:
input_ids = np.concatenate([input_ids[1:], np.array([token])], axis=-1)
sample_idx = min(input_length - 1, sample_idx + 1)
return np.array(output)
# tokenizes initial prompt
def tokenize_prompt(midi, tokenizer):
return tokenizer(midi)
# loads prompt MIDI file
def file_prompt(path):
midi = MidiFile(path)
return midi
def main(args):
logger.info("Beginning generation script.")
key = jax.random.PRNGKey(args.seed)
logger.info(f"Using PRNG key {args.seed}")
seed_others(args.seed)
logger.info("Loading config.")
config = load_config(args.config)
logger.info(f"Loading tokenizer from '{args.tokenizer}'") | tokenizer = get_pretrained_tokenizer(args.tokenizer) | 0 | 2023-11-13 07:31:30+00:00 | 2k |
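A tiny numeric illustration (not from the repo) of the sliding window in `generate_loop`: tokens are written in place while the buffer has room, then the window shifts left:

buffer = [1, 2, 3, 0]        # input_ids with one masked free slot
buffer[3] = 4                # in-place write while sample_idx < input_length
buffer = buffer[1:] + [5]    # once full, shift left -> [2, 3, 4, 5]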
dazhangyu123/ACMIL | architecture/ibmil.py | [
{
"identifier": "Classifier_1fc",
"path": "architecture/network.py",
"snippet": "class Classifier_1fc(nn.Module):\n def __init__(self, n_channels, n_classes, droprate=0.0):\n super(Classifier_1fc, self).__init__()\n self.fc = nn.Linear(n_channels, n_classes)\n self.droprate = droprate\n if self.droprate != 0.0:\n self.dropout = torch.nn.Dropout(p=self.droprate)\n\n def forward(self, x):\n\n if self.droprate != 0.0:\n x = self.dropout(x)\n x = self.fc(x)\n return x"
},
{
"identifier": "DimReduction",
"path": "architecture/network.py",
"snippet": "class DimReduction(nn.Module):\n def __init__(self, n_channels, m_dim=512, numLayer_Res=0):\n super(DimReduction, self).__init__()\n self.fc1 = nn.Linear(n_channels, m_dim, bias=False)\n self.relu1 = nn.ReLU(inplace=True)\n self.numRes = numLayer_Res\n\n self.resBlocks = []\n for ii in range(numLayer_Res):\n self.resBlocks.append(residual_block(m_dim))\n self.resBlocks = nn.Sequential(*self.resBlocks)\n\n def forward(self, x):\n\n x = self.fc1(x)\n x = self.relu1(x)\n\n if self.numRes > 0:\n x = self.resBlocks(x)\n\n return x"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from architecture.network import Classifier_1fc, DimReduction
| 694 |
class Attention_Gated(nn.Module):
def __init__(self, L=512, D=128, K=1):
super(Attention_Gated, self).__init__()
self.L = L
self.D = D
self.K = K
self.attention_V = nn.Sequential(
nn.Linear(self.L, self.D),
nn.Tanh()
)
self.attention_U = nn.Sequential(
nn.Linear(self.L, self.D),
nn.Sigmoid()
)
self.attention_weights = nn.Linear(self.D, self.K)
def forward(self, x):
## x: N x L
A_V = self.attention_V(x) # NxD
A_U = self.attention_U(x) # NxD
A = self.attention_weights(A_V * A_U) # NxK
A = torch.transpose(A, 1, 0) # KxN
return A ### K x N
class IBMIL(nn.Module):
def __init__(self, conf, confounder_dim=128, confounder_merge='cat'):
super(IBMIL, self).__init__()
self.confounder_merge = confounder_merge
assert confounder_merge in ['cat', 'add', 'sub']
self.dimreduction = DimReduction(conf.D_feat, conf.D_inner)
self.attention = Attention_Gated(conf.D_inner, 128, 1)
|
class Attention_Gated(nn.Module):
def __init__(self, L=512, D=128, K=1):
super(Attention_Gated, self).__init__()
self.L = L
self.D = D
self.K = K
self.attention_V = nn.Sequential(
nn.Linear(self.L, self.D),
nn.Tanh()
)
self.attention_U = nn.Sequential(
nn.Linear(self.L, self.D),
nn.Sigmoid()
)
self.attention_weights = nn.Linear(self.D, self.K)
def forward(self, x):
## x: N x L
A_V = self.attention_V(x) # NxD
A_U = self.attention_U(x) # NxD
A = self.attention_weights(A_V * A_U) # NxK
A = torch.transpose(A, 1, 0) # KxN
return A ### K x N
class IBMIL(nn.Module):
def __init__(self, conf, confounder_dim=128, confounder_merge='cat'):
super(IBMIL, self).__init__()
self.confounder_merge = confounder_merge
assert confounder_merge in ['cat', 'add', 'sub']
self.dimreduction = DimReduction(conf.D_feat, conf.D_inner)
self.attention = Attention_Gated(conf.D_inner, 128, 1)
| self.classifier = Classifier_1fc(conf.D_inner, conf.n_class, 0)
| 0 | 2023-11-12 14:07:34+00:00 | 2k |
Kav-K/Described | services/openai_service.py | [
{
"identifier": "EnvService",
"path": "services/environment_service.py",
"snippet": "class EnvService:\n # To be expanded upon later!\n def __init__(self):\n self.env = {}\n\n @staticmethod\n def environment_path_with_fallback(env_name, relative_fallback=None):\n directory = os.getenv(env_name)\n if directory is not None:\n return Path(directory).resolve()\n\n if relative_fallback:\n app_relative = (app_root_path() / relative_fallback).resolve()\n if app_relative.exists():\n return app_relative\n\n return Path.cwd()\n\n @staticmethod\n def save_path():\n share_dir = os.getenv(\"SHARE_DIR\")\n if share_dir is not None:\n return Path(share_dir)\n return app_root_path()\n\n @staticmethod\n def find_shared_file(file_name):\n share_file_paths = []\n share_dir = os.getenv(\"SHARE_DIR\")\n if share_dir is not None:\n share_file_paths.append(Path(share_dir) / file_name)\n\n share_file_paths.extend(\n [\n app_root_path() / \"share\" / file_name,\n app_root_path() / file_name,\n Path(file_name),\n ]\n )\n\n for share_file_path in share_file_paths:\n if share_file_path.exists():\n return share_file_path.resolve()\n\n raise ValueError(f\"Unable to find shared data file {file_name}\")\n\n @staticmethod\n def get_allowed_guilds():\n # ALLOWED_GUILDS is a comma separated list of guild ids\n # It can also just be one guild ID\n # Read these allowed guilds and return as a list of ints\n try:\n allowed_guilds = os.getenv(\"ALLOWED_GUILDS\")\n except Exception:\n allowed_guilds = None\n\n if allowed_guilds is None:\n raise ValueError(\n \"ALLOWED_GUILDS is not defined properly in the environment file!\"\n \"Please copy your server's guild ID and put it into ALLOWED_GUILDS in the .env file.\"\n 'For example a line should look like: `ALLOWED_GUILDS=\"971268468148166697\"`'\n )\n\n allowed_guilds = (\n allowed_guilds.split(\",\") if \",\" in allowed_guilds else [allowed_guilds]\n )\n allowed_guilds = [int(guild) for guild in allowed_guilds]\n return allowed_guilds\n\n @staticmethod\n def get_described_channels():\n # ALLOWED_GUILDS is a comma separated list of guild ids\n # It can also just be one guild ID\n # Read these allowed guilds and return as a list of ints\n try:\n described_channels = os.getenv(\"DESCRIBED_CHANNELS\")\n except Exception:\n described_channels = None\n\n if described_channels is None:\n raise ValueError(\n \"DESCRIBED_CHANNELS is not properly defined in your environment file. 
All channels will be enabled for image descriptions\"\n )\n\n described_channels = (\n described_channels.split(\",\")\n if \",\" in described_channels\n else [described_channels]\n )\n return described_channels\n\n @staticmethod\n def get_discord_token():\n try:\n e2b_key = os.getenv(\"DISCORD_TOKEN\")\n return e2b_key\n except Exception:\n return None\n\n @staticmethod\n def get_openai_api_key():\n try:\n openai_key = os.getenv(\"OPENAI_API_KEY\")\n return openai_key\n except Exception:\n return None\n\n @staticmethod\n def get_admin_roles():\n # ADMIN_ROLES is a comma separated list of string roles\n # It can also just be one role\n # Read these allowed roles and return as a list of strings\n try:\n admin_roles = os.getenv(\"ADMIN_ROLES\")\n except Exception:\n admin_roles = None\n\n if admin_roles is None:\n print(\n \"ADMIN_ROLES is not defined properly in the environment file!\"\n \"Please copy your server's role and put it into ADMIN_ROLES in the .env file.\"\n 'For example a line should look like: `ADMIN_ROLES=\"Admin\"`'\n )\n print(\"Defaulting to allowing all users to use admin commands...\")\n return [None]\n\n admin_roles = (\n admin_roles.lower().split(\",\")\n if \",\" in admin_roles\n else [admin_roles.lower()]\n )\n return admin_roles"
},
{
"identifier": "IMAGE_ANALYSIS_PROMPT",
"path": "services/prompts/image_analysis_prompt.py",
"snippet": "IMAGE_ANALYSIS_PROMPT = \"\"\"\nYou are an image describer. You will be given one or more images and your goal is to describe all of the details in incredible, verbose detail. Pretend as if you are describing an image for a user that is visually impaired, thinking what information would most be useful for them to understand the image holistically.\nMake note to describe and talk about:\n- The colors of the image\n- The shapes of the objects in the image\n- The objects themselves in the image and what they are\n- Actions happening in the image\n- The scenery and landscape of the image\n- The emotions of the people in the image\n- The weather of the image\n- The time of day of the image\n- The set and setting of the image holistically.\n- Always perform OCR and extract all the text from the image when possible.\nAlways respond in third person, talk about the image provided in third person and describe it as if you are describing it to someone who is visually impaired.\nBe incredibly, very brief and concise while still conveying all the information possible.\nNow, describe an image. They will be given to you:\n\"\"\""
}
] | import traceback
import aiohttp
import backoff
from services.environment_service import EnvService
from services.prompts.image_analysis_prompt import IMAGE_ANALYSIS_PROMPT | 1,461 |
def backoff_handler_request(details):
print(
f"Backing off {details['wait']:0.1f} seconds after {details['tries']} tries calling function {details['target']} | "
f"{details['exception'].args[0]}"
)
class OpenAIExecutor:
def __init__(self):
self.openai_api_key = EnvService.get_openai_api_key()
try:
|
def backoff_handler_request(details):
print(
f"Backing off {details['wait']:0.1f} seconds after {details['tries']} tries calling function {details['target']} | "
f"{details['exception'].args[0]}"
)
class OpenAIExecutor:
def __init__(self):
self.openai_api_key = EnvService.get_openai_api_key()
try: | self.ANALYSIS_PRETEXT = IMAGE_ANALYSIS_PROMPT | 1 | 2023-11-14 02:22:13+00:00 | 2k |
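A sketch of how `backoff_handler_request` is typically wired up with the backoff library; the request coroutine, endpoint and exception set are assumptions, not the repo's code:

@backoff.on_exception(backoff.expo, aiohttp.ClientError,
                      max_tries=4, on_backoff=backoff_handler_request)
async def post_analysis(session: aiohttp.ClientSession, url: str, payload: dict):
    async with session.post(url, json=payload) as resp:  # hypothetical endpoint call
        return await resp.json()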
juftin/hatch-pip-compile | tests/test_installer.py | [
{
"identifier": "HatchPipCompileError",
"path": "hatch_pip_compile/exceptions.py",
"snippet": "class HatchPipCompileError(Exception):\n \"\"\"\n Base exception for hatch-pip-compile\n \"\"\""
},
{
"identifier": "PluginInstaller",
"path": "hatch_pip_compile/installer.py",
"snippet": "class PluginInstaller(ABC):\n \"\"\"\n Package Installer for the plugin\n\n This abstract base class is used to define the interface for\n how the plugin should install packages and dependencies.\n \"\"\"\n\n environment: \"PipCompileEnvironment\"\n\n @abstractmethod\n def install_dependencies(self) -> None:\n \"\"\"\n Install the dependencies\n \"\"\"\n\n def sync_dependencies(self) -> None:\n \"\"\"\n Sync the dependencies - same as `install_dependencies`\n \"\"\"\n self.install_dependencies()\n\n def install_project(self) -> None:\n \"\"\"\n Install the project (`--no-deps`)\n \"\"\"\n with self.environment.safe_activation():\n self.environment.plugin_check_command(\n self.environment.construct_pip_install_command(\n args=[\"--no-deps\", str(self.environment.root)]\n )\n )\n\n def install_project_dev_mode(self) -> None:\n \"\"\"\n Install the project in editable mode (`--no-deps`)\n \"\"\"\n with self.environment.safe_activation():\n self.environment.plugin_check_command(\n self.environment.construct_pip_install_command(\n args=[\"--no-deps\", \"--editable\", str(self.environment.root)]\n )\n )"
},
{
"identifier": "PipCompileFixture",
"path": "tests/conftest.py",
"snippet": "class PipCompileFixture:\n \"\"\"\n Testing Fixture Data Container\n \"\"\"\n\n __test__ = False\n\n isolation: pathlib.Path\n toml_doc: tomlkit.TOMLDocument\n pyproject: pathlib.Path\n project: Project\n platform: Platform\n isolated_data_dir: pathlib.Path\n\n application: Application = field(init=False)\n default_environment: PipCompileEnvironment = field(init=False)\n test_environment: PipCompileEnvironment = field(init=False)\n\n def __post_init__(self) -> None:\n \"\"\"\n Post Init\n \"\"\"\n self.application = Application(\n exit_func=lambda x: None, # noqa: ARG005\n verbosity=0,\n interactive=False,\n enable_color=False,\n )\n self.application.data_dir = self.isolated_data_dir\n self.application.project = self.project\n self.default_environment = self.reload_environment(\"default\")\n self.test_environment = self.reload_environment(\"test\")\n\n def reload_environment(self, environment: str) -> PipCompileEnvironment:\n \"\"\"\n Reload a new environment given the current state of the isolated project\n \"\"\"\n new_project = Project(self.isolation)\n return PipCompileEnvironment(\n root=self.isolation,\n metadata=new_project.metadata,\n name=environment,\n config=new_project.config.envs[environment],\n matrix_variables={},\n data_directory=self.isolated_data_dir,\n isolated_data_directory=self.isolated_data_dir,\n platform=self.platform,\n verbosity=0,\n )\n\n def update_pyproject(self) -> None:\n \"\"\"\n Update pyproject.toml\n \"\"\"\n tomlkit.dump(self.toml_doc, self.pyproject.open(\"w\"))"
}
] | from typing import Dict, Type
from unittest.mock import Mock
from hatch_pip_compile.exceptions import HatchPipCompileError
from hatch_pip_compile.installer import PluginInstaller
from tests.conftest import PipCompileFixture
import pytest | 1,134 | """
Installation Tests
"""
def test_pip_install_dependencies(mock_check_command: Mock, pip_compile: PipCompileFixture) -> None:
"""
Assert the `pip` installation command is called with the expected arguments
"""
pip_compile.default_environment.create()
pip_compile.default_environment.installer.install_dependencies()
expected_call = [
"python",
"-u",
"-m",
"pip",
"install",
"--disable-pip-version-check",
"--no-python-version-warning",
"-q",
"--requirement",
]
call_args = list(mock_check_command.call_args)[0][0][:-1]
assert call_args == expected_call
@pytest.mark.parametrize("installer", ["pip", "pip-sync"])
def test_installer_type(
installer: str, installer_dict: Dict[str, Type[PluginInstaller]], pip_compile: PipCompileFixture
) -> None:
"""
Test the `pip-compile-installer` configuration option
"""
pip_compile.toml_doc["tool"]["hatch"]["envs"]["default"]["pip-compile-installer"] = installer
pip_compile.update_pyproject()
updated_environment = pip_compile.reload_environment("default")
assert isinstance(updated_environment.installer, installer_dict[installer])
def test_installer_unknown(pip_compile: PipCompileFixture) -> None:
"""
Test that an exception is raised when an unknown installer is configured
"""
pip_compile.toml_doc["tool"]["hatch"]["envs"]["default"]["pip-compile-installer"] = "unknown"
pip_compile.update_pyproject()
| """
Installation Tests
"""
def test_pip_install_dependencies(mock_check_command: Mock, pip_compile: PipCompileFixture) -> None:
"""
Assert the `pip` installation command is called with the expected arguments
"""
pip_compile.default_environment.create()
pip_compile.default_environment.installer.install_dependencies()
expected_call = [
"python",
"-u",
"-m",
"pip",
"install",
"--disable-pip-version-check",
"--no-python-version-warning",
"-q",
"--requirement",
]
call_args = list(mock_check_command.call_args)[0][0][:-1]
assert call_args == expected_call
@pytest.mark.parametrize("installer", ["pip", "pip-sync"])
def test_installer_type(
installer: str, installer_dict: Dict[str, Type[PluginInstaller]], pip_compile: PipCompileFixture
) -> None:
"""
Test the `pip-compile-installer` configuration option
"""
pip_compile.toml_doc["tool"]["hatch"]["envs"]["default"]["pip-compile-installer"] = installer
pip_compile.update_pyproject()
updated_environment = pip_compile.reload_environment("default")
assert isinstance(updated_environment.installer, installer_dict[installer])
def test_installer_unknown(pip_compile: PipCompileFixture) -> None:
"""
Test that an exception is raised when an unknown installer is configured
"""
pip_compile.toml_doc["tool"]["hatch"]["envs"]["default"]["pip-compile-installer"] = "unknown"
pip_compile.update_pyproject() | with pytest.raises(HatchPipCompileError): | 0 | 2023-11-10 00:34:00+00:00 | 2k |
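A fixture shape that would satisfy the `installer_dict` parameter used in the tests above; the concrete installer class names are assumptions:

@pytest.fixture
def installer_dict() -> Dict[str, Type[PluginInstaller]]:
    from hatch_pip_compile.installer import PipInstaller, PipSyncInstaller  # assumed names
    return {"pip": PipInstaller, "pip-sync": PipSyncInstaller}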
google-deepmind/pix2act | pix2act/tasks/miniwob/search/write_value_fn_tf_examples.py | [
{
"identifier": "tf_utils",
"path": "pix2act/common/tf_utils.py",
"snippet": "def add_bytes_feature(\n example: tf.train.Example, key: str, value: bytes\n) -> None:\ndef add_text_feature(example: tf.train.Example, key: str, value: str) -> None:\ndef get_bytes_feature(example: tf.train.Example, key: str) -> bytes:\ndef get_text_feature(example: tf.train.Example, key: str) -> str:\ndef _get_hash(key: str) -> int:\ndef _increment_counter(item, counter):\n def __init__(\n self,\n output_dir: str,\n validation_percent: Optional[int] = 10,\n train_file_name: str = \"train.tfr\",\n val_file_name: str = \"val.tfr\",\n ):\n def _partition_index(\n self, example: tf.train.Example, unused_num_partitions: int\n ) -> int:\n def expand(self, pcoll):\nclass SplitAndWriteTFRecords(beam.PTransform):"
},
{
"identifier": "reward_utils",
"path": "pix2act/tasks/miniwob/search/reward_utils.py",
"snippet": "STEP_PENALTY = -1.0 / 30\nVALUE_FN_SCALAR = 30\n_NORMALIZED_REWARD_THRESHOLD = 0.9\ndef compute_surrogate_reward(raw_reward, steps_to_go=0):\ndef surrogate_reward_to_value_fn_target(surrogate_reward):\ndef value_fn_output_to_surrogate_reward(value_fn_output):"
}
] | from absl import app
from absl import flags
from pix2act.common import tf_utils
from pix2act.tasks.miniwob import episode_pb2
from pix2act.tasks.miniwob.search import reward_utils
import apache_beam as beam
import tensorflow as tf | 820 | # Copyright 2023 The pix2act Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Converts episodes to tf examples for training value function approximator.
"""
FLAGS = flags.FLAGS
flags.DEFINE_list("inputs", "", "Input tfrecord files of Episodes.")
flags.DEFINE_string("output_dir", "", "Output location for tf examples.")
flags.DEFINE_float(
"reward_threshold",
0.8,
"Demonstrations below this threshold will be discarded.",
)
class ConvertEpisode(beam.DoFn):
"""Convert episode to tf examples."""
def process(self, episode):
if not episode.task_name:
beam.metrics.Metrics.counter("ConvertEpisode", "no_task_name").inc()
elif not episode.steps:
beam.metrics.Metrics.counter("no_steps", episode.task_name).inc()
elif episode.raw_reward < FLAGS.reward_threshold:
beam.metrics.Metrics.counter(
"failed_demonstration", episode.task_name
).inc()
else:
beam.metrics.Metrics.counter("num_demos", episode.task_name).inc()
try:
total_steps = len(episode.steps)
for step_idx, step in enumerate(episode.steps):
steps_to_go = total_steps - step_idx
surrogate_reward = reward_utils.compute_surrogate_reward(
episode.raw_reward, steps_to_go
)
value_fn_target = reward_utils.surrogate_reward_to_value_fn_target(
surrogate_reward
)
example = tf.train.Example()
| # Copyright 2023 The pix2act Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Converts episodes to tf examples for training value function approximator.
"""
FLAGS = flags.FLAGS
flags.DEFINE_list("inputs", "", "Input tfrecord files of Episodes.")
flags.DEFINE_string("output_dir", "", "Output location for tf examples.")
flags.DEFINE_float(
"reward_threshold",
0.8,
"Demonstrations below this threshold will be discarded.",
)
class ConvertEpisode(beam.DoFn):
"""Convert episode to tf examples."""
def process(self, episode):
if not episode.task_name:
beam.metrics.Metrics.counter("ConvertEpisode", "no_task_name").inc()
elif not episode.steps:
beam.metrics.Metrics.counter("no_steps", episode.task_name).inc()
elif episode.raw_reward < FLAGS.reward_threshold:
beam.metrics.Metrics.counter(
"failed_demonstration", episode.task_name
).inc()
else:
beam.metrics.Metrics.counter("num_demos", episode.task_name).inc()
try:
total_steps = len(episode.steps)
for step_idx, step in enumerate(episode.steps):
steps_to_go = total_steps - step_idx
surrogate_reward = reward_utils.compute_surrogate_reward(
episode.raw_reward, steps_to_go
)
value_fn_target = reward_utils.surrogate_reward_to_value_fn_target(
surrogate_reward
)
example = tf.train.Example()
| tf_utils.add_bytes_feature(example, "image", step.screenshot_png) | 0 | 2023-11-13 22:50:55+00:00 | 2k |
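A hedged continuation of the DoFn above (not the repo's confirmed code): after the screenshot bytes are attached, the value-function target would be written with the same helpers and the example emitted; the "parse" feature key is an assumption:

          tf_utils.add_text_feature(example, "parse", str(value_fn_target))
          yield example.SerializeToString()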
zhang-tao-whu/DVIS_Plus | mask2former/modeling/meta_arch/mask_former_head.py | [
{
"identifier": "build_transformer_decoder",
"path": "mask2former/modeling/transformer_decoder/maskformer_transformer_decoder.py",
"snippet": "def build_transformer_decoder(cfg, in_channels, mask_classification=True):\n \"\"\"\n Build a instance embedding branch from `cfg.MODEL.INS_EMBED_HEAD.NAME`.\n \"\"\"\n name = cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME\n return TRANSFORMER_DECODER_REGISTRY.get(name)(cfg, in_channels, mask_classification)"
},
{
"identifier": "build_pixel_decoder",
"path": "mask2former/modeling/pixel_decoder/fpn.py",
"snippet": "def build_pixel_decoder(cfg, input_shape):\n \"\"\"\n Build a pixel decoder from `cfg.MODEL.MASK_FORMER.PIXEL_DECODER_NAME`.\n \"\"\"\n name = cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME\n model = SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape)\n forward_features = getattr(model, \"forward_features\", None)\n if not callable(forward_features):\n raise ValueError(\n \"Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. \"\n f\"Please implement forward_features for {name} to only return mask features.\"\n )\n return model"
}
] | import logging
import fvcore.nn.weight_init as weight_init
from copy import deepcopy
from typing import Callable, Dict, List, Optional, Tuple, Union
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer_decoder.maskformer_transformer_decoder import build_transformer_decoder
from ..pixel_decoder.fpn import build_pixel_decoder | 1,271 | # Copyright (c) Facebook, Inc. and its affiliates.
@SEM_SEG_HEADS_REGISTRY.register()
class MaskFormerHead(nn.Module):
_version = 2
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
# Do not warn if train from scratch
scratch = True
logger = logging.getLogger(__name__)
for k in list(state_dict.keys()):
newk = k
# if "sem_seg_head" in k and not k.startswith(prefix + "predictor"):
# newk = k.replace(prefix, prefix + "pixel_decoder.")
# # logger.debug(f"{k} ==> {newk}")
if newk != k:
state_dict[newk] = state_dict[k]
del state_dict[k]
scratch = False
if not scratch:
logger.warning(
f"Weight format of {self.__class__.__name__} have changed! "
"Please upgrade your models. Applying automatic conversion now ..."
)
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
num_classes: int,
pixel_decoder: nn.Module,
loss_weight: float = 1.0,
ignore_value: int = -1,
return_transformer_feature: bool = False,
# extra parameters
transformer_predictor: nn.Module,
transformer_in_feature: str,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
num_classes: number of classes to predict
pixel_decoder: the pixel decoder module
loss_weight: loss weight
ignore_value: category id to be ignored during training.
transformer_predictor: the transformer decoder that makes prediction
transformer_in_feature: input feature name to the transformer_predictor
"""
super().__init__()
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape]
feature_strides = [v.stride for k, v in input_shape]
feature_channels = [v.channels for k, v in input_shape]
self.ignore_value = ignore_value
self.common_stride = 4
self.loss_weight = loss_weight
self.return_transformer_feature = return_transformer_feature
self.pixel_decoder = pixel_decoder
self.predictor = transformer_predictor
self.transformer_in_feature = transformer_in_feature
self.num_classes = num_classes
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
# figure out in_channels to transformer predictor
if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "transformer_encoder":
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "pixel_embedding":
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "multi_scale_pixel_decoder": # for maskformer2
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
else:
transformer_predictor_in_channels = input_shape[cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE].channels
return {
"input_shape": {
k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
},
"ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
"return_transformer_feature": cfg.MODEL.SEM_SEG_HEAD.RETURN_TRANSFORMER_FEATURE,
"num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
"pixel_decoder": build_pixel_decoder(cfg, input_shape),
"loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT,
"transformer_in_feature": cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE,
| # Copyright (c) Facebook, Inc. and its affiliates.
@SEM_SEG_HEADS_REGISTRY.register()
class MaskFormerHead(nn.Module):
_version = 2
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
# Do not warn if train from scratch
scratch = True
logger = logging.getLogger(__name__)
for k in list(state_dict.keys()):
newk = k
# if "sem_seg_head" in k and not k.startswith(prefix + "predictor"):
# newk = k.replace(prefix, prefix + "pixel_decoder.")
# # logger.debug(f"{k} ==> {newk}")
if newk != k:
state_dict[newk] = state_dict[k]
del state_dict[k]
scratch = False
if not scratch:
logger.warning(
f"Weight format of {self.__class__.__name__} have changed! "
"Please upgrade your models. Applying automatic conversion now ..."
)
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
num_classes: int,
pixel_decoder: nn.Module,
loss_weight: float = 1.0,
ignore_value: int = -1,
return_transformer_feature: bool = False,
# extra parameters
transformer_predictor: nn.Module,
transformer_in_feature: str,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
num_classes: number of classes to predict
pixel_decoder: the pixel decoder module
loss_weight: loss weight
ignore_value: category id to be ignored during training.
transformer_predictor: the transformer decoder that makes prediction
transformer_in_feature: input feature name to the transformer_predictor
"""
super().__init__()
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape]
feature_strides = [v.stride for k, v in input_shape]
feature_channels = [v.channels for k, v in input_shape]
self.ignore_value = ignore_value
self.common_stride = 4
self.loss_weight = loss_weight
self.return_transformer_feature = return_transformer_feature
self.pixel_decoder = pixel_decoder
self.predictor = transformer_predictor
self.transformer_in_feature = transformer_in_feature
self.num_classes = num_classes
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
# figure out in_channels to transformer predictor
if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "transformer_encoder":
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "pixel_embedding":
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "multi_scale_pixel_decoder": # for maskformer2
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
else:
transformer_predictor_in_channels = input_shape[cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE].channels
return {
"input_shape": {
k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
},
"ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
"return_transformer_feature": cfg.MODEL.SEM_SEG_HEAD.RETURN_TRANSFORMER_FEATURE,
"num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
"pixel_decoder": build_pixel_decoder(cfg, input_shape),
"loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT,
"transformer_in_feature": cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE, | "transformer_predictor": build_transformer_decoder( | 0 | 2023-11-14 10:55:11+00:00 | 2k |
teamreboott/data-modori | data_modori/ops/filter/language_id_score_filter.py | [
{
"identifier": "Fields",
"path": "data_modori/utils/constant.py",
"snippet": "class Fields(object):\n stats = DEFAULT_PREFIX + 'stats__'\n meta = DEFAULT_PREFIX + 'meta__'\n context = DEFAULT_PREFIX + 'context__'\n suffix = DEFAULT_PREFIX + 'suffix__'"
},
{
"identifier": "StatsKeys",
"path": "data_modori/utils/constant.py",
"snippet": "class StatsKeys(object):\n alpha_token_ratio = 'alpha_token_ratio'\n korean_token_ratio = 'korean_token_ratio'\n alnum_ratio = 'alnum_ratio'\n avg_line_length = 'avg_line_length'\n char_rep_ratio = 'char_rep_ratio'\n flagged_words_ratio = 'flagged_words_ratio'\n lang = 'lang'\n lang_score = 'lang_score'\n max_line_length = 'max_line_length'\n perplexity = 'perplexity'\n special_char_ratio = 'special_char_ratio'\n stopwords_ratio = 'stopwords_ratio'\n text_len = 'text_len'\n num_token = 'num_token'\n num_words = 'num_words'\n word_rep_ratio = 'word_rep_ratio'"
},
{
"identifier": "prepare_model",
"path": "data_modori/utils/model_utils.py",
"snippet": "def prepare_model(lang='en', model_type='sentencepiece', model_key=None):\n \"\"\"\n Prepare and load a model or a tokenizer from MODEL_ZOO.\n\n :param lang: which lang model to load\n :param model_type: model or tokenizer type\n :param model_key: tokenizer name, only used when prepare HuggingFace\n tokenizer\n :return: a model or tokenizer instance\n \"\"\"\n\n type_to_name = {\n 'fasttext': ('lid.176.bin', prepare_fasttext_model),\n 'sentencepiece': ('%s.sp.model', prepare_sentencepiece_model),\n 'kenlm': ('%s.arpa.bin', prepare_kenlm_model),\n 'nltk': ('punkt.%s.pickle', prepare_nltk_model),\n 'huggingface': ('%s', prepare_huggingface_tokenizer),\n 'spacy': ('%s_core_web_md-3.5.0', prepare_diversity_model),\n 'spacy_ko': ('%s_core_news_md-3.7.0', prepare_diversity_model),\n 'konlpy': ('%s', prepare_konlpy_model),\n }\n assert model_type in type_to_name.keys(\n ), 'model_type must be one of the following: {}'.format(\n list(type_to_name.keys()))\n\n if model_key is None:\n model_key = model_type + '_' + lang\n if model_key not in MODEL_ZOO.keys():\n model_name, model_func = type_to_name[model_type]\n if model_type == 'fasttext':\n MODEL_ZOO[model_key] = model_func(model_name)\n elif model_type == 'huggingface':\n MODEL_ZOO[model_key] = model_func(model_key)\n elif model_type == 'konlpy':\n MODEL_ZOO[model_key] = model_func()\n else:\n MODEL_ZOO[model_key] = model_func(model_name, lang)\n return model_key"
},
{
"identifier": "get_model",
"path": "data_modori/utils/model_utils.py",
"snippet": "def get_model(model_key, lang='en', model_type='sentencepiece'):\n \"\"\"\n Get a model or a tokenizer from MODEL_ZOO.\n\n :param model_key: name of the model or tokenzier\n \"\"\"\n if model_key not in MODEL_ZOO:\n prepare_model(lang=lang, model_type=model_type, model_key=model_key)\n return MODEL_ZOO.get(model_key, None)"
},
{
"identifier": "OPERATORS",
"path": "data_modori/ops/base_op.py",
"snippet": "OPERATORS = Registry('Operators')"
},
{
"identifier": "Filter",
"path": "data_modori/ops/base_op.py",
"snippet": "class Filter:\n\n def __init__(self, text_key: str = None):\n \"\"\"\n Base class that removes specific info.\n\n :param text_key: the key name of field that stores sample texts\n to be processed\n \"\"\"\n if text_key is None:\n text_key = 'text'\n self.text_key = text_key\n from data_modori.core.data import wrap_func_with_nested_access\n self.process = wrap_func_with_nested_access(self.process)\n self.compute_stats = wrap_func_with_nested_access(self.compute_stats)\n\n def compute_stats(self, sample, context=False):\n \"\"\"\n Compute stats for the sample which is used as a metric to decide\n whether to filter this sample.\n\n :param sample: input sample.\n :param context: whether to store context information of intermediate\n vars in the sample temporarily.\n :return: sample with computed stats\n \"\"\"\n raise NotImplementedError\n\n def process(self, sample):\n \"\"\"\n For sample level, sample --> Boolean.\n\n :param sample: sample to decide whether to filter\n :return: true for keeping and false for filtering\n \"\"\"\n raise NotImplementedError"
}
] | from jsonargparse.typing import ClosedUnitInterval
from loguru import logger
from data_modori.utils.constant import Fields, StatsKeys
from data_modori.utils.model_utils import prepare_model, get_model
from ..base_op import OPERATORS, Filter | 1,416 |
@OPERATORS.register_module('language_id_score_filter')
class LanguageIDScoreFilter(Filter):
"""Filter to keep samples in a specific language with confidence score
larger than a specific min value."""
def __init__(self,
lang: str = '',
min_score: ClosedUnitInterval = 0.8,
*args,
**kwargs):
"""
Initialization method.
:param lang: Samples in which language to keep.
:param min_score: The min language identification confidence
scores of samples to keep.
:param args: extra args
:param kwargs: extra args
"""
super().__init__(*args, **kwargs)
self.lang = lang
self.min_score = min_score
self.model_key = prepare_model(lang=lang, model_type='fasttext')
def compute_stats(self, sample):
# check if it's computed already
if StatsKeys.lang in sample[
Fields.stats] and StatsKeys.lang_score in sample[Fields.stats]:
return sample
text = sample[self.text_key].lower().replace('\n', ' ')
|
@OPERATORS.register_module('language_id_score_filter')
class LanguageIDScoreFilter(Filter):
"""Filter to keep samples in a specific language with confidence score
larger than a specific min value."""
def __init__(self,
lang: str = '',
min_score: ClosedUnitInterval = 0.8,
*args,
**kwargs):
"""
Initialization method.
:param lang: Samples in which language to keep.
:param min_score: The min language identification confidence
scores of samples to keep.
:param args: extra args
:param kwargs: extra args
"""
super().__init__(*args, **kwargs)
self.lang = lang
self.min_score = min_score
self.model_key = prepare_model(lang=lang, model_type='fasttext')
def compute_stats(self, sample):
# check if it's computed already
if StatsKeys.lang in sample[
Fields.stats] and StatsKeys.lang_score in sample[Fields.stats]:
return sample
text = sample[self.text_key].lower().replace('\n', ' ') | ft_model = get_model(self.model_key, lang=self.lang, model_type='fasttext') | 3 | 2023-11-13 04:52:55+00:00 | 2k |
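The filter above delegates language identification to fastText via `get_model`. A sketch of the underlying call, assuming the `lid.176.bin` model referenced by `prepare_model` is present locally:

```python
# Direct fastText language-ID call, equivalent to what compute_stats wraps.
import fasttext

model = fasttext.load_model("lid.176.bin")  # assumes the model file exists locally
labels, scores = model.predict("this is an english sentence".replace("\n", " "))
lang = labels[0].replace("__label__", "")   # e.g. "en"
keep = (lang == "en") and (float(scores[0]) >= 0.8)  # mirrors min_score=0.8
```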
52phm/pylmkit | pylmkit/tools/search.py | [
{
"identifier": "Document",
"path": "pylmkit/utils/data_utils.py",
"snippet": "class Document(BaseModel):\n page_content: str\n metadata: dict = Field(default_factory=dict)\n type: str = \"Document\"\n\n def __str__(self):\n return f\"Document(page_content='{self.page_content}', metadata={self.metadata})\""
},
{
"identifier": "BaseKnowledgeBase",
"path": "pylmkit/core/base.py",
"snippet": "class BaseKnowledgeBase(object):\n def __init__(self, init_documents=None):\n self.documents = []\n self.splitter_documents = []\n if isinstance(init_documents, list):\n self.documents = init_documents\n\n @classmethod\n def load(cls, filepath, is_return=True, return_mode=\"doc\", extend=True):\n if filepath.endswith('.json'):\n data = read_json(filepath)\n elif filepath.endswith('.yaml') or filepath.endswith('yml'):\n data = read_yaml(filepath) # data=[{},{}]\n else:\n raise Exception(f\"The file type is not supported\")\n data_dict_as_document = dict_as_document(data)\n result = cls()._base(documents=data_dict_as_document, return_mode=return_mode, is_return=is_return,\n extend=extend)\n if is_return:\n return result\n\n @classmethod\n def add(cls, texts, metadatas=None, is_return=True, return_mode=\"doc\", extend=True, types=\"Document\"):\n data_dict_as_document = text_as_document(texts=texts, metadatas=metadatas, types=types)\n result = cls()._base(documents=data_dict_as_document, return_mode=return_mode, is_return=is_return,\n extend=extend)\n if is_return:\n return result\n\n def split(self, splitter=None, chunk_size=500, chunk_overlap=100, return_mode='doc', **kwargs):\n if splitter is None:\n splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap, **kwargs)\n else:\n splitter = splitter\n self.splitter_documents = splitter.split_documents(self.documents)\n if return_mode == 'doc':\n return self.splitter_documents\n else:\n return document_as_dict(self.splitter_documents)\n\n def to_csv_loader(self, filepath, index=False, **kwargs):\n data = document_as_dict(self.documents)\n pd.DataFrame(data).to_csv(filepath, index=index, **kwargs)\n\n def to_csv_splitter(self,\n filepath,\n splitter=None,\n chunk_size=500,\n chunk_overlap=100,\n index=False,\n splitter_kwargs={},\n csv_kwargs={}\n ):\n if not self.splitter_documents:\n self.splitter_documents = self.split(splitter=splitter, chunk_size=chunk_size,\n chunk_overlap=chunk_overlap, **splitter_kwargs)\n data = document_as_dict(self.splitter_documents)\n pd.DataFrame(data).to_csv(filepath, index=index, **csv_kwargs)\n\n def clear(self, mode='doc'):\n if mode == 'doc':\n self.documents = []\n else:\n self.splitter_documents = []\n\n def _base(self, documents, is_return=True, return_mode='doc', extend=True):\n if extend:\n self.documents.extend(documents) # # dict -> Document\n if is_return:\n if return_mode == 'doc':\n return self.documents\n else:\n return document_as_dict(self.documents)\n else:\n # self.documents = documents # when extend is False, just reset documents\n if is_return:\n if return_mode == 'doc':\n return documents\n else:\n return document_as_dict(documents)"
}
] | from duckduckgo_search import DDGS
from pylmkit.utils.data_utils import Document
from pylmkit.core.base import BaseKnowledgeBase | 1,104 |
class WebSearch(DDGS, BaseKnowledgeBase):
def __init__(
self,
topk=5,
backend="api",
region="wt-wt",
timelimit=None,
safesearch="moderate",
init_documents=None,
timeout=10,
headers=None,
proxies=None
):
DDGS.__init__(
self,
timeout=timeout,
headers=headers,
proxies=proxies
)
BaseKnowledgeBase.__init__(self, init_documents=init_documents)
self.topk = int(topk)
self.backend = backend
self.region = region
self.timelimit = timelimit
self.safesearch = safesearch
def get(self, keyword):
if keyword:
search_gen = super().text(keywords=keyword,
backend=self.backend,
region=self.region,
max_results=self.topk,
timelimit=self.timelimit,
safesearch=self.safesearch
)
for i, page in enumerate(list(search_gen)):
if page:
|
class WebSearch(DDGS, BaseKnowledgeBase):
def __init__(
self,
topk=5,
backend="api",
region="wt-wt",
timelimit=None,
safesearch="moderate",
init_documents=None,
timeout=10,
headers=None,
proxies=None
):
DDGS.__init__(
self,
timeout=timeout,
headers=headers,
proxies=proxies
)
BaseKnowledgeBase.__init__(self, init_documents=init_documents)
self.topk = int(topk)
self.backend = backend
self.region = region
self.timelimit = timelimit
self.safesearch = safesearch
def get(self, keyword):
if keyword:
search_gen = super().text(keywords=keyword,
backend=self.backend,
region=self.region,
max_results=self.topk,
timelimit=self.timelimit,
safesearch=self.safesearch
)
for i, page in enumerate(list(search_gen)):
if page: | self.documents.append(Document( | 0 | 2023-11-18 10:31:58+00:00 | 2k |
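The `WebSearch.get` method above wraps `DDGS.text`; a direct equivalent with an arbitrary query (the result keys `title`/`href`/`body` are the usual duckduckgo_search fields, but may vary by library version):

```python
# Standalone duckduckgo_search call mirroring WebSearch.get's parameters.
from duckduckgo_search import DDGS

with DDGS(timeout=10) as ddgs:
    for page in ddgs.text("retrieval augmented generation", region="wt-wt",
                          safesearch="moderate", backend="api", max_results=3):
        print(page["title"], page["href"])
```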
hadican/failedkite | app.py | [
{
"identifier": "Config",
"path": "config.py",
"snippet": "class Config:\n def __init__(self):\n self.slack_token = self._get_env_variable('SLACK_TOKEN')\n self.default_slack_email = self._get_env_variable('DEFAULT_SLACK_EMAIL')\n self.author_mapping = self._load_author_mapping('/config/author_mapping.yml')\n\n @staticmethod\n def _get_env_variable(name):\n value = os.environ.get(name)\n if not value:\n raise ValueError(f\"{name} environment variable not set\")\n return value\n\n @staticmethod\n def _load_author_mapping(file_path):\n with open(file_path, 'r') as mapping_file:\n return yaml.safe_load(mapping_file)"
},
{
"identifier": "NotificationService",
"path": "notification_service.py",
"snippet": "class NotificationService:\n def __init__(self, slack_client, config):\n self.slack_client = slack_client\n self.config = config\n self.default_user_id = self.slack_client.get_user_id_by_email(self.config.default_slack_email)\n if self.default_user_id is None:\n raise Exception(f\"Failed to retrieve default Slack user ID for the email: {self.config.default_slack_email}\")\n self.logger = logging.getLogger(self.__class__.__name__)\n\n def notify(self, build):\n build_web_url = build['web_url']\n build_source = build.get(\"source\")\n if build_source == \"schedule\":\n trigger_message = f\"Not a human-triggered job, no action was taken for the build url={build_web_url}\"\n self.logger.info(trigger_message)\n return trigger_message, 200\n\n fail_statuses = [\"failing\", \"failed\"]\n build_status = build.get(\"state\")\n if build_status not in fail_statuses:\n build_status_message = f\"Not a build failure, no action taken for the build url={build_web_url}.\"\n return build_status_message, 200\n\n build_creator = build.get(\"creator\")\n build_author = build.get(\"author\")\n\n if build_creator:\n email = build_creator.get(\"email\")\n elif build_author:\n username = build_author.get(\"username\")\n slack_email = self.config.author_mapping.get(username)\n\n if slack_email:\n email = slack_email\n else:\n build_author_message = f\"No user was found in the author mapping with the username={username} for the failing build url={build_web_url}\"\n self.slack_client.send_message(build_author_message, self.default_user_id)\n return build_author_message, 500\n else:\n build_user_message = f\"No user was found for the failing build url={build_web_url}\"\n self.slack_client.send_message(build_user_message, self.default_user_id)\n return build_user_message, 500\n\n user_id = self.slack_client.get_user_id_by_email(email)\n\n if not user_id:\n user_message = f\"Failed to fetch user ID from Slack for email={email} for build url={build_web_url}\"\n return user_message, 500\n\n buildkite_message = f\"Your build has `failed`. Here is the URL to check: {build_web_url}\"\n status = self.slack_client.send_message(buildkite_message, user_id)\n if status:\n return buildkite_message, 200\n else:\n return \"Failed to send Slack message.\", 500"
},
{
"identifier": "SlackClient",
"path": "slack_client.py",
"snippet": "class SlackClient:\n def __init__(self, token):\n self.client = WebClient(token=token)\n self.logger = logging.getLogger(self.__class__.__name__)\n\n def get_user_id_by_email(self, email):\n try:\n response = self.client.users_lookupByEmail(email=email)\n if response[\"ok\"]:\n return response[\"user\"][\"id\"]\n else:\n self.logger.error(\"Failed to fetch user ID for email=%s error=%s\", email, response[\"error\"])\n return None\n except Exception as e:\n self.logger.error(\"Error fetching user ID for email=%s error=%s\", email, str(e))\n return None\n\n def send_message(self, message, user_id):\n try:\n response = self.client.chat_postMessage(channel=user_id, text=message)\n if response[\"ok\"]:\n return True\n else:\n self.logger.error(\"Failed to send Slack message with error=%s\", response[\"error\"])\n return False\n except Exception as e:\n self.logger.error(\"Failed to send Slack message with error=%s\", str(e))\n return False"
}
] | import logging
from flask import Flask, request
from config import Config
from notification_service import NotificationService
from slack_client import SlackClient | 1,094 |
app = Flask(__name__)
logging.basicConfig(level=logging.INFO)
config = Config()
slack_client = SlackClient(token=config.slack_token)
|
app = Flask(__name__)
logging.basicConfig(level=logging.INFO)
config = Config()
slack_client = SlackClient(token=config.slack_token) | notification_service = NotificationService(slack_client, config) | 1 | 2023-11-11 20:35:31+00:00 | 2k |
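The app wires `Config`, `SlackClient`, and `NotificationService` together, but the route handler is cropped out of this row. A plausible webhook endpoint, under the assumption that Buildkite posts a JSON payload with a `build` key (the path and payload shape are guesses, not confirmed by the row):

```python
# Hypothetical continuation of app.py; endpoint path and payload keys assumed.
@app.route("/webhook", methods=["POST"])
def webhook():
    payload = request.get_json(force=True)
    build = payload.get("build", {})
    message, status = notification_service.notify(build)
    return message, status
```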
PufferAI/pokegym | pokegym/environment.py | [
{
"identifier": "ACTIONS",
"path": "pokegym/pyboy_binding.py",
"snippet": "ACTIONS = (Down, Left, Right, Up, A, B, Start, Select)"
},
{
"identifier": "make_env",
"path": "pokegym/pyboy_binding.py",
"snippet": "def make_env(gb_path, headless=True, quiet=False, **kwargs):\n gb_path='pokemon_red.gb'\n game = PyBoy(\n gb_path,\n debugging=False,\n window_type='headless' if headless else 'SDL2',\n hide_window=quiet,\n **kwargs,\n )\n\n screen = game.botsupport_manager().screen()\n\n if not headless:\n game.set_emulation_speed(6)\n\n return game, screen"
},
{
"identifier": "open_state_file",
"path": "pokegym/pyboy_binding.py",
"snippet": "def open_state_file(path):\n '''Load state file with BytesIO so we can cache it'''\n with open(path, 'rb') as f:\n initial_state = BytesIO(f.read())\n\n return initial_state"
},
{
"identifier": "load_pyboy_state",
"path": "pokegym/pyboy_binding.py",
"snippet": "def load_pyboy_state(pyboy, state):\n '''Reset state stream and load it into PyBoy'''\n state.seek(0)\n pyboy.load_state(state)"
},
{
"identifier": "run_action_on_emulator",
"path": "pokegym/pyboy_binding.py",
"snippet": "def run_action_on_emulator(pyboy, screen, action,\n headless=True, fast_video=True, frame_skip=24):\n '''Sends actions to PyBoy'''\n press, release = action.PRESS, action.RELEASE\n pyboy.send_input(press)\n\n if headless or fast_video:\n pyboy._rendering(False)\n\n frames = []\n for i in range(frame_skip):\n if i == 8: # Release button after 8 frames\n pyboy.send_input(release)\n if not fast_video: # Save every frame\n frames.append(screen.screen_ndarray())\n if i == frame_skip - 1:\n pyboy._rendering(True)\n pyboy.tick()\n\n if fast_video: # Save only the last frame\n frames.append(screen.screen_ndarray())"
},
{
"identifier": "ram_map",
"path": "pokegym/ram_map.py",
"snippet": "HP_ADDR = [0xD16C, 0xD198, 0xD1C4, 0xD1F0, 0xD21C, 0xD248]\nMAX_HP_ADDR = [0xD18D, 0xD1B9, 0xD1E5, 0xD211, 0xD23D, 0xD269]\nPARTY_SIZE_ADDR = 0xD163\nPARTY_ADDR = [0xD164, 0xD165, 0xD166, 0xD167, 0xD168, 0xD169]\nPARTY_LEVEL_ADDR = [0xD18C, 0xD1B8, 0xD1E4, 0xD210, 0xD23C, 0xD268]\nPOKE_XP_ADDR = [0xD179, 0xD1A5, 0xD1D1, 0xD1FD, 0xD229, 0xD255]\nCAUGHT_POKE_ADDR = range(0xD2F7, 0xD309)\nSEEN_POKE_ADDR = range(0xD30A, 0xD31D)\nOPPONENT_LEVEL_ADDR = [0xD8C5, 0xD8F1, 0xD91D, 0xD949, 0xD975, 0xD9A1]\nX_POS_ADDR = 0xD362\nY_POS_ADDR = 0xD361\nMAP_N_ADDR = 0xD35E\nBADGE_1_ADDR = 0xD356\nOAK_PARCEL_ADDR = 0xD74E\nOAK_POKEDEX_ADDR = 0xD74B\nOPPONENT_LEVEL = 0xCFF3\nENEMY_POKE_COUNT = 0xD89C\nEVENT_FLAGS_START_ADDR = 0xD747\nEVENT_FLAGS_END_ADDR = 0xD761\nMUSEUM_TICKET_ADDR = 0xD754\nMONEY_ADDR_1 = 0xD347\nMONEY_ADDR_100 = 0xD348\nMONEY_ADDR_10000 = 0xD349\ndef bcd(num):\ndef bit_count(bits):\ndef read_bit(game, addr, bit) -> bool:\ndef read_uint16(game, start_addr):\ndef position(game):\ndef party(game):\ndef opponent(game):\ndef oak_parcel(game):\ndef pokedex_obtained(game):\ndef pokemon_seen(game):\ndef pokemon_caught(game):\ndef hp(game):\ndef money(game):\ndef badges(game):\ndef events(game):"
},
{
"identifier": "game_map",
"path": "pokegym/game_map.py",
"snippet": "MAP_PATH = __file__.rstrip('game_map.py') + 'map_data.json'\nMAP_DATA = json.load(open(MAP_PATH, 'r'))['regions']\nMAP_DATA = {int(e['id']): e for e in MAP_DATA}\ndef local_to_global(r, c, map_n):"
}
] | from pdb import set_trace as T
from gymnasium import Env, spaces
from pokegym.pyboy_binding import (ACTIONS, make_env, open_state_file,
load_pyboy_state, run_action_on_emulator)
from pokegym import ram_map, game_map
import numpy as np
import os | 1,269 |
def play():
'''Creates an environment and plays it'''
env = Environment(rom_path='pokemon_red.gb', state_path=None, headless=False,
disable_input=False, sound=False, sound_emulated=False, verbose=True
)
env.reset()
env.game.set_emulation_speed(1)
# Display available actions
print("Available actions:")
|
def play():
'''Creates an environment and plays it'''
env = Environment(rom_path='pokemon_red.gb', state_path=None, headless=False,
disable_input=False, sound=False, sound_emulated=False, verbose=True
)
env.reset()
env.game.set_emulation_speed(1)
# Display available actions
print("Available actions:") | for idx, action in enumerate(ACTIONS): | 0 | 2023-11-16 18:34:28+00:00 | 2k |
AlexandrErohin/home-assistant-flightradar24 | custom_components/flightradar24/coordinator.py | [
{
"identifier": "BoundingBox",
"path": "custom_components/flightradar24/models.py",
"snippet": "class BoundingBox:\n \"\"\"Bounding box for retrieving state vectors.\"\"\"\n\n min_latitude: float\n max_latitude: float\n min_longitude: float\n max_longitude: float\n\n def validate(self) -> None:\n \"\"\"Validate if the latitude and longitude are correct.\"\"\"\n self._check_latitude(self.min_latitude)\n self._check_latitude(self.max_latitude)\n self._check_longitude(self.min_longitude)\n self._check_longitude(self.max_longitude)\n\n def get_string(self) -> str:\n return \"{},{},{},{}\".format(self.max_latitude, self.min_latitude, self.min_longitude, self.max_longitude)\n\n @staticmethod\n def _check_latitude(degrees: float) -> None:\n if degrees < -90 or degrees > 90:\n msg = f\"Invalid latitude {degrees}! Must be in [-90, 90].\"\n raise Exception(msg)\n\n @staticmethod\n def _check_longitude(degrees: float) -> None:\n if degrees < -180 or degrees > 180:\n msg = f\"Invalid longitude {degrees}! Must be in [-180, 180].\"\n raise Exception(msg)"
},
{
"identifier": "DOMAIN",
"path": "custom_components/flightradar24/const.py",
"snippet": "DOMAIN = \"flightradar24\""
},
{
"identifier": "URL",
"path": "custom_components/flightradar24/const.py",
"snippet": "URL = 'https://www.flightradar24.com/'"
},
{
"identifier": "DEFAULT_NAME",
"path": "custom_components/flightradar24/const.py",
"snippet": "DEFAULT_NAME = \"FlightRadar24\""
},
{
"identifier": "EVENT_FLIGHTRADAR24_ENTRY",
"path": "custom_components/flightradar24/const.py",
"snippet": "EVENT_FLIGHTRADAR24_ENTRY = f\"{DOMAIN}_entry\""
},
{
"identifier": "EVENT_FLIGHTRADAR24_EXIT",
"path": "custom_components/flightradar24/const.py",
"snippet": "EVENT_FLIGHTRADAR24_EXIT = f\"{DOMAIN}_exit\""
}
] | from typing import Any
from datetime import timedelta
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from homeassistant.helpers.device_registry import DeviceInfo
from .models import BoundingBox
from .const import (
DOMAIN,
URL,
DEFAULT_NAME,
EVENT_FLIGHTRADAR24_ENTRY,
EVENT_FLIGHTRADAR24_EXIT,
)
from logging import Logger
from FlightRadar24 import FlightRadar24API
import math
import pycountry | 669 | from __future__ import annotations
class FlightRadar24Coordinator(DataUpdateCoordinator[int]):
def __init__(
self,
hass: HomeAssistant,
bound: BoundingBox,
client: FlightRadar24API,
update_interval: int,
logger: Logger,
) -> None:
self._bound = bound
self._client = client
self._logger = logger
self.tracked: dict[int, dict[str, Any]] | None = None
self.entered = {}
self.exited = {}
self.device_info = DeviceInfo(
configuration_url=URL,
| from __future__ import annotations
class FlightRadar24Coordinator(DataUpdateCoordinator[int]):
def __init__(
self,
hass: HomeAssistant,
bound: BoundingBox,
client: FlightRadar24API,
update_interval: int,
logger: Logger,
) -> None:
self._bound = bound
self._client = client
self._logger = logger
self.tracked: dict[int, dict[str, Any]] | None = None
self.entered = {}
self.exited = {}
self.device_info = DeviceInfo(
configuration_url=URL, | identifiers={(DOMAIN, DEFAULT_NAME)}, | 1 | 2023-11-16 10:51:24+00:00 | 2k |
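The coordinator is built around the `BoundingBox` model shown in this row's context; a quick usage check with arbitrary coordinates:

```python
# BoundingBox round-trip using the dataclass from the context above.
bound = BoundingBox(min_latitude=52.3, max_latitude=52.7,
                    min_longitude=13.1, max_longitude=13.8)
bound.validate()            # raises on out-of-range degrees
print(bound.get_string())   # -> "52.7,52.3,13.1,13.8"
```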
ej0cl6/TextEE | TextEE/models/QueryAndExtract/EAEmodel.py | [
{
"identifier": "Metadata",
"path": "TextEE/models/QueryAndExtract/metadata.py",
"snippet": "class Metadata(object):\n def __init__(self, metadata_path, dataset, type_set):\n self.pos_set = ['ADJ', 'ADP', 'ADV', 'AUX', 'CCONJ', 'DET', 'INTJ', 'NOUN', 'NUM', 'PART', 'PRON', 'PROPN',\n 'PUNCT', 'SCONJ', 'SYM', 'VERB', 'X']\n self.pos2id = dict((v, i) for v, i in zip(sorted(self.pos_set), range(len(self.pos_set))))\n self.entity_to_ids = {'FAC': 0, 'GPE': 1, 'LOC': 2, 'ORG': 3, 'PER': 4, 'VEH': 5, 'WEA': 6, 'O': 7, '[PAD]': 8}\n\n with open(metadata_path, 'r') as j:\n meta = json.loads(j.read())\n self.dataset = dataset\n self.type_set = type_set\n self.metadata = DatasetFactContainer(meta[dataset], type_set=type_set)\n\n def __str__(self):\n return '\\n'.join(['%s:%s' % item for item in self.__dict__.items()])"
},
{
"identifier": "pad_seq",
"path": "TextEE/models/QueryAndExtract/utils.py",
"snippet": "def pad_seq(data, pad_value=0, dtype='long'):\n N = len(data)\n for i in range(N):\n data[i] = np.array(data[i])\n maxlen = max([len(x) for x in data])\n data = pad_sequences(data, maxlen=maxlen, dtype=dtype, truncating=\"post\", padding=\"post\", value=pad_value)\n return torch.Tensor(data).cuda()"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import ipdb
import ipdb
from transformers import BertConfig, RobertaConfig, BertModel, RobertaModel
from .metadata import Metadata
from .utils import pad_seq
from keras_preprocessing.sequence import pad_sequences | 676 |
class QueryAndExtractEAEModel(nn.Module):
def __init__(self, config, tokenizer, type_set):
super().__init__()
self.config = config
self.tokenizer = tokenizer
self.type_set = type_set
self.earl_model = EARLModel(config, tokenizer, type_set)
self.ner_model = NERModel(config, tokenizer)
def forward(self, batch):
ner_loss = self.ner_model(batch)
loss, score = self.earl_model(batch)
return loss, score, ner_loss
class EARLModel(nn.Module):
def __init__(self, config, tokenizer, type_set):
super().__init__()
self.config = config
self.tokenizer = tokenizer
self.tokenizer_pad_value = self.tokenizer.convert_tokens_to_ids([self.tokenizer.pad_token])[0]
self.type_set = type_set
|
class QueryAndExtractEAEModel(nn.Module):
def __init__(self, config, tokenizer, type_set):
super().__init__()
self.config = config
self.tokenizer = tokenizer
self.type_set = type_set
self.earl_model = EARLModel(config, tokenizer, type_set)
self.ner_model = NERModel(config, tokenizer)
def forward(self, batch):
ner_loss = self.ner_model(batch)
loss, score = self.earl_model(batch)
return loss, score, ner_loss
class EARLModel(nn.Module):
def __init__(self, config, tokenizer, type_set):
super().__init__()
self.config = config
self.tokenizer = tokenizer
self.tokenizer_pad_value = self.tokenizer.convert_tokens_to_ids([self.tokenizer.pad_token])[0]
self.type_set = type_set
| self.metadata = Metadata(config.metadata_path, self.config.dataset, type_set) | 0 | 2023-11-15 21:32:56+00:00 | 2k |
fofr/cog-sdxl-multi-controlnet-lora | controlnet.py | [
{
"identifier": "ControlNetPreprocessor",
"path": "controlnet_preprocess.py",
"snippet": "class ControlNetPreprocessor:\n ANNOTATOR_CLASSES = {\n \"none\": None,\n \"edge_canny\": CannyDetector,\n \"depth_leres\": LeresDetector,\n \"depth_midas\": MidasDetector,\n \"soft_edge_pidi\": PidiNetDetector,\n \"soft_edge_hed\": HEDdetector,\n \"lineart\": LineartDetector,\n \"lineart_anime\": LineartAnimeDetector,\n \"openpose\": OpenposeDetector,\n # \"straight_edge_mlsd\": None,\n # \"face_detector\": None,\n # \"content_shuffle\": None,\n # \"normal_bae\": None,\n # \"segementation_sam\": None,\n }\n\n ANNOTATOR_NAMES = list(ANNOTATOR_CLASSES.keys())\n\n def __init__(self, predictor):\n WeightsDownloader.download_if_not_exists(\n CONTROLNET_PREPROCESSOR_URL, CONTROLNET_PREPROCESSOR_MODEL_CACHE\n )\n\n self.annotators = {}\n self.predictor = predictor\n\n torch.device(\"cuda\")\n\n @staticmethod\n def get_annotator_names():\n return ControlNetPreprocessor.ANNOTATOR_NAMES\n\n def initialize_detector(\n self, detector_class, model_name=\"lllyasviel/Annotators\", **kwargs\n ):\n print(f\"Initializing {detector_class.__name__}\")\n if hasattr(detector_class, 'from_pretrained'):\n return detector_class.from_pretrained(\n model_name,\n cache_dir=CONTROLNET_PREPROCESSOR_MODEL_CACHE,\n **kwargs,\n )\n else:\n return detector_class(**kwargs)\n\n def annotators_list(self):\n return list(self.annotators.keys())\n\n def process_image(self, image, annotator):\n print(f\"Processing image with {annotator}\")\n if annotator not in self.annotators:\n self.annotators[annotator] = self.initialize_detector(\n self.ANNOTATOR_CLASSES[annotator]\n )\n return self.annotators[annotator](image)"
},
{
"identifier": "WeightsDownloader",
"path": "weights_downloader.py",
"snippet": "class WeightsDownloader:\n @staticmethod\n def download_if_not_exists(url, dest):\n if not os.path.exists(dest):\n WeightsDownloader.download(url, dest)\n\n @staticmethod\n def download(url, dest):\n start = time.time()\n print(\"downloading url: \", url)\n print(\"downloading to: \", dest)\n subprocess.check_call([\"pget\", \"-x\", url, dest], close_fds=False)\n print(\"downloading took: \", time.time() - start)"
}
] | import torch
from diffusers import ControlNetModel
from controlnet_preprocess import ControlNetPreprocessor
from weights_downloader import WeightsDownloader | 1,238 |
CONTROLNET_MODEL_CACHE = "./controlnet-cache"
CONTROLNET_URL = "https://weights.replicate.delivery/default/controlnet/sdxl-cn-canny-depth-softe-pose-qr.tar"
class ControlNet:
CONTROLNET_MODELS = [
"none",
"edge_canny",
"illusion",
"depth_leres",
"depth_midas",
"soft_edge_pidi",
"soft_edge_hed",
"lineart",
"lineart_anime",
"openpose",
# Preprocessors without an XL model yet
# "straight_edge_mlsd",
# "face_detector",
# "content_shuffle",
# "normal_bae",
# "segementation_sam",
]
def __init__(self, predictor):
WeightsDownloader.download_if_not_exists(CONTROLNET_URL, CONTROLNET_MODEL_CACHE)
self.predictor = predictor
self.controlnet_preprocessor = None
self.models = {}
def initialize_controlnet(self, model_name):
print("Initializing", model_name)
return ControlNetModel.from_pretrained(
model_name, cache_dir=CONTROLNET_MODEL_CACHE, torch_dtype=torch.float16
)
def get_model(self, controlnet_name):
if controlnet_name not in self.models:
if controlnet_name.startswith("edge_"):
self.models[controlnet_name] = self.initialize_controlnet("diffusers/controlnet-canny-sdxl-1.0")
elif controlnet_name.startswith("depth_"):
self.models[controlnet_name] = self.initialize_controlnet("diffusers/controlnet-depth-sdxl-1.0-small")
elif controlnet_name.startswith("soft_edge") or controlnet_name.startswith("lineart"):
self.models[controlnet_name] = self.initialize_controlnet("SargeZT/controlnet-sd-xl-1.0-softedge-dexined")
elif controlnet_name == "openpose":
self.models[controlnet_name] = self.initialize_controlnet("thibaud/controlnet-openpose-sdxl-1.0")
elif controlnet_name == "illusion":
self.models[controlnet_name] = self.initialize_controlnet("monster-labs/control_v1p_sdxl_qrcode_monster")
return self.models.get(controlnet_name)
def get_models(self, controlnet_names):
models = [
self.get_model(controlnet_name) for controlnet_name in controlnet_names
]
return list(filter(None, models))
def preprocess(self, image, controlnet_name):
# Illusion model needs no preprocessing
if controlnet_name == "illusion" or controlnet_name == "none":
return image
if self.controlnet_preprocessor is None:
|
CONTROLNET_MODEL_CACHE = "./controlnet-cache"
CONTROLNET_URL = "https://weights.replicate.delivery/default/controlnet/sdxl-cn-canny-depth-softe-pose-qr.tar"
class ControlNet:
CONTROLNET_MODELS = [
"none",
"edge_canny",
"illusion",
"depth_leres",
"depth_midas",
"soft_edge_pidi",
"soft_edge_hed",
"lineart",
"lineart_anime",
"openpose",
# Preprocessors without an XL model yet
# "straight_edge_mlsd",
# "face_detector",
# "content_shuffle",
# "normal_bae",
# "segementation_sam",
]
def __init__(self, predictor):
WeightsDownloader.download_if_not_exists(CONTROLNET_URL, CONTROLNET_MODEL_CACHE)
self.predictor = predictor
self.controlnet_preprocessor = None
self.models = {}
def initialize_controlnet(self, model_name):
print("Initializing", model_name)
return ControlNetModel.from_pretrained(
model_name, cache_dir=CONTROLNET_MODEL_CACHE, torch_dtype=torch.float16
)
def get_model(self, controlnet_name):
if controlnet_name not in self.models:
if controlnet_name.startswith("edge_"):
self.models[controlnet_name] = self.initialize_controlnet("diffusers/controlnet-canny-sdxl-1.0")
elif controlnet_name.startswith("depth_"):
self.models[controlnet_name] = self.initialize_controlnet("diffusers/controlnet-depth-sdxl-1.0-small")
elif controlnet_name.startswith("soft_edge") or controlnet_name.startswith("lineart"):
self.models[controlnet_name] = self.initialize_controlnet("SargeZT/controlnet-sd-xl-1.0-softedge-dexined")
elif controlnet_name == "openpose":
self.models[controlnet_name] = self.initialize_controlnet("thibaud/controlnet-openpose-sdxl-1.0")
elif controlnet_name == "illusion":
self.models[controlnet_name] = self.initialize_controlnet("monster-labs/control_v1p_sdxl_qrcode_monster")
return self.models.get(controlnet_name)
def get_models(self, controlnet_names):
models = [
self.get_model(controlnet_name) for controlnet_name in controlnet_names
]
return list(filter(None, models))
def preprocess(self, image, controlnet_name):
# Illusion model needs no preprocessing
if controlnet_name == "illusion" or controlnet_name == "none":
return image
if self.controlnet_preprocessor is None: | self.controlnet_preprocessor = ControlNetPreprocessor(self.predictor) | 0 | 2023-11-13 13:04:41+00:00 | 2k |
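The `get_model` method above lazily initializes each ControlNet once and caches it in `self.models`; a usage sketch (constructing the class also triggers a weights download, and `predictor` appears unused until preprocessing):

```python
# Assumed usage of the lazy cache; second lookup is served from self.models.
cn = ControlNet(predictor=None)             # assumption: predictor only needed later
canny = cn.get_model("edge_canny")          # initialized on first request
assert canny is cn.get_model("edge_canny")  # cached thereafter
```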
ahayler/s4c | utils/base_trainer.py | [
{
"identifier": "to",
"path": "utils/array_operations.py",
"snippet": "def to(data, device, non_blocking=True):\n if isinstance(data, dict):\n return {k: to(data[k], device, non_blocking=non_blocking) for k in data.keys()}\n elif isinstance(data, list):\n return [to(v, device, non_blocking=non_blocking) for v in data]\n else:\n return data.to(device, non_blocking=non_blocking)"
},
{
"identifier": "MeanMetric",
"path": "utils/metrics.py",
"snippet": "class MeanMetric(Metric):\n def __init__(self, output_transform=lambda x: x[\"output\"], device=\"cpu\"):\n self._sum = None\n self._num_examples = None\n self.required_output_keys = ()\n super(MeanMetric, self).__init__(output_transform=output_transform, device=device)\n\n @reinit__is_reduced\n def reset(self):\n self._sum = torch.tensor(0, device=self._device, dtype=float)\n self._num_examples = 0\n super(MeanMetric, self).reset()\n\n @reinit__is_reduced\n def update(self, value):\n if torch.any(torch.isnan(torch.tensor(value))):\n return\n self._sum += value\n self._num_examples += 1\n\n @sync_all_reduce(\"_num_examples:SUM\", \"_sum:SUM\")\n def compute(self):\n if self._num_examples == 0:\n raise NotComputableError('CustomAccuracy must have at least one example before it can be computed.')\n return self._sum.item() / self._num_examples\n\n @torch.no_grad()\n def iteration_completed(self, engine: Engine) -> None:\n output = self._output_transform(engine.state.output)\n self.update(output)"
}
] | import json
import time
import ignite
import ignite.distributed as idist
import torch
from datetime import datetime
from pathlib import Path
from typing import Union
from omegaconf import OmegaConf
from ignite.contrib.engines import common
from ignite.contrib.handlers import TensorboardLogger
from ignite.contrib.handlers.base_logger import BaseHandler
from ignite.engine import Engine, Events, EventEnum
from ignite.handlers import Checkpoint, DiskSaver, global_step_from_engine
from ignite.utils import manual_seed, setup_logger
from torch.cuda.amp import autocast, GradScaler
from utils.array_operations import to
from utils.metrics import MeanMetric
from torch.backends import cudnn | 1,018 |
# used for debugging
torch.autograd.set_detect_anomaly(True)
def base_training(local_rank, config, get_dataflow, initialize, get_metrics, visualize):
# copy the segmentation mode to the data and model_conf part of the config
config['data']['segmentation_mode'] = config.get("segmentation_mode", None)
config['model_conf']['segmentation_mode'] = config.get("segmentation_mode", None)
rank = idist.get_rank()
manual_seed(config["seed"] + rank)
device = idist.device()
logger = setup_logger(name=config["name"])
log_basic_info(logger, config)
output_path = config["output_path"]
if rank == 0:
if config["stop_iteration"] is None:
now = datetime.now().strftime("%Y%m%d-%H%M%S")
else:
now = f"stop-on-{config['stop_iteration']}"
folder_name = f"{config['name']}_backend-{idist.backend()}-{idist.get_world_size()}_{now}"
output_path = Path(output_path) / folder_name
if not output_path.exists():
output_path.mkdir(parents=True)
config["output_path"] = output_path.as_posix()
logger.info(f"Output path: {config['output_path']}")
if "cuda" in device.type:
config["cuda device name"] = torch.cuda.get_device_name(local_rank)
# Setup dataflow, model, optimizer, criterion
loaders = get_dataflow(config, logger)
if len(loaders) == 2:
train_loader, test_loader = loaders
vis_loader = None
else:
train_loader, test_loader, vis_loader = loaders
if hasattr(train_loader, "dataset"):
logger.info(f"Dataset length: Train: {len(train_loader.dataset)}, Test: {len(test_loader.dataset)}")
config["num_iters_per_epoch"] = len(train_loader)
model, optimizer, criterion, lr_scheduler = initialize(config, logger)
logger.info(f"Model parameters: {sum(p.numel() for p in model.parameters())}")
# Let's now setup evaluator engine to perform model's validation and compute metrics
metrics = get_metrics(config, device)
|
# used for debugging
torch.autograd.set_detect_anomaly(True)
def base_training(local_rank, config, get_dataflow, initialize, get_metrics, visualize):
# copy the segmentation mode to the data and model_conf part of the config
config['data']['segmentation_mode'] = config.get("segmentation_mode", None)
config['model_conf']['segmentation_mode'] = config.get("segmentation_mode", None)
rank = idist.get_rank()
manual_seed(config["seed"] + rank)
device = idist.device()
logger = setup_logger(name=config["name"])
log_basic_info(logger, config)
output_path = config["output_path"]
if rank == 0:
if config["stop_iteration"] is None:
now = datetime.now().strftime("%Y%m%d-%H%M%S")
else:
now = f"stop-on-{config['stop_iteration']}"
folder_name = f"{config['name']}_backend-{idist.backend()}-{idist.get_world_size()}_{now}"
output_path = Path(output_path) / folder_name
if not output_path.exists():
output_path.mkdir(parents=True)
config["output_path"] = output_path.as_posix()
logger.info(f"Output path: {config['output_path']}")
if "cuda" in device.type:
config["cuda device name"] = torch.cuda.get_device_name(local_rank)
# Setup dataflow, model, optimizer, criterion
loaders = get_dataflow(config, logger)
if len(loaders) == 2:
train_loader, test_loader = loaders
vis_loader = None
else:
train_loader, test_loader, vis_loader = loaders
if hasattr(train_loader, "dataset"):
logger.info(f"Dataset length: Train: {len(train_loader.dataset)}, Test: {len(test_loader.dataset)}")
config["num_iters_per_epoch"] = len(train_loader)
model, optimizer, criterion, lr_scheduler = initialize(config, logger)
logger.info(f"Model parameters: {sum(p.numel() for p in model.parameters())}")
# Let's now setup evaluator engine to perform model's validation and compute metrics
metrics = get_metrics(config, device) | metrics_loss = {k: MeanMetric((lambda y: lambda x: x["loss_dict"][y])(k)) for k in criterion.get_loss_metric_names()} | 1 | 2023-11-12 21:53:27+00:00 | 2k |
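The custom `MeanMetric` from this row's context averages whatever its `output_transform` pulls out of `engine.state.output`; a minimal ignite sketch with an assumed `{"loss": ...}` output shape:

```python
# Attach MeanMetric to a toy ignite Engine; the output dict shape is assumed.
from ignite.engine import Engine

def step(engine, batch):
    return {"loss": sum(batch) / len(batch)}

trainer = Engine(step)
MeanMetric(output_transform=lambda out: out["loss"]).attach(trainer, "mean_loss")
state = trainer.run([[1.0, 2.0], [3.0, 4.0]], max_epochs=1)
print(state.metrics["mean_loss"])  # -> 2.5
```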
Emmo00/alxcheck | alxcheck/checks/python.py | [
{
"identifier": "print_no_module_docstring",
"path": "alxcheck/utils/error_logging.py",
"snippet": "def print_no_module_docstring(file_path):\n print(Fore.RED + f\"{file_path} does not have Module DocString\" + Fore.RESET)"
},
{
"identifier": "print_no_function_docstring",
"path": "alxcheck/utils/error_logging.py",
"snippet": "def print_no_function_docstring(file_path, function_name):\n print(\n Fore.RED\n + f\"In {file_path}, the {function_name} function has no Function DocString\"\n + Fore.RESET\n )"
},
{
"identifier": "print_no_class_docstring",
"path": "alxcheck/utils/error_logging.py",
"snippet": "def print_no_class_docstring(file_path, class_name):\n print(\n Fore.RED\n + f\"In {file_path}, the {class_name} class has no Class DocString\"\n + Fore.RESET\n )"
},
{
"identifier": "print_check_docstrings",
"path": "alxcheck/utils/error_logging.py",
"snippet": "def print_check_docstrings(file_path):\n print(Fore.RED + f\"Error: Check docstrings in {file_path}\" + Fore.RESET)"
},
{
"identifier": "print_error_parsing_file",
"path": "alxcheck/utils/error_logging.py",
"snippet": "def print_error_parsing_file(file_path):\n import ast\n\n try:\n with open(file_path, \"r\") as f:\n ast.parse(f.read())\n except SyntaxError as syntax_error:\n print(\n Fore.RED\n + f\"SyntaxError\\n\\tFile: {file_path}\\n\\tLine: {syntax_error.lineno}\\tMessage: {syntax_error.msg}\"\n + Fore.RESET\n )\n except Exception as e:\n print(Fore.RED + f\"Error Parsing File:\\n\\t{type(e)}\" + Fore.RESET)"
}
] | import os
import ast
import subprocess
from ..utils.error_logging import (
print_no_module_docstring,
print_no_function_docstring,
print_no_class_docstring,
print_check_docstrings,
print_error_parsing_file,
) | 800 |
def check_file_is_executable(file_path):
flag = True
if not os.access(file_path, os.X_OK):
flag = False
return flag
def check_python_shebang(file_path):
flag = True
with open(file_path, "rb") as f:
first_line = f.readline().strip()
if first_line not in (b"#!/usr/bin/python3", b"#!/usr/bin/env python3"):
flag = False
return flag
def check_module_function_class_documentation(file_path):
flag = True
with open(file_path, "rb") as f:
content = f.read()
# remove shebang
if content.startswith(b"#!"):
if len(content.split(b"\n")) < 2:
content = ""
else:
content = content.split(b"\n", 1)[1]
tree = None
try:
tree = ast.parse(content)
except Exception:
print_error_parsing_file(file_path)
try:
if tree is None:
return
for node in ast.walk(tree):
# check module docstring
if isinstance(node, ast.Module):
if not isinstance(node.body[0].value, ast.Str):
flag = False
print_no_module_docstring(file_path)
return
# check function docstring
if isinstance(node, ast.FunctionDef) and not isinstance(
node.body[0].value, ast.Str
):
flag = False
print_no_function_docstring(file_path, node.name)
# check class docstring
if isinstance(node, ast.ClassDef) and not isinstance(
node.body[0].value, ast.Str
):
flag = False
print_no_class_docstring(file_path, node.name)
except Exception:
|
def check_file_is_executable(file_path):
flag = True
if not os.access(file_path, os.X_OK):
flag = False
return flag
def check_python_shebang(file_path):
flag = True
with open(file_path, "rb") as f:
first_line = f.readline().strip()
if first_line not in (b"#!/usr/bin/python3", b"#!/usr/bin/env python3"):
flag = False
return flag
def check_module_function_class_documentation(file_path):
flag = True
with open(file_path, "rb") as f:
content = f.read()
# remove shebang
if content.startswith(b"#!"):
if len(content.split(b"\n")) < 2:
content = ""
else:
content = content.split(b"\n", 1)[1]
tree = None
try:
tree = ast.parse(content)
except Exception:
print_error_parsing_file(file_path)
try:
if tree is None:
return
for node in ast.walk(tree):
# check module docstring
if isinstance(node, ast.Module):
if not isinstance(node.body[0].value, ast.Str):
flag = False
print_no_module_docstring(file_path)
return
# check function docstring
if isinstance(node, ast.FunctionDef) and not isinstance(
node.body[0].value, ast.Str
):
flag = False
print_no_function_docstring(file_path, node.name)
# check class docstring
if isinstance(node, ast.ClassDef) and not isinstance(
node.body[0].value, ast.Str
):
flag = False
print_no_class_docstring(file_path, node.name)
except Exception: | print_check_docstrings(file_path) | 3 | 2023-11-14 19:28:28+00:00 | 2k |
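Putting the three checks above together on one file (the target path is hypothetical):

```python
# Hypothetical driver over the checks defined above.
path = "example.py"
if not check_file_is_executable(path):
    print(f"{path} is not executable")
if not check_python_shebang(path):
    print(f"{path} is missing a python3 shebang")
check_module_function_class_documentation(path)  # prints its own diagnostics
```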
TimbreWatermarking/TimbreWatermarking | voice.clone/Fastspeech2/TTS/tts/utils/text/cleaners.py | [
{
"identifier": "abbreviations_en",
"path": "voice.clone/Fastspeech2/TTS/tts/utils/text/english/abbreviations.py",
"snippet": ""
},
{
"identifier": "normalize_numbers",
"path": "voice.clone/Fastspeech2/TTS/tts/utils/text/english/number_norm.py",
"snippet": "def normalize_numbers(text):\n text = re.sub(_comma_number_re, _remove_commas, text)\n text = re.sub(_currency_re, _expand_currency, text)\n text = re.sub(_decimal_number_re, _expand_decimal_point, text)\n text = re.sub(_ordinal_re, _expand_ordinal, text)\n text = re.sub(_number_re, _expand_number, text)\n return text"
},
{
"identifier": "expand_time_english",
"path": "voice.clone/Fastspeech2/TTS/tts/utils/text/english/time_norm.py",
"snippet": "def expand_time_english(text: str) -> str:\n return re.sub(_time_re, _expand_time_english, text)"
},
{
"identifier": "abbreviations_fr",
"path": "voice.clone/Fastspeech2/TTS/tts/utils/text/french/abbreviations.py",
"snippet": ""
}
] | import re
from anyascii import anyascii
from TTS.tts.utils.text.chinese_mandarin.numbers import replace_numbers_to_characters_in_text
from .english.abbreviations import abbreviations_en
from .english.number_norm import normalize_numbers as en_normalize_numbers
from .english.time_norm import expand_time_english
from .french.abbreviations import abbreviations_fr | 776 | """Set of default text cleaners"""
# TODO: pick the cleaner for languages dynamically
# Regular expression matching whitespace:
_whitespace_re = re.compile(r"\s+")
def expand_abbreviations(text, lang="en"):
if lang == "en":
_abbreviations = abbreviations_en
elif lang == "fr":
_abbreviations = abbreviations_fr
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, " ", text).strip()
def convert_to_ascii(text):
return anyascii(text)
def remove_aux_symbols(text):
text = re.sub(r"[\<\>\(\)\[\]\"]+", "", text)
return text
def replace_symbols(text, lang="en"):
text = text.replace(";", ",")
text = text.replace("-", " ")
text = text.replace(":", ",")
if lang == "en":
text = text.replace("&", " and ")
elif lang == "fr":
text = text.replace("&", " et ")
elif lang == "pt":
text = text.replace("&", " e ")
return text
def basic_cleaners(text):
"""Basic pipeline that lowercases and collapses whitespace without transliteration."""
text = lowercase(text)
text = collapse_whitespace(text)
return text
def transliteration_cleaners(text):
"""Pipeline for non-English text that transliterates to ASCII."""
# text = convert_to_ascii(text)
text = lowercase(text)
text = collapse_whitespace(text)
return text
def basic_german_cleaners(text):
"""Pipeline for German text"""
text = lowercase(text)
text = collapse_whitespace(text)
return text
# TODO: elaborate it
def basic_turkish_cleaners(text):
"""Pipeline for Turkish text"""
text = text.replace("I", "ı")
text = lowercase(text)
text = collapse_whitespace(text)
return text
def english_cleaners(text):
"""Pipeline for English text, including number and abbreviation expansion."""
# text = convert_to_ascii(text)
text = lowercase(text)
| """Set of default text cleaners"""
# TODO: pick the cleaner for languages dynamically
# Regular expression matching whitespace:
_whitespace_re = re.compile(r"\s+")
def expand_abbreviations(text, lang="en"):
if lang == "en":
_abbreviations = abbreviations_en
elif lang == "fr":
_abbreviations = abbreviations_fr
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, " ", text).strip()
def convert_to_ascii(text):
return anyascii(text)
def remove_aux_symbols(text):
text = re.sub(r"[\<\>\(\)\[\]\"]+", "", text)
return text
def replace_symbols(text, lang="en"):
text = text.replace(";", ",")
text = text.replace("-", " ")
text = text.replace(":", ",")
if lang == "en":
text = text.replace("&", " and ")
elif lang == "fr":
text = text.replace("&", " et ")
elif lang == "pt":
text = text.replace("&", " e ")
return text
def basic_cleaners(text):
"""Basic pipeline that lowercases and collapses whitespace without transliteration."""
text = lowercase(text)
text = collapse_whitespace(text)
return text
def transliteration_cleaners(text):
"""Pipeline for non-English text that transliterates to ASCII."""
# text = convert_to_ascii(text)
text = lowercase(text)
text = collapse_whitespace(text)
return text
def basic_german_cleaners(text):
"""Pipeline for German text"""
text = lowercase(text)
text = collapse_whitespace(text)
return text
# TODO: elaborate it
def basic_turkish_cleaners(text):
"""Pipeline for Turkish text"""
text = text.replace("I", "ı")
text = lowercase(text)
text = collapse_whitespace(text)
return text
def english_cleaners(text):
"""Pipeline for English text, including number and abbreviation expansion."""
# text = convert_to_ascii(text)
text = lowercase(text) | text = expand_time_english(text) | 2 | 2023-11-13 01:40:03+00:00 | 2k |
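Two quick calls showing the cleaner building blocks above in isolation (outputs follow from the regexes shown):

```python
# basic_cleaners lowercases and collapses runs of whitespace.
print(basic_cleaners("Hello,   WORLD "))          # -> "hello, world"
# replace_symbols expands "&" per language; whitespace is collapsed later.
print(replace_symbols("rock & roll", lang="en"))  # -> "rock  and  roll"
```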
nillion-oss/tinysig | src/tinysig/network.py | [
{
"identifier": "add",
"path": "src/tinysig/utils.py",
"snippet": "def add(values: list[int], size: int) -> int:\n \"\"\"\n Calculate the sum of a list of integers modulo 'size'.\n\n Args:\n values (list[int]): A list of integers to be summed.\n size (int): The modulo value.\n\n Returns:\n int: The sum of the integers in 'values' modulo 'size'.\n\n Examples:\n >>> add([2, 4, 6], 5)\n 2\n >>> add([3, 7, 10], 4)\n 0\n \"\"\"\n\n result = 0\n for v in values:\n result = (result + v) % size\n return result"
},
{
"identifier": "generate_additive_shares",
"path": "src/tinysig/utils.py",
"snippet": "def generate_additive_shares(secret: int, n: int, size: int) -> list[int]:\n \"\"\"\n Generates additive secret shares for a given secret value, using modular arithmetic.\n\n Args:\n secret (int): The secret value to be shared.\n n (int): The number of shares to generate.\n size (int): The modulus value for modular arithmetic.\n\n Returns:\n List[int]: A list of additive secret shares.\n\n Example:\n >>> random.seed(0)\n >>> generate_additive_shares(26, 3, 2**5)\n [8, 24, 26]\n \"\"\"\n shares = [rand(size) for _ in range(n-1)]\n last_sh = (secret - add(shares, size)) % size\n shares = [last_sh] + shares\n\n return shares"
}
] | from dataclasses import dataclass, field
from typing import Dict, List, Union
from .utils import add, generate_additive_shares | 1,515 |
@dataclass
class Node:
""" Represents a node in the network."""
id: int
"""Identifier for the node."""
shares_db: Dict[str, int] = field(default_factory=dict)
"""Database for holding shares."""
open_db: Dict[str, int] = field(default_factory=dict)
"""Database for holding open values."""
he_public_keys: Dict[int, int] = field(default_factory=dict)
"""Dictionary for holding homomorphic encryption public keys."""
def get_share(self, label: str) -> None:
"""Retrieve a share from the 'shares_db'."""
return self.shares_db[label]
def get_open(self, label: str) -> None:
"""Retrieve an open value from the 'open_db'."""
return self.open_db[label]
def set_share(self, value, label: str) -> None:
"""Set a share in the 'shares_db'."""
self.shares_db[label] = value
def set_open(self, value, label: str) -> None:
"""Set an open value in the 'open_db'."""
self.open_db[label] = value
def delete_share(self, label: str) -> None:
"""Delete a share from the 'shares_db'."""
self.shares_db.pop(label)
def delete_open(self, label: str) -> None:
"""Delete an open value from the 'open_db'."""
self.open_db.pop(label)
@dataclass
class Client(Node):
"""Represents a client node in the network, inheriting from the 'Node' class."""
he_private_key: int = field(default=0)
class Network:
"""Represents a network of nodes and clients.
Manages the interactions and cryptographic operations within the network,
including sharing secrets, broadcasting values, and reconstructing shared values.
"""
nodes: List[Node]
"""List of nodes in the network."""
clients: List[Client]
"""List of clients in the network."""
q: int
"""Prime field."""
h: int
"""Multiplicative field generator."""
def __init__(self, N, q, h=2, C=1):
"""
Initialize the network with 'N' nodes, prime field 'q', field generator 'h', and 'C' clients.
Parameters:
N (int): Number of nodes in the network.
q (int): Prime field.
h (int): Multiplicative field generator (default is 2).
C (int): Number of clients in the network (default is 1).
"""
self.nodes = [Node(i+1) for i in range(N)]
self.clients = [Client(i+1) for i in range(C)]
self.N = N
self.q = q
self.h = h
def print(self):
"""Print a readable representation of the network, including nodes and clients with their databases."""
print(f"Network(N={len(self.nodes)}, q={self.q},")
print(" nodes=[")
for node in self.nodes:
print(f" Node(id={node.id},")
print(" shares_db={")
for key, value in node.shares_db.items():
print(f" {key}: {value},")
print(" },")
print(" public_keys={")
for key, value in node.he_public_keys.items():
print(f" {key}: {value},")
print(" },")
print(" open_db={")
for key, value in node.open_db.items():
print(f" {key}: {value},")
print(" }")
print(" )")
print(" ]\n)")
print(" clients=[")
for client in self.clients:
print(f" Client(id={client.id},")
print(" shares_db={")
for key, value in client.shares_db.items():
print(f" {key}: {value},")
print(" },")
print(" public_keys={")
for key, value in client.he_public_keys.items():
print(f" {key}: {value},")
print(" },")
print(f" private_keys={client.he_private_key},")
print(" open_db={")
for key, value in client.open_db.items():
print(f" {key}: {value},")
print(" }")
print(" )")
print(" ]\n)")
def reconstruct_local(self, type_share: str, get_label: str, save_label: str, party: Union[Client, Node]) -> None:
"""Locally reconstruct exponent share ('exp') or base ('base') shared value."""
type_label = "_sh_exp" if type_share == "exp" else "_sh_base"
p = (self.q - 1) if type_share == "exp" else self.q
shares = [party.get_share(get_label+type_label+"_node_"+str(node.id)) for node in self.nodes]
|
@dataclass
class Node:
""" Represents a node in the network."""
id: int
"""Identifier for the node."""
shares_db: Dict[str, int] = field(default_factory=dict)
"""Database for holding shares."""
open_db: Dict[str, int] = field(default_factory=dict)
"""Database for holding open values."""
he_public_keys: Dict[int, int] = field(default_factory=dict)
"""Dictionary for holding homomorphic encryption public keys."""
def get_share(self, label: str) -> None:
"""Retrieve a share from the 'shares_db'."""
return self.shares_db[label]
def get_open(self, label: str) -> None:
"""Retrieve an open value from the 'open_db'."""
return self.open_db[label]
def set_share(self, value, label: str) -> None:
"""Set a share in the 'shares_db'."""
self.shares_db[label] = value
def set_open(self, value, label: str) -> None:
"""Set an open value in the 'open_db'."""
self.open_db[label] = value
def delete_share(self, label: str) -> None:
"""Delete a share from the 'shares_db'."""
self.shares_db.pop(label)
def delete_open(self, label: str) -> None:
"""Delete an open value from the 'open_db'."""
self.open_db.pop(label)
@dataclass
class Client(Node):
"""Represents a client node in the network, inheriting from the 'Node' class."""
he_private_key: int = field(default=0)
class Network:
"""Represents a network of nodes and clients.
Manages the interactions and cryptographic operations within the network,
including sharing secrets, broadcasting values, and reconstructing shared values.
"""
nodes: List[Node]
"""List of nodes in the network."""
clients: List[Client]
"""List of clients in the network."""
q: int
"""Prime field."""
h: int
"""Multiplicative field generator."""
def __init__(self, N, q, h=2, C=1):
"""
Initialize the network with 'N' nodes, prime field 'q', field generator 'h', and 'C' clients.
Parameters:
N (int): Number of nodes in the network.
q (int): Prime field.
h (int): Multiplicative field generator (default is 2).
C (int): Number of clients in the network (default is 1).
"""
self.nodes = [Node(i+1) for i in range(N)]
self.clients = [Client(i+1) for i in range(C)]
self.N = N
self.q = q
self.h = h
def print(self):
"""Print a readable representation of the network, including nodes and clients with their databases."""
print(f"Network(N={len(self.nodes)}, q={self.q},")
print(" nodes=[")
for node in self.nodes:
print(f" Node(id={node.id},")
print(" shares_db={")
for key, value in node.shares_db.items():
print(f" {key}: {value},")
print(" },")
print(" public_keys={")
for key, value in node.he_public_keys.items():
print(f" {key}: {value},")
print(" },")
print(" open_db={")
for key, value in node.open_db.items():
print(f" {key}: {value},")
print(" }")
print(" )")
print(" ]\n)")
print(" clients=[")
for client in self.clients:
print(f" Client(id={client.id},")
print(" shares_db={")
for key, value in client.shares_db.items():
print(f" {key}: {value},")
print(" },")
print(" public_keys={")
for key, value in client.he_public_keys.items():
print(f" {key}: {value},")
print(" },")
print(f" private_keys={client.he_private_key},")
print(" open_db={")
for key, value in client.open_db.items():
print(f" {key}: {value},")
print(" }")
print(" )")
print(" ]\n)")
def reconstruct_local(self, type_share: str, get_label: str, save_label: str, party: Union[Client, Node]) -> None:
"""Locally reconstruct exponent share ('exp') or base ('base') shared value."""
type_label = "_sh_exp" if type_share == "exp" else "_sh_base"
p = (self.q - 1) if type_share == "exp" else self.q
shares = [party.get_share(get_label+type_label+"_node_"+str(node.id)) for node in self.nodes] | reconstructed = add(shares, p) | 0 | 2023-11-14 13:55:41+00:00 | 2k |
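A worked round trip of the additive sharing this record relies on: shares are uniform modulo `size`, and reconstruction is just `add` (a modular sum), exactly as in the `reconstruct_local` target line. Self-contained sketch; the modulus and secret values are illustrative:

import random

def add(values, size):
    # Sum modulo `size`, as in utils.add above.
    result = 0
    for v in values:
        result = (result + v) % size
    return result

def generate_additive_shares(secret, n, size):
    # n-1 uniform shares plus one correcting share, matching the utils snippet.
    shares = [random.randrange(size) for _ in range(n - 1)]
    last_sh = (secret - add(shares, size)) % size
    return [last_sh] + shares

q = 2**61 - 1  # illustrative prime modulus
shares = generate_additive_shares(1234567, 5, q)
assert add(shares, q) == 1234567  # reconstruction is a single modular sum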
naver-ai/scob | lightning_modules/data_modules/transforms/transformer_decoder.py | [
{
"identifier": "TRANSFORM_NAME_TO_CLASS",
"path": "lightning_modules/data_modules/transforms/common.py",
"snippet": "TRANSFORM_NAME_TO_CLASS = {\n \"RandomRotate\": RandomRotate,\n \"CraftRandomCrop\": CraftRandomCrop,\n \"Resize\": Resize,\n \"ResizeOD\": ResizeOD,\n \"PhotometricDistort\": PhotometricDistort,\n \"MoCo_PhotometricDistort\": MoCo_PhotometricDistort,\n \"ResizeTwoPic\": ResizeTwoPic,\n \"ResizeMultiview\": ResizeMultiview,\n \"KeepAspectRatioBilinearResize\": KeepAspectRatioBilinearResize,\n \"RandomCrop\": RandomCrop,\n \"MultiScaleResize\": MultiScaleResize,\n \"KeepAspectRatioBilinearResizeOD\": KeepAspectRatioBilinearResizeOD,\n \"Otor_OriginDistort\": Otor_OriginDistort,\n}"
},
{
"identifier": "W_Compose",
"path": "lightning_modules/data_modules/transforms/common.py",
"snippet": "class W_Compose:\n \"\"\"\n Modified pytorch compose\n pytorch.org/vision/0.10/transforms.html#torchvision.transforms.Compose\n \"\"\"\n\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, img, quads=None):\n second_img = None\n multiview_img = None\n for transform in self.transforms:\n if (\n isinstance(transform, ResizeTwoPic)\n and transform.second_size is not None\n ):\n img, second_img, quads = transform(img, quads)\n\n elif isinstance(transform, ResizeMultiview):\n img, multiview_img, quads = transform(img, quads)\n\n elif isinstance(transform, MoCo_PhotometricDistort):\n img, _ = transform(img, quads)\n multiview_img, quads = transform(multiview_img, quads)\n else:\n img, quads = transform(img, quads)\n\n if second_img is None and multiview_img is None:\n return img, quads\n elif multiview_img is not None:\n return img, multiview_img, quads\n else:\n return img, second_img, quads\n\n def __repr__(self):\n format_string = self.__class__.__name__ + \"(\"\n for transform in self.transforms:\n format_string += f\"\\n {transform}\"\n format_string += \"\\n)\"\n return format_string"
},
{
"identifier": "get_image_normalize_mean_and_std",
"path": "utils/dataset_utils.py",
"snippet": "def get_image_normalize_mean_and_std(image_normalize):\n if image_normalize is None:\n mean_and_std = None\n elif image_normalize == \"imagenet_default\":\n mean_and_std = (IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)\n elif image_normalize == \"imagenet_inception\":\n # In BEiT, \"--imagenet_default_mean_and_std: enable this for ImageNet-1k pre-training,\n # i.e., (0.485, 0.456, 0.406) for mean and (0.229, 0.224, 0.225) for std.\n # We use (0.5, 0.5, 0.5) for mean and (0.5, 0.5, 0.5) for std by default\n # on other pre-training data.\"\n mean_and_std = (IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD)\n else:\n raise ValueError(f\"Unknown image_normalize={image_normalize}\")\n\n return mean_and_std"
}
] | from typing import List, Tuple, Union
from lightning_modules.data_modules.transforms.common import (
TRANSFORM_NAME_TO_CLASS,
W_Compose,
)
from utils.dataset_utils import get_image_normalize_mean_and_std
import torch
import torchvision.transforms as transforms | 1,240 |
class TransformerDecoderTransformForFineTuning:
"""
- BEiT:
https://github.com/microsoft/unilm/blob/master/beit/datasets.py#L27
- TrOCR:
https://github.com/microsoft/unilm/blob/53995b4876464146365693396aaaa09e88a4494e/trocr/data_aug.py#L120
"""
def __init__(
self,
size: Union[Tuple, List],
transforms_list=None,
image_normalize="imagenet_default",
):
self.common_transform = self.__get_common_transform(size, transforms_list)
self.patch_transform = self.__get_patch_transform(image_normalize)
def __call__(self, img, quads):
for_patches, quads = self.common_transform(img, quads)
for_patches = self.patch_transform(for_patches)
return for_patches, quads
@staticmethod
def __get_common_transform(size, transforms_list):
        transform_ops = []
for transform_obj in transforms_list:
transform_class = TRANSFORM_NAME_TO_CLASS[transform_obj.name]
if transform_obj.params is not None:
params = dict(transform_obj.params)
else:
params = {}
if transform_obj.name in [
"Resize",
"ResizeOD",
"KeepAspectRatioBilinearResize",
"ResizeMultiview",
"MultiScaleResize",
"KeepAspectRatioBilinearResizeOD",
]:
params["size"] = size
elif transform_obj.name == "ResizeTwoPic":
params["size"] = size
            transform_ops.append(transform_class(**params))
        return W_Compose(transform_ops)
@staticmethod
def __get_patch_transform(image_normalize):
patch_trans = [transforms.ToTensor()]
|
class TransformerDecoderTransformForFineTuning:
"""
- BEiT:
https://github.com/microsoft/unilm/blob/master/beit/datasets.py#L27
- TrOCR:
https://github.com/microsoft/unilm/blob/53995b4876464146365693396aaaa09e88a4494e/trocr/data_aug.py#L120
"""
def __init__(
self,
size: Union[Tuple, List],
transforms_list=None,
image_normalize="imagenet_default",
):
self.common_transform = self.__get_common_transform(size, transforms_list)
self.patch_transform = self.__get_patch_transform(image_normalize)
def __call__(self, img, quads):
for_patches, quads = self.common_transform(img, quads)
for_patches = self.patch_transform(for_patches)
return for_patches, quads
@staticmethod
def __get_common_transform(size, transforms_list):
        transform_ops = []
for transform_obj in transforms_list:
transform_class = TRANSFORM_NAME_TO_CLASS[transform_obj.name]
if transform_obj.params is not None:
params = dict(transform_obj.params)
else:
params = {}
if transform_obj.name in [
"Resize",
"ResizeOD",
"KeepAspectRatioBilinearResize",
"ResizeMultiview",
"MultiScaleResize",
"KeepAspectRatioBilinearResizeOD",
]:
params["size"] = size
elif transform_obj.name == "ResizeTwoPic":
params["size"] = size
            transform_ops.append(transform_class(**params))
        return W_Compose(transform_ops)
@staticmethod
def __get_patch_transform(image_normalize):
patch_trans = [transforms.ToTensor()]
| mean_and_std = get_image_normalize_mean_and_std(image_normalize) | 2 | 2023-11-15 00:40:08+00:00 | 2k |
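This record's target line only looks up the normalization stats inside __get_patch_transform; everything after it is unconfirmed. A hedged sketch of the presumable continuation — append a Normalize when a scheme is configured — where the post-lookup behaviour is an assumption:

import torchvision.transforms as transforms

IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)  # standard ImageNet stats
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)

def get_patch_transform(image_normalize="imagenet_default"):
    patch_trans = [transforms.ToTensor()]
    # Assumed continuation: normalize only when stats are configured.
    if image_normalize == "imagenet_default":
        patch_trans.append(
            transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
        )
    return transforms.Compose(patch_trans)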
speckai/speck | src/python/speck/connections/connector.py | [
{
"identifier": "ChatLogger",
"path": "src/python/speck/chat/entities.py",
"snippet": "NOT_GIVEN = None\nclass Message(BaseModel):\nclass SafeDict(dict):\nclass Prompt(str):\nclass Response(BaseModel):\nclass MessageChunk(BaseModel):\nclass Stream:\nclass LogConfig(BaseModel):\n class Config:\nclass ChatConfig:\nclass OpenAIChatConfig(ChatConfig):\nclass IChatClient(ABC):\n def __missing__(self, key):\n def to_dict(self):\n def __init__(\n self,\n messages: PromptTypes,\n variables: Union[dict[str, str], None] = None,\n **kwargs,\n ):\n def create(\n cls, messages: PromptTypes, variables: dict[str, str] = None\n ) -> \"Prompt\":\n def _read(cls, lines: str) -> \"Prompt\":\n def add_message():\n def read(cls, path: str, name: Union[str, None] = None) -> \"Prompt\":\n def read_all(cls, path: str) -> dict[str, \"Prompt\"]:\n def _file(self):\n def write(cls, prompt: Union[\"Prompt\", dict[str, \"Prompt\"]], path: str):\n def __new__(\n cls,\n messages: PromptTypes,\n **kwargs,\n ):\n def from_openai(cls, messages: list[dict[str, str]]):\n def to_list(self):\n def to_dict(self):\n def _apply_variables(\n messages: list[Message], variables: dict[str, str]\n ) -> list[Message]:\n def _check_duplicate_keys(self, other_variables: dict[str, str]) -> dict[str, str]:\n def _remove_duplicate_keys_from_messages(\n self, other_variables: dict[str, str]\n ) -> list[Message]:\n def format(self, *args, **kwargs):\n def __add__(self, other):\n def __str__(self):\n def __init__(\n self,\n content: str,\n closed: bool = False,\n prompt_tokens: Union[int, None] = None,\n completion_tokens: Union[int, None] = None,\n raw: Union[dict, None] = None,\n **kwargs,\n ):\n def create(cls, response: ResponseTypes) -> \"Response\":\n def __str__(self):\n def encode(self, encoding: str = \"utf-8\"):\n def __init__(\n self,\n client: \"Speck\",\n iterator: Iterator[Any],\n kwargs: dict,\n log_config: \"LogConfig\",\n processor: Callable[[Any], MessageChunk],\n ):\n def _log(self):\n def _process(self, item) -> MessageChunk:\n def __next__(self) -> MessageChunk:\n def __iter__(self) -> Iterator[MessageChunk]:\n def close(self):\n def __init__(\n self,\n *,\n provider: str = None,\n model: OpenAIModel,\n stream: bool = False,\n _log: bool = True,\n temperature: Union[Optional[float], NotGiven] = NOT_GIVEN,\n max_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN,\n top_p: Union[Optional[float], NotGiven] = NOT_GIVEN,\n frequency_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN,\n presence_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN,\n **config_kwargs,\n ):\n def to_dict(self):\n def _convert_optional(self, value):\n def create(cls, config: ChatConfigTypes, kwargs: dict = None) -> \"ChatConfig\":\n def get(self, key: str, default: Any = None) -> Any:\n def convert(self, provider: str = \"speck\") -> \"ChatConfig\":\n def log_chat(\n self,\n *,\n log_config: LogConfig,\n prompt: Prompt,\n response: Response,\n provider: str = \"speck\",\n ):\n def encode(self, encoding: str = \"utf-8\"):\n def __str__(self):\n def __init__(\n self,\n model: OpenAIModel,\n stream: bool = False,\n _log: bool = True,\n temperature: Union[Optional[float], NotGiven] = NOT_GIVEN,\n max_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN,\n top_p: Union[Optional[float], NotGiven] = NOT_GIVEN,\n frequency_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN,\n presence_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN,\n **config_kwargs,\n ):\n def convert(self, provider: str = \"speck\") -> ChatConfig:\n def debug_chat(\n self, prompt: \"Prompt\", config: \"ChatConfig\"\n ) -> (\"Prompt\", 
\"ChatConfig\"):\n def chat(\n self,\n prompt: PromptTypes,\n config: Union[ChatConfig, NotGiven] = NOT_GIVEN,\n **config_kwargs,\n ) -> Union[Response, Stream]:\n async def achat(\n self,\n prompt: PromptTypes,\n config: Union[ChatConfig, NotGiven] = NOT_GIVEN,\n **config_kwargs,\n ) -> Union[Response, Stream]:"
},
{
"identifier": "Providers",
"path": "src/python/speck/connections/providers.py",
"snippet": "class Providers(Enum):\n Anthropic = \"Anthropic\"\n AzureOpenAI = \"AzureOpenAI\"\n OpenAI = \"OpenAI\"\n CustomProvider = \"CustomProvider\"\n Replicate = \"Replicate\""
}
] | from abc import ABC
from ..chat.entities import ChatLogger, LogConfig, Prompt, Response
from .providers import Providers | 1,521 |
class IConnector(ABC):
_client: "Speck"
def __init__(self, client: "Speck", provider: Providers):
self._client = client
self.provider = provider
# @abstractmethod
# def process_message(self, messages: Messages, model: str) -> str:
# pass
def _get_log_kwargs(self, prompt: Prompt, response: Response, **kwargs):
return {
"provider": self.provider,
"model": kwargs.get("model"),
"temperature": kwargs.get("temperature"),
"stream": kwargs.get("stream", False),
"prompt": prompt,
"config": kwargs,
"response": response,
}
def log(
self, *, log_config: LogConfig, prompt: Prompt, response: Response, **kwargs
):
# Todo: refactor to use config.log_chat !!!
|
class IConnector(ABC):
_client: "Speck"
def __init__(self, client: "Speck", provider: Providers):
self._client = client
self.provider = provider
# @abstractmethod
# def process_message(self, messages: Messages, model: str) -> str:
# pass
def _get_log_kwargs(self, prompt: Prompt, response: Response, **kwargs):
return {
"provider": self.provider,
"model": kwargs.get("model"),
"temperature": kwargs.get("temperature"),
"stream": kwargs.get("stream", False),
"prompt": prompt,
"config": kwargs,
"response": response,
}
def log(
self, *, log_config: LogConfig, prompt: Prompt, response: Response, **kwargs
):
# Todo: refactor to use config.log_chat !!! | ChatLogger.log( | 0 | 2023-11-15 05:46:05+00:00 | 2k |
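The target line here opens a ChatLogger.log( call, whose real signature the record does not show. A hedged sketch of the pattern only — build the kwargs dict once and splat it into the logger — with StubChatLogger as a stand-in, not the library's class:

def get_log_kwargs(provider, prompt, response, **kwargs):
    # Mirrors _get_log_kwargs above: gather call metadata into one dict.
    return {
        "provider": provider,
        "model": kwargs.get("model"),
        "stream": kwargs.get("stream", False),
        "prompt": prompt,
        "config": kwargs,
        "response": response,
    }

class StubChatLogger:
    @staticmethod
    def log(**kwargs):
        print({k: kwargs[k] for k in ("provider", "model", "stream")})

StubChatLogger.log(**get_log_kwargs("OpenAI", "hi", "hello", model="gpt-4"))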
chaiNNer-org/spandrel | src/spandrel/architectures/KBNet/arch/kbnet_s.py | [
{
"identifier": "KBAFunction",
"path": "src/spandrel/architectures/KBNet/arch/kb_utils.py",
"snippet": "class KBAFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x, att, selfk, selfg, selfb, selfw):\n B, nset, H, W = att.shape\n KK = selfk**2\n selfc = x.shape[1]\n\n att = att.reshape(B, nset, H * W).transpose(-2, -1)\n\n ctx.selfk, ctx.selfg, ctx.selfc, ctx.KK, ctx.nset = (\n selfk,\n selfg,\n selfc,\n KK,\n nset,\n )\n ctx.x, ctx.att, ctx.selfb, ctx.selfw = x, att, selfb, selfw\n\n bias = att @ selfb\n attk = att @ selfw\n\n uf = torch.nn.functional.unfold(x, kernel_size=selfk, padding=selfk // 2)\n\n # for unfold att / less memory cost\n uf = uf.reshape(B, selfg, selfc // selfg * KK, H * W).permute(0, 3, 1, 2)\n attk = attk.reshape(B, H * W, selfg, selfc // selfg, selfc // selfg * KK)\n\n x = attk @ uf.unsqueeze(-1) #\n del attk, uf\n x = x.squeeze(-1).reshape(B, H * W, selfc) + bias\n x = x.transpose(-1, -2).reshape(B, selfc, H, W)\n return x\n\n @staticmethod\n def backward(ctx, grad_output):\n x, att, selfb, selfw = ctx.x, ctx.att, ctx.selfb, ctx.selfw\n selfk, selfg, selfc, KK, nset = (\n ctx.selfk,\n ctx.selfg,\n ctx.selfc,\n ctx.KK,\n ctx.nset,\n )\n\n B, selfc, H, W = grad_output.size()\n\n dbias = grad_output.reshape(B, selfc, H * W).transpose(-1, -2)\n\n dselfb = att.transpose(-2, -1) @ dbias\n datt = dbias @ selfb.transpose(-2, -1)\n\n attk = att @ selfw\n uf = F.unfold(x, kernel_size=selfk, padding=selfk // 2)\n # for unfold att / less memory cost\n uf = uf.reshape(B, selfg, selfc // selfg * KK, H * W).permute(0, 3, 1, 2)\n attk = attk.reshape(B, H * W, selfg, selfc // selfg, selfc // selfg * KK)\n\n dx = dbias.view(B, H * W, selfg, selfc // selfg, 1)\n\n dattk = dx @ uf.view(B, H * W, selfg, 1, selfc // selfg * KK)\n duf = attk.transpose(-2, -1) @ dx\n del attk, uf\n\n dattk = dattk.view(B, H * W, -1)\n datt += dattk @ selfw.transpose(-2, -1)\n dselfw = att.transpose(-2, -1) @ dattk\n\n duf = duf.permute(0, 2, 3, 4, 1).view(B, -1, H * W)\n dx = F.fold(duf, output_size=(H, W), kernel_size=selfk, padding=selfk // 2)\n\n datt = datt.transpose(-1, -2).view(B, nset, H, W)\n\n return dx, datt, None, None, dselfb, dselfw"
},
{
"identifier": "LayerNorm2d",
"path": "src/spandrel/architectures/KBNet/arch/kb_utils.py",
"snippet": "class LayerNorm2d(nn.Module):\n def __init__(self, channels, eps=1e-6, requires_grad=True):\n super().__init__()\n self.register_parameter(\n \"weight\", nn.Parameter(torch.ones(channels), requires_grad=requires_grad)\n )\n self.register_parameter(\n \"bias\", nn.Parameter(torch.zeros(channels), requires_grad=requires_grad)\n )\n self.eps = eps\n\n def forward(self, x):\n return LayerNormFunction.apply(x, self.weight, self.bias, self.eps)"
},
{
"identifier": "SimpleGate",
"path": "src/spandrel/architectures/KBNet/arch/kb_utils.py",
"snippet": "class SimpleGate(nn.Module):\n def forward(self, x):\n x1, x2 = x.chunk(2, dim=1)\n return x1 * x2"
}
] | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from .kb_utils import KBAFunction, LayerNorm2d, SimpleGate | 1,359 | # type: ignore
class KBBlock_s(nn.Module):
def __init__(
self, c, DW_Expand=2, FFN_Expand=2, nset=32, k=3, gc=4, lightweight=False
):
super().__init__()
self.k, self.c = k, c
self.nset = nset
dw_ch = int(c * DW_Expand)
ffn_ch = int(FFN_Expand * c)
self.g = c // gc
self.w = nn.Parameter(torch.zeros(1, nset, c * c // self.g * self.k**2))
self.b = nn.Parameter(torch.zeros(1, nset, c))
self.init_p(self.w, self.b)
| # type: ignore
class KBBlock_s(nn.Module):
def __init__(
self, c, DW_Expand=2, FFN_Expand=2, nset=32, k=3, gc=4, lightweight=False
):
super().__init__()
self.k, self.c = k, c
self.nset = nset
dw_ch = int(c * DW_Expand)
ffn_ch = int(FFN_Expand * c)
self.g = c // gc
self.w = nn.Parameter(torch.zeros(1, nset, c * c // self.g * self.k**2))
self.b = nn.Parameter(torch.zeros(1, nset, c))
self.init_p(self.w, self.b)
| self.norm1 = LayerNorm2d(c) | 1 | 2023-11-17 01:11:47+00:00 | 2k |
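A quick worked check of the learnable-kernel shapes declared in KBBlock_s.__init__ above, with an illustrative channel count (c=64; gc, k, nset use the defaults shown):

import torch

c, gc, k, nset = 64, 4, 3, 32
g = c // gc                                   # 16 groups
w = torch.zeros(1, nset, c * c // g * k**2)   # (1, 32, 64*64//16*9) = (1, 32, 2304)
b = torch.zeros(1, nset, c)                   # (1, 32, 64)
print(w.shape, b.shape)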
robocorp/llmstatemachine | src/llmstatemachine/workflow_agent.py | [
{
"identifier": "create_definition",
"path": "src/llmstatemachine/function.py",
"snippet": "def create_definition(func: Callable, goal: str) -> FunctionDefinition:\n source = inspect.getsource(func)\n client = OpenAI()\n response = client.chat.completions.create(\n model=\"gpt-4-1106-preview\",\n messages=[\n {\n \"role\": \"system\",\n \"content\": f\"\"\"Extract function metadata from the following function definition:\n```\n{source}\n```\n\nFocus on details that are meaningful for the following assignment:\n```\n{goal}\n```\n\nExtract the function metadata.\n\"\"\",\n }\n ],\n functions=[\n {\n \"description\": \"FunctionDefinition is a tool for metadata extraction\",\n \"name\": \"FunctionDefinition\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"thinking\": {\n \"type\": \"string\",\n \"description\": (\n \"Logical thinking about function metadata extraction and draft of the answer.\"\n ),\n },\n \"function_name\": {\n \"type\": \"string\",\n \"description\": \"Name of the function.\",\n },\n \"function_description\": {\n \"type\": \"string\",\n \"description\": \"Short well thought description of what the function is used for.\",\n },\n \"argument_description\": {\n \"type\": \"string\",\n \"description\": \"Short well thought description of what the function argument is used for.\",\n },\n },\n \"required\": [\n \"thinking\",\n \"function_name\",\n \"function_description\",\n \"argument_description\",\n ],\n },\n }\n ],\n function_call={\"name\": \"FunctionDefinition\"},\n )\n msg = response.choices[0].message\n assert msg.function_call\n print(msg.function_call)\n args: FunctionDefinition = json.loads(msg.function_call.arguments)\n\n if not is_valid_function_definition(args):\n raise ValueError(\"Invalid data format for FunctionDefinition\")\n\n return args"
},
{
"identifier": "FunctionDefinition",
"path": "src/llmstatemachine/function.py",
"snippet": "class FunctionDefinition(TypedDict):\n function_name: str\n function_description: str\n argument_description: str"
}
] | import json
from typing import Dict, Callable, Any, Tuple, List
from openai.types.chat.chat_completion_message import FunctionCall
from .function import create_definition, FunctionDefinition
from openai import OpenAI
from openai.types.chat import (
ChatCompletionMessageParam,
ChatCompletionMessage,
completion_create_params,
) | 756 |
TransitionFunction = Callable[..., str]
FUNCTION_NAME = "ActionSelector"
MODEL = "gpt-4-1106-preview" # "gpt-4"
_CURRENT_STEPPING_AGENT = None
class WorkflowAgent:
def __init__(
self, goal: str, transitions: Dict[str, Dict[str, TransitionFunction]]
):
if "INIT" not in transitions:
raise Exception("Must define INIT state")
self._transitions: Dict[str, Dict[str, TransitionFunction]] = transitions
self._current_state = "INIT"
self.next_state = None
self._messages: List[ChatCompletionMessageParam] = []
self._messages.append({"role": "system", "content": goal})
self._client = OpenAI()
|
TransitionFunction = Callable[..., str]
FUNCTION_NAME = "ActionSelector"
MODEL = "gpt-4-1106-preview" # "gpt-4"
_CURRENT_STEPPING_AGENT = None
class WorkflowAgent:
def __init__(
self, goal: str, transitions: Dict[str, Dict[str, TransitionFunction]]
):
if "INIT" not in transitions:
raise Exception("Must define INIT state")
self._transitions: Dict[str, Dict[str, TransitionFunction]] = transitions
self._current_state = "INIT"
self.next_state = None
self._messages: List[ChatCompletionMessageParam] = []
self._messages.append({"role": "system", "content": goal})
self._client = OpenAI() | self._func_defs: Dict[TransitionFunction, FunctionDefinition] = dict() | 1 | 2023-11-17 17:37:08+00:00 | 2k |
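A hedged sketch of the transitions mapping the constructor validates — state name to a dict of named transition functions, with the "INIT" key mandatory (the __init__ above raises without it). The states and functions below are invented for illustration:

def search(query: str) -> str:
    # Hypothetical transition function: do some work, return an observation.
    return f"results for {query}"

def finish(summary: str) -> str:
    return summary

transitions = {
    "INIT": {"search": search},    # required, else __init__ raises
    "REVIEW": {"finish": finish},
}
# agent = WorkflowAgent(goal="Find X", transitions=transitions)  # constructing OpenAI() needs credentials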
GoldenThrust/Virtual-Bank | api/debit_cards/serializers.py | [
{
"identifier": "DebitCard",
"path": "api/debit_cards/models.py",
"snippet": "class DebitCard(models.Model):\n account = models.ForeignKey(Account, on_delete=models.CASCADE)\n card_number = models.BigIntegerField()\n cvv = models.CharField(max_length=4)\n expiration_date = models.DateTimeField()\n created_date = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return f\"{self.card_number} - User: {self.account.user.first_name} {self.account.user.last_name}\""
},
{
"identifier": "DebitCardTransaction",
"path": "api/debit_cards/models.py",
"snippet": "class DebitCardTransaction(models.Model):\n transaction = models.OneToOneField(Transaction, on_delete=models.CASCADE, related_name='debit_card')\n transaction_partner_account = models.ForeignKey(Account, on_delete=models.CASCADE, related_name='transaction_partner_debit_card')\n\n def __str__(self):\n return f\"Transfer ID: {self.pk} - Receiver: {self.transaction.account.user.first_name} {self.transaction.account.user.last_name} - Transaction_partner: {self.transaction_partner_account.user.first_name} {self.transaction_partner_account.user.last_name} - Amount: {self.transaction.amount}\""
},
{
"identifier": "generate_valid_credit_card_number",
"path": "api/debit_cards/utils.py",
"snippet": "def generate_valid_credit_card_number():\n '''\n Generate a random credit card number using the Luhn algorithm.\n\n Returns:\n - str: A valid 16-digit credit card number with a starting digit '5'.\n '''\n card_number = '5' + ''.join(str(random.randint(0, 9)) for _ in range(13))\n \n checksum = luhn_checksum(card_number)\n \n while checksum != 0:\n card_number = '5' + ''.join(str(random.randint(0, 9)) for _ in range(13))\n checksum = luhn_checksum(card_number)\n\n return card_number"
},
{
"identifier": "generate_cvv",
"path": "api/debit_cards/utils.py",
"snippet": "def generate_cvv(card_number, expiration_date):\n '''\n Generate a simulated CVV based on the provided card number and expiration date.\n\n Args:\n - card_number (str): The card number to generate CVV from.\n - expiration_date (datetime.datetime): The expiration date of the card.\n\n Returns:\n - str: The simulated CVV code.\n '''\n formatted_date = expiration_date.strftime('%d%m')\n \n card_number_int = int(card_number)\n \n masked_card_number = card_number_int >> 5\n \n combined_data = int(f'{formatted_date}{masked_card_number}')\n \n masked_combined_data = combined_data & card_number_int\n \n hashed = hashlib.sha256(str(masked_combined_data).encode()).hexdigest()\n\n cvv = []\n index = 0\n for char in hashed[::-5]:\n index += 1\n try:\n int_value = int(char)\n \n if len(cvv) < 3:\n cvv.append(char)\n except ValueError:\n pass\n \n return ''.join(cvv)"
}
] | from rest_framework import serializers
from .models import DebitCard, DebitCardTransaction
from .utils import generate_valid_credit_card_number, generate_cvv
from accounts.serializers import AccountSerializer
from transactions.serializers import TransactionSerializer | 790 |
class DebitCardSerializer(serializers.ModelSerializer):
card_number = serializers.CharField(read_only=True)
cvv = serializers.CharField(read_only=True)
created_date = serializers.DateTimeField(read_only=True)
account = AccountSerializer()
expiration_date = serializers.SerializerMethodField()
class Meta:
|
class DebitCardSerializer(serializers.ModelSerializer):
card_number = serializers.CharField(read_only=True)
cvv = serializers.CharField(read_only=True)
created_date = serializers.DateTimeField(read_only=True)
account = AccountSerializer()
expiration_date = serializers.SerializerMethodField()
class Meta: | model = DebitCard | 0 | 2023-11-10 12:39:38+00:00 | 2k |
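The generate_valid_credit_card_number snippet in this record's context loops until luhn_checksum returns 0, but the helper itself is not shown (note, too, that as written the generator emits '5' plus 13 digits — 14 characters — despite the 16-digit docstring). A standard Luhn implementation it is presumably equivalent to:

def luhn_checksum(card_number) -> int:
    # Double every second digit from the right, fold doubles above 9
    # back into a single digit, and take the total mod 10.
    digits = [int(d) for d in str(card_number)]
    odd = digits[-1::-2]      # rightmost digit, then every other one
    even = digits[-2::-2]
    total = sum(odd)
    for d in even:
        total += d * 2 - 9 if d * 2 > 9 else d * 2
    return total % 10

assert luhn_checksum("79927398713") == 0   # classic Luhn test number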
Mj23978/OpenServer | openserver/core/vector_store/qdrant.py | [
{
"identifier": "get_config",
"path": "openserver/core/config/config.py",
"snippet": "def get_config(self, key: str, default: Optional[str] = None) -> str | None:\n return self.model_dump().get(key, default)"
},
{
"identifier": "VectorStore",
"path": "openserver/core/vector_store/base.py",
"snippet": "class VectorStore(ABC):\n\n client: Vector\n\n def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:\n \"\"\"Run more documents through the embeddings and add to the vectorstore.\n \"\"\"\n texts: list[str] = [doc.page_content for doc in documents]\n metadatas = [doc.metadata for doc in documents]\n return self.add_texts(texts=texts, metadatas=metadatas, **kwargs)\n\n def add_texts(\n self,\n texts: Iterable[str],\n metadatas: Optional[List[dict]] = None,\n **kwargs: Any,\n ) -> List[str]:\n return self.client.add_texts(texts=texts, metadatas=metadatas, kwargs=kwargs)\n\n def similarity_search(self, query: str, top_k: int, metadata: Optional[dict] = None, **kwargs: Any) -> List[Document]:\n return self.client.similarity_search(query=query, top_k=top_k, metadata=metadata, kwargs=kwargs)\n\n def similarity_search_with_relevance_scores(self, query: str, top_k: int, score_threshold: float, **kwargs: Any) -> List[Tuple[Document, float]]:\n return self.client.similarity_search_with_relevance_scores(query=query, top_k=top_k, score_threshold=score_threshold, kwargs=kwargs)\n\n def delete_embeddings_from_vector_db(self, ids: List[str]) -> bool | None:\n return self.client.delete(ids=ids)"
},
{
"identifier": "BaseEmbedding",
"path": "openserver/core/vector_store/embedding/base.py",
"snippet": "class BaseEmbedding(ABC):\n \n client: Embeddings\n\n @abstractmethod\n def get_embeddings(self, text: List[str]) -> List[List[float]]:\n pass\n\n @abstractmethod\n def get_embedding(self, text: str) -> List[float]:\n pass"
}
] | from mimetypes import common_types
from typing import Dict, Optional, Union
from qdrant_client import QdrantClient
from qdrant_client.conversions import common_types
from langchain.vectorstores.qdrant import Qdrant
from ..config.config import get_config
from .base import VectorStore
from .embedding.base import BaseEmbedding | 783 | from __future__ import annotations
DictFilter = Dict[str, Union[str, int, bool, dict, list]]
MetadataFilter = Union[DictFilter, common_types.Filter]
def create_qdrant_client(api_key: Optional[str] = None, url: Optional[str] = None, port: Optional[int] = None
) -> QdrantClient:
if api_key is None:
qdrant_host_name = get_config("QDRANT_HOST_NAME") or "localhost"
qdrant_port = int(get_config("QDRANT_PORT", default="6333"))
qdrant_client = QdrantClient(host=qdrant_host_name, port=qdrant_port)
else:
qdrant_client = QdrantClient(api_key=api_key, url=url, port=port)
return qdrant_client
class QdrantVectorStore(VectorStore):
def __init__(
self,
client: QdrantClient,
collection_name: str,
| from __future__ import annotations
DictFilter = Dict[str, Union[str, int, bool, dict, list]]
MetadataFilter = Union[DictFilter, common_types.Filter]
def create_qdrant_client(api_key: Optional[str] = None, url: Optional[str] = None, port: Optional[int] = None
) -> QdrantClient:
if api_key is None:
qdrant_host_name = get_config("QDRANT_HOST_NAME") or "localhost"
qdrant_port = int(get_config("QDRANT_PORT", default="6333"))
qdrant_client = QdrantClient(host=qdrant_host_name, port=qdrant_port)
else:
qdrant_client = QdrantClient(api_key=api_key, url=url, port=port)
return qdrant_client
class QdrantVectorStore(VectorStore):
def __init__(
self,
client: QdrantClient,
collection_name: str, | embedding_model: BaseEmbedding, | 2 | 2023-11-11 00:32:31+00:00 | 2k |
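A hedged usage sketch of the two branches in create_qdrant_client above: with no API key it falls back to host/port from config (defaulting to localhost:6333), otherwise it targets a remote cluster. The URL and key below are placeholders:

from qdrant_client import QdrantClient

local = QdrantClient(host="localhost", port=6333)   # no-api-key branch
remote = QdrantClient(                              # api-key branch
    api_key="YOUR_KEY",
    url="https://example.cloud.qdrant.io",
    port=6333,
)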
TCLResearchEurope/torch-dag | torch_dag_algorithms/pruning/orbit.py | [
{
"identifier": "OrbitsDiscoveryStage",
"path": "torch_dag_algorithms/pruning/orbits_search_stage.py",
"snippet": "class OrbitsDiscoveryStage(enum.Enum):\n EXTENDED_ORBIT_DISCOVERY = 'extended_orbits_discovery'\n FINAL_ORBIT_DISCOVERY = 'final_orbits_discovery'\n CLASSIC_ATTENTION_DISCOVERY = 'classic_attention_discovery'"
},
{
"identifier": "InnerVertex",
"path": "torch_dag/core/dag_module.py",
"snippet": "class InnerVertex(Vertex):\n def __init__(\n self,\n name: str,\n module: torch.nn.Module,\n predecessors: List[Vertex],\n ):\n super().__init__(name=name)\n self._module = module\n self._predecessors = list(predecessors)\n self.dag_module: \"DagModule\" = None\n self.orbit = None\n\n @property\n def successors(self) -> List['InnerVertex']:\n if self.dag_module is None:\n logger.error(f'Trying to get successors of an InnerVertex that has not been assigned to any DagModule.')\n return [vertex for vertex in self.dag_module.inner_vertices if self in vertex.predecessors]\n\n @property\n def predecessors(self) -> List[Vertex]:\n return self._predecessors\n\n @property\n def predecessor_indices(self) -> List[Vertex]:\n return [self.dag_module.vertices.index(pd) for pd in self.predecessors]\n\n @predecessors.setter\n def predecessors(self, new_predecessors: List[Vertex]):\n if not isinstance(new_predecessors, list):\n logger.error(f'Predecessors is expected to be a list. Got {type(new_predecessors)} except.')\n self._predecessors = new_predecessors\n\n @property\n def module(self) -> torch.nn.Module:\n return self._module\n\n @module.setter\n def module(self, module: torch.nn.Module):\n self._module = module\n # TODO: Remove after validation\n self.dag_module.update_inner_modules()\n\n def config_dict(self, atomic_modules: List[torch.nn.Module]):\n is_atomic = not isinstance(self.module, DagModule)\n result = {\n 'name': self.name,\n 'predecessor_indices': self.predecessor_indices,\n 'is_atomic': is_atomic,\n 'type': 'inner',\n 'orbit': self.orbit,\n }\n if not is_atomic:\n result['module_dict'] = self.module.config_dict(atomic_modules)\n else:\n result['module_index'] = atomic_modules.index(self.module)\n return result"
}
] | from typing import List
from typing import Set
from typing import Tuple
from torch_dag_algorithms.pruning.orbits_search_stage import OrbitsDiscoveryStage
from torch_dag.core.dag_module import InnerVertex | 911 |
class Orbit:
def __init__(self, color: int):
"""Basic orbit object that can represent either extended or final orbit. If orbit has `allow_for_further_processing` set to True then it can be processed by Orbitalizer by it's general mechanism. If set to False orbit won't be processed in any way and will be passed to orbitalization algorithm in unchanged state.
`_found_by` - indicates what stage lead to orbit being found. It's used in testing handling custom known patterns that are handled by hand. It also holds information that can be usefull durning debugging.
Args:
color (int): orbit color. has to be unique
allow_for_further_processing (bool, optional): If False orbit won't be process in any way. Defaults to True.
"""
self.color = color
self.vertices_in_scope: Set[InnerVertex] = set()
self.sources: List[InnerVertex] = []
self.sinks: List[InnerVertex] = []
self.end_path: List[Tuple[InnerVertex, InnerVertex]] = []
self.kmapps = None
self._discovery_stage = None
@property
|
class Orbit:
def __init__(self, color: int):
"""Basic orbit object that can represent either extended or final orbit. If orbit has `allow_for_further_processing` set to True then it can be processed by Orbitalizer by it's general mechanism. If set to False orbit won't be processed in any way and will be passed to orbitalization algorithm in unchanged state.
`_found_by` - indicates what stage lead to orbit being found. It's used in testing handling custom known patterns that are handled by hand. It also holds information that can be usefull durning debugging.
Args:
color (int): orbit color. has to be unique
allow_for_further_processing (bool, optional): If False orbit won't be process in any way. Defaults to True.
"""
self.color = color
self.vertices_in_scope: Set[InnerVertex] = set()
self.sources: List[InnerVertex] = []
self.sinks: List[InnerVertex] = []
self.end_path: List[Tuple[InnerVertex, InnerVertex]] = []
self.kmapps = None
self._discovery_stage = None
@property | def discovery_stage(self) -> OrbitsDiscoveryStage: | 0 | 2023-11-17 15:36:44+00:00 | 2k |
repeating/Binance-P2P-alerts-Telegram-bot | bot/alerts/alert.py | [
{
"identifier": "get_offers",
"path": "bot/binance_api.py",
"snippet": "async def get_offers(asset: str, fiat: str, trade_type: str, payment_method: str,\n rows: int = 5, page: int = 1, trans_amount: str = None) -> List[dict]:\n \"\"\"\n Fetch the best offers from Binance P2P.\n\n :param asset: Cryptocurrency asset, e.g., 'USDT', 'BTC'.\n :param fiat: Fiat currency, e.g., 'USD', 'EUR'.\n :param trade_type: Trade type, either 'Buy' or 'Sell'.\n :param rows: Number of offers to retrieve, default is 5.\n :param page: Page number for pagination, default is 1.\n :param trans_amount: Transaction amount for filtering offers.\n :param payment_method: payment type, default is \"Wise\".\n :return: List of offers from Binance P2P.\n \"\"\"\n data = {\n \"asset\": asset,\n \"fiat\": fiat,\n \"merchantCheck\": 'true', # Assuming this should always be true for more reliable offers.\n \"page\": page,\n \"payTypes\": [payment_method],\n \"publisherType\": None, # Assuming we don't filter by publisher type.\n \"rows\": rows,\n \"tradeType\": trade_type,\n \"transAmount\": trans_amount\n }\n headers = {\n \"Content-Type\": \"application/json\"\n }\n\n async with aiohttp.ClientSession() as session:\n async with session.post(BINANCE_P2P_API_URL, json=data, headers=headers) as response:\n if response.status == 200:\n response_json = await response.json()\n offers_data = response_json.get('data', [])\n offers = [{\n 'price': to_float(adv.get('price')),\n 'min_amount': to_float(adv.get('minSingleTransAmount')),\n 'max_amount': to_float(adv.get('maxSingleTransAmount'))\n } for item in offers_data for adv in [item.get('adv', {})]]\n return offers\n else:\n raise Exception(f\"Error fetching offers from Binance P2P: {response.status} - {await response.text()}\")"
},
{
"identifier": "get_link",
"path": "bot/binance_api.py",
"snippet": "def get_link(fiat: str, asset: str, payment_method: str, order_type: str):\n \"\"\"\n Get the link to the offers from Binance P2P.\n\n :param asset: Cryptocurrency asset, e.g., 'USDT', 'BTC'.\n :param fiat: Fiat currency, e.g., 'USD', 'EUR'.\n :param payment_method: payment type, default is \"Wise\".\n :param order_type: Order type, either 'Buy' or 'Sell'.\n :return: str, link to the offers from Binance P2P.\n \"\"\"\n url = f\"https://p2p.binance.com/en/trade/{order_type}/{payment_method}/{asset}?fiat={fiat}\"\n return url"
},
{
"identifier": "send_telegram_message",
"path": "bot/utils.py",
"snippet": "def send_telegram_message(user_id, message):\n \"\"\"\n Send a message to a user from a Telegram bot.\n\n Parameters:\n user_id (str): Unique identifier for the target user or username of the target channel.\n message (str): Text of the message to be sent.\n\n Returns:\n dict: Response from the Telegram API.\n \"\"\"\n # Telegram API endpoint for sending messages\n send_message_url = f\"https://api.telegram.org/bot{TELEGRAM_TOKEN}/sendMessage\"\n\n # Parameters for the API request\n params = {\n 'chat_id': user_id,\n 'text': message,\n 'parse_mode': 'HTML'\n }\n\n # Making the request to the Telegram API\n response = requests.post(send_message_url, params=params)\n\n # Returning the response as a Python dictionary\n return response.json()"
}
] | from datetime import datetime, timedelta
from bot.binance_api import get_offers, get_link
from bot.utils import send_telegram_message | 1,063 |
class Alert:
def __init__(self, alert_id, user_id, asset, fiat, trade_type, threshold_price, payment_method):
self.alert_id = alert_id
self.user_id = user_id
self.asset = asset
self.fiat = fiat
self.trade_type = trade_type
self.threshold_price = threshold_price
self.payment_method = payment_method
self.active = True
self.last_triggered = None # Track when the alert was last triggered
self.trigger_interval = 15 # in minutes
|
class Alert:
def __init__(self, alert_id, user_id, asset, fiat, trade_type, threshold_price, payment_method):
self.alert_id = alert_id
self.user_id = user_id
self.asset = asset
self.fiat = fiat
self.trade_type = trade_type
self.threshold_price = threshold_price
self.payment_method = payment_method
self.active = True
self.last_triggered = None # Track when the alert was last triggered
self.trigger_interval = 15 # in minutes | self.link = get_link(self.fiat, self.asset, self.payment_method, self.trade_type) | 1 | 2023-11-12 10:20:26+00:00 | 2k |
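last_triggered and trigger_interval above suggest a cooldown on repeat notifications; a hedged sketch of that check, where the should_trigger helper is an illustrative name not shown in the record:

from datetime import datetime, timedelta

def should_trigger(last_triggered, trigger_interval_minutes=15):
    # Fire if never triggered, or once the cooldown window has elapsed.
    if last_triggered is None:
        return True
    return datetime.now() - last_triggered >= timedelta(minutes=trigger_interval_minutes)

assert should_trigger(None)
assert not should_trigger(datetime.now())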
timlrx/simple-ai-agents | simple_ai_agents/chat_session.py | [
{
"identifier": "ChatMessage",
"path": "simple_ai_agents/models.py",
"snippet": "class ChatMessage(BaseModel):\n role: str\n content: str\n name: Optional[str] = None\n function_call: Optional[str] = None\n received_at: datetime.datetime = Field(default_factory=now_tz)\n finish_reason: Optional[str] = None\n prompt_length: Optional[int] = None\n completion_length: Optional[int] = None\n total_length: Optional[int] = None\n\n def __str__(self) -> str:\n return str(self.model_dump_json(exclude_none=True))"
},
{
"identifier": "ChatSession",
"path": "simple_ai_agents/models.py",
"snippet": "class ChatSession(BaseModel):\n id: Union[str, UUID] = Field(default_factory=uuid4)\n created_at: datetime.datetime = Field(default_factory=now_tz)\n system: str\n params: Dict[str, Any] = {}\n messages: List[ChatMessage] = []\n input_fields: Set[str] = set()\n recent_messages: Optional[int] = None\n save_messages: Optional[bool] = True\n total_prompt_length: int = 0\n total_completion_length: int = 0\n total_length: int = 0\n title: Optional[str] = None\n\n def __str__(self) -> str:\n sess_start_str = self.created_at.strftime(\"%Y-%m-%d %H:%M:%S\")\n if self.messages:\n last_message_str = self.messages[-1].received_at.strftime(\n \"%Y-%m-%d %H:%M:%S\"\n )\n else:\n last_message_str = \"N/A\"\n return f\"\"\"Chat session started at {sess_start_str}:\n - {len(self.messages):,} Messages\n - Last message sent at {last_message_str}\"\"\"\n\n def add_messages(\n self,\n user_message: ChatMessage,\n assistant_message: ChatMessage,\n save_messages: Optional[bool] = None,\n ) -> None:\n # if save_messages is explicitly defined, always use that choice\n # instead of the default\n to_save = isinstance(save_messages, bool)\n\n if to_save:\n if save_messages:\n self.messages.append(user_message)\n self.messages.append(assistant_message)\n elif self.save_messages:\n self.messages.append(user_message)\n self.messages.append(assistant_message)"
},
{
"identifier": "LLMOptions",
"path": "simple_ai_agents/models.py",
"snippet": "class LLMOptions(TypedDict, total=False):\n model: str\n functions: List\n function_call: str\n temperature: float\n top_p: float\n n: int\n stream: bool\n stop: str\n max_tokens: float\n presence_penalty: float\n frequency_penalty: float\n logit_bias: dict\n user: str\n deployment_id: str\n request_timeout: int\n api_base: str\n api_version: str\n api_key: str\n model_list: list"
}
] | from json import JSONDecodeError
from typing import Any, AsyncGenerator, Generator, Optional, Type, TypeVar
from instructor.function_calls import Mode
from instructor.patch import handle_response_model, process_response
from litellm import ModelResponse, acompletion, completion
from pydantic import BaseModel, ValidationError
from simple_ai_agents.models import ChatMessage, ChatSession, LLMOptions
import litellm | 862 |
litellm.telemetry = False
litellm.add_function_to_prompt = True # add function to prompt for non-OpenAI models
litellm.drop_params = True # drop params if unsupported by provider
litellm.suppress_debug_info = True
T = TypeVar("T", bound=BaseModel)
|
litellm.telemetry = False
litellm.add_function_to_prompt = True # add function to prompt for non-OpenAI models
litellm.drop_params = True # drop params if unsupported by provider
litellm.suppress_debug_info = True
T = TypeVar("T", bound=BaseModel)
| class ChatLLMSession(ChatSession): | 1 | 2023-11-10 06:01:25+00:00 | 2k |
DIAGNijmegen/HoVer-UNet | models/HoVerNet/post_proc.py | [
{
"identifier": "remove_small_objects",
"path": "models/HoVerNet/utils.py",
"snippet": "def remove_small_objects(pred, min_size=64, connectivity=1):\n \"\"\"Remove connected components smaller than the specified size.\n\n This function is taken from skimage.morphology.remove_small_objects, but the warning\n is removed when a single label is provided.\n\n Args:\n pred: input labelled array\n min_size: minimum size of instance in output array\n connectivity: The connectivity defining the neighborhood of a pixel.\n\n Returns:\n out: output array with instances removed under min_size\n\n \"\"\"\n out = pred\n\n if min_size == 0: # shortcut for efficiency\n return out\n\n if out.dtype == bool:\n selem = ndimage.generate_binary_structure(pred.ndim, connectivity)\n ccs = np.zeros_like(pred, dtype=np.int32)\n ndimage.label(pred, selem, output=ccs)\n else:\n ccs = out\n\n try:\n component_sizes = np.bincount(ccs.ravel())\n except ValueError:\n raise ValueError(\n \"Negative value labels are not supported. Try \"\n \"relabeling the input with `scipy.ndimage.label` or \"\n \"`skimage.morphology.label`.\"\n )\n\n too_small = component_sizes < min_size\n too_small_mask = too_small[ccs]\n out[too_small_mask] = 0\n\n return out"
},
{
"identifier": "get_bounding_box",
"path": "models/HoVerNet/utils.py",
"snippet": "def get_bounding_box(img):\n \"\"\"Get bounding box coordinate information.\"\"\"\n rows = np.any(img, axis=1)\n cols = np.any(img, axis=0)\n rmin, rmax = np.where(rows)[0][[0, -1]]\n cmin, cmax = np.where(cols)[0][[0, -1]]\n # due to python indexing, need to add 1 to max\n # else accessing will be 1px in the box, not out\n rmax += 1\n cmax += 1\n return [rmin, rmax, cmin, cmax]"
}
] | import warnings
import cv2
import numpy as np
from scipy.ndimage import measurements
from scipy.ndimage.morphology import (
binary_fill_holes,
)
from skimage.segmentation import watershed
from models.HoVerNet.utils import remove_small_objects, get_bounding_box | 1,523 |
def noop(*args, **kwargs):
pass
warnings.warn = noop
####
def __proc_np_hv(pred):
"""Process Nuclei Prediction with XY Coordinate Map.
Args:
pred: prediction output, assuming
channel 0 contain probability map of nuclei
channel 1 containing the regressed X-map
channel 2 containing the regressed Y-map
"""
pred = np.array(pred, dtype=np.float32)
blb_raw = pred[..., 0]
h_dir_raw = pred[..., 1]
v_dir_raw = pred[..., 2]
# processing
blb = np.array(blb_raw >= 0.5, dtype=np.int32)
blb = measurements.label(blb)[0]
blb = remove_small_objects(blb, min_size=10)
blb[blb > 0] = 1 # background is 0 already
h_dir = cv2.normalize(
h_dir_raw, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F
)
v_dir = cv2.normalize(
v_dir_raw, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F
)
sobelh = cv2.Sobel(h_dir, cv2.CV_64F, 1, 0, ksize=21)
sobelv = cv2.Sobel(v_dir, cv2.CV_64F, 0, 1, ksize=21)
sobelh = 1 - (
cv2.normalize(
sobelh, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F
)
)
sobelv = 1 - (
cv2.normalize(
sobelv, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F
)
)
overall = np.maximum(sobelh, sobelv)
overall = overall - (1 - blb)
overall[overall < 0] = 0
dist = (1.0 - overall) * blb
    ## nuclei values form mountains, so invert to get basins
dist = -cv2.GaussianBlur(dist, (3, 3), 0)
overall = np.array(overall >= 0.4, dtype=np.int32)
marker = blb - overall
marker[marker < 0] = 0
marker = binary_fill_holes(marker).astype("uint8")
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
marker = cv2.morphologyEx(marker, cv2.MORPH_OPEN, kernel)
marker = measurements.label(marker)[0]
marker = remove_small_objects(marker, min_size=10)
proced_pred = watershed(dist, markers=marker, mask=blb)
return proced_pred
####
def process(pred_map, nr_types=None, return_centroids=False):
"""Post processing script for image tiles.
Args:
        pred_map: combined output of tp, np and hv branches, in the same order
        nr_types: number of types considered at output of nc branch
        return_centroids: whether to also collect per-instance info for each detected nucleus
Returns:
pred_inst: pixel-wise nuclear instance segmentation prediction
pred_type_out: pixel-wise nuclear type prediction
"""
if nr_types is not None:
pred_type = pred_map[..., :1]
pred_inst = pred_map[..., 1:]
pred_type = pred_type.astype(np.int32)
else:
pred_inst = pred_map
pred_inst = np.squeeze(pred_inst)
pred_inst = __proc_np_hv(pred_inst)
inst_info_dict = None
if return_centroids or nr_types is not None:
        inst_id_list = np.unique(pred_inst)[1:] # exclude background
inst_info_dict = {}
for inst_id in inst_id_list:
inst_map = pred_inst == inst_id
            # TODO: change format of bbox output
|
def noop(*args, **kwargs):
pass
warnings.warn = noop
####
def __proc_np_hv(pred):
"""Process Nuclei Prediction with XY Coordinate Map.
Args:
pred: prediction output, assuming
channel 0 contain probability map of nuclei
channel 1 containing the regressed X-map
channel 2 containing the regressed Y-map
"""
pred = np.array(pred, dtype=np.float32)
blb_raw = pred[..., 0]
h_dir_raw = pred[..., 1]
v_dir_raw = pred[..., 2]
# processing
blb = np.array(blb_raw >= 0.5, dtype=np.int32)
blb = measurements.label(blb)[0]
blb = remove_small_objects(blb, min_size=10)
blb[blb > 0] = 1 # background is 0 already
h_dir = cv2.normalize(
h_dir_raw, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F
)
v_dir = cv2.normalize(
v_dir_raw, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F
)
sobelh = cv2.Sobel(h_dir, cv2.CV_64F, 1, 0, ksize=21)
sobelv = cv2.Sobel(v_dir, cv2.CV_64F, 0, 1, ksize=21)
sobelh = 1 - (
cv2.normalize(
sobelh, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F
)
)
sobelv = 1 - (
cv2.normalize(
sobelv, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F
)
)
overall = np.maximum(sobelh, sobelv)
overall = overall - (1 - blb)
overall[overall < 0] = 0
dist = (1.0 - overall) * blb
    ## nuclei values form mountains, so invert to get basins
dist = -cv2.GaussianBlur(dist, (3, 3), 0)
overall = np.array(overall >= 0.4, dtype=np.int32)
marker = blb - overall
marker[marker < 0] = 0
marker = binary_fill_holes(marker).astype("uint8")
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
marker = cv2.morphologyEx(marker, cv2.MORPH_OPEN, kernel)
marker = measurements.label(marker)[0]
marker = remove_small_objects(marker, min_size=10)
proced_pred = watershed(dist, markers=marker, mask=blb)
return proced_pred
####
def process(pred_map, nr_types=None, return_centroids=False):
"""Post processing script for image tiles.
Args:
        pred_map: combined output of tp, np and hv branches, in the same order
        nr_types: number of types considered at output of nc branch
        return_centroids: whether to also collect per-instance info for each detected nucleus
Returns:
pred_inst: pixel-wise nuclear instance segmentation prediction
pred_type_out: pixel-wise nuclear type prediction
"""
if nr_types is not None:
pred_type = pred_map[..., :1]
pred_inst = pred_map[..., 1:]
pred_type = pred_type.astype(np.int32)
else:
pred_inst = pred_map
pred_inst = np.squeeze(pred_inst)
pred_inst = __proc_np_hv(pred_inst)
inst_info_dict = None
if return_centroids or nr_types is not None:
        inst_id_list = np.unique(pred_inst)[1:] # exclude background
inst_info_dict = {}
for inst_id in inst_id_list:
inst_map = pred_inst == inst_id
            # TODO: change format of bbox output | rmin, rmax, cmin, cmax = get_bounding_box(inst_map) | 1 | 2023-11-10 09:37:29+00:00 | 2k |
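A hedged usage sketch of the process function above: with nr_types set, pred_map is expected as an (H, W, 1+3) array — channel 0 the type map, channels 1-3 the NP probability and the regressed H/V maps. Shapes and random inputs are illustrative only, and since the record truncates before process returns, the call line stays commented with its return values assumed:

import numpy as np

H, W = 256, 256
type_map = np.random.randint(0, 5, size=(H, W, 1)).astype(np.float32)
np_prob = np.random.rand(H, W, 1).astype(np.float32)             # nuclei probability
hv_maps = (np.random.rand(H, W, 2).astype(np.float32) * 2) - 1   # regressed X/Y maps
pred_map = np.concatenate([type_map, np_prob, hv_maps], axis=-1)  # (H, W, 4)

# pred_inst, inst_info_dict = process(pred_map, nr_types=5, return_centroids=True)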
fofr/cog-sdxl-lcm-multi-controlnet-lora | controlnet.py | [
{
"identifier": "ControlNetPreprocessor",
"path": "controlnet_preprocess.py",
"snippet": "class ControlNetPreprocessor:\n ANNOTATOR_NAMES = [\n \"none\",\n \"edge_canny\",\n \"depth_leres\",\n \"depth_midas\",\n \"soft_edge_pidi\",\n \"soft_edge_hed\",\n \"lineart\",\n \"lineart_anime\",\n \"openpose\",\n # \"straight_edge_mlsd\",\n # \"face_detector\",\n # \"content_shuffle\",\n # \"normal_bae\",\n # \"segementation_sam\",\n ]\n\n def __init__(self, predictor):\n WeightsDownloader.download_if_not_exists(\n CONTROLNET_PREPROCESSOR_URL, CONTROLNET_PREPROCESSOR_MODEL_CACHE\n )\n\n self.annotators = {\n \"edge_canny\": CannyDetector(),\n \"depth_leres\": self.initialize_detector(LeresDetector),\n \"depth_midas\": self.initialize_detector(MidasDetector),\n \"soft_edge_pidi\": self.initialize_detector(PidiNetDetector),\n \"soft_edge_hed\": self.initialize_detector(HEDdetector),\n \"lineart\": self.initialize_detector(LineartDetector),\n \"lineart_anime\": self.initialize_detector(LineartAnimeDetector),\n \"openpose\": self.initialize_detector(OpenposeDetector),\n # \"straight_edge_mlsd\": self.initialize_detector(MLSDdetector),\n # \"face_detector\": MediapipeFaceDetector(),\n # \"content_shuffle\": ContentShuffleDetector(),\n # \"normal_bae\": self.initialize_detector(NormalBaeDetector),\n # \"segementation_sam\": self.initialize_detector(\n # SamDetector,\n # model_name=\"ybelkada/segment-anything\",\n # subfolder=\"checkpoints\",\n # ),\n }\n\n torch.device(\"cuda\")\n\n @staticmethod\n def get_annotator_names():\n return ControlNetPreprocessor.ANNOTATOR_NAMES\n\n def initialize_detector(\n self, detector_class, model_name=\"lllyasviel/Annotators\", **kwargs\n ):\n print(f\"Initializing {detector_class.__name__}\")\n return detector_class.from_pretrained(\n model_name,\n cache_dir=CONTROLNET_PREPROCESSOR_MODEL_CACHE,\n **kwargs,\n )\n\n def annotators_list(self):\n return list(self.annotators.keys())\n\n def process_image(self, image, annotator):\n print(f\"Processing image with {annotator}\")\n return self.annotators[annotator](image)"
},
{
"identifier": "WeightsDownloader",
"path": "weights_downloader.py",
"snippet": "class WeightsDownloader:\n @staticmethod\n def download_if_not_exists(url, dest):\n if not os.path.exists(dest):\n WeightsDownloader.download(url, dest)\n\n @staticmethod\n def download(url, dest):\n start = time.time()\n print(\"downloading url: \", url)\n print(\"downloading to: \", dest)\n subprocess.check_call([\"pget\", \"-x\", url, dest], close_fds=False)\n print(\"downloading took: \", time.time() - start)"
}
] | import torch
from diffusers import ControlNetModel
from controlnet_preprocess import ControlNetPreprocessor
from weights_downloader import WeightsDownloader | 917 |
CONTROLNET_MODEL_CACHE = "./controlnet-cache"
CONTROLNET_URL = "https://weights.replicate.delivery/default/controlnet/sdxl-cn-canny-depth-softe-pose-qr.tar"
class ControlNet:
CONTROLNET_MODELS = [
"none",
"edge_canny",
"illusion",
"depth_leres",
"depth_midas",
"soft_edge_pidi",
"soft_edge_hed",
"lineart",
"lineart_anime",
"openpose",
# Preprocessors without an XL model yet
# "straight_edge_mlsd",
# "face_detector",
# "content_shuffle",
# "normal_bae",
# "segementation_sam",
]
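    # Conditioning modes exposed to callers; "none" is expected to disable
    # ControlNet for that input slot.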
def __init__(self, predictor):
|
CONTROLNET_MODEL_CACHE = "./controlnet-cache"
CONTROLNET_URL = "https://weights.replicate.delivery/default/controlnet/sdxl-cn-canny-depth-softe-pose-qr.tar"
class ControlNet:
CONTROLNET_MODELS = [
"none",
"edge_canny",
"illusion",
"depth_leres",
"depth_midas",
"soft_edge_pidi",
"soft_edge_hed",
"lineart",
"lineart_anime",
"openpose",
# Preprocessors without an XL model yet
# "straight_edge_mlsd",
# "face_detector",
# "content_shuffle",
# "normal_bae",
# "segementation_sam",
]
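    # Conditioning modes exposed to callers; "none" is expected to disable
    # ControlNet for that input slot.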
def __init__(self, predictor): | WeightsDownloader.download_if_not_exists(CONTROLNET_URL, CONTROLNET_MODEL_CACHE) | 1 | 2023-11-16 11:11:27+00:00 | 2k |
joyn-gg/discord.http | discord_http/view.py | [
{
"identifier": "PartialEmoji",
"path": "discord_http/emoji.py",
"snippet": "class PartialEmoji:\n def __init__(self, emoji: str):\n self._original_name: str = emoji\n\n self.id: Optional[int] = None\n self.animated: bool = False\n self.discord_emoji: bool = False\n\n is_custom: Optional[re.Match] = utils.re_emoji.search(emoji)\n\n if is_custom:\n _animated, _name, _id = is_custom.groups()\n self.discord_emoji = True\n self.animated = bool(_animated)\n self.name: str = _name\n self.id = int(_id)\n else:\n self.name: str = emoji\n\n def __repr__(self) -> str:\n if self.discord_emoji:\n return f\"<PartialEmoji name='{self.name}' id={self.id} animated={self.animated}>\"\n return f\"<PartialEmoji name='{self.name}'>\"\n\n def __str__(self) -> str:\n return self._original_name\n\n def __int__(self) -> Optional[int]:\n if self.discord_emoji:\n return self.id\n return None\n\n @property\n def url(self) -> Optional[str]:\n \"\"\" `str`: Returns the URL of the emoji if it's a Discord emoji \"\"\"\n if self.discord_emoji:\n return f\"{Asset.BASE}/emojis/{self.id}.{'gif' if self.animated else 'png'}\"\n return None\n\n def to_dict(self) -> dict:\n \"\"\" `dict`: Returns a dict representation of the emoji \"\"\"\n if self.discord_emoji:\n # Include animated if it's a Discord emoji\n return {\"id\": self.id, \"name\": self.name, \"animated\": self.animated}\n return {\"name\": self.name, \"id\": None}\n\n def to_reaction(self) -> str:\n \"\"\" `str`: Returns a string representation of the emoji \"\"\"\n if self.discord_emoji:\n return f\"{self.name}:{self.id}\"\n return self.name"
},
{
"identifier": "ButtonStyles",
"path": "discord_http/enums.py",
"snippet": "class ButtonStyles(Enum):\n primary = 1\n secondary = 2\n success = 3\n danger = 4\n link = 5\n\n blurple = 1\n grey = 2\n gray = 2\n green = 3\n red = 4\n url = 5"
},
{
"identifier": "ComponentType",
"path": "discord_http/enums.py",
"snippet": "class ComponentType(Enum):\n action_row = 1\n button = 2\n string_select = 3\n text_input = 4\n user_select = 5\n role_select = 6\n mentionable_select = 7\n channel_select = 8"
},
{
"identifier": "TextStyles",
"path": "discord_http/enums.py",
"snippet": "class TextStyles(Enum):\n short = 1\n paragraph = 2"
},
{
"identifier": "ChannelType",
"path": "discord_http/enums.py",
"snippet": "class ChannelType(Enum):\n guild_text = 0\n dm = 1\n guild_voice = 2\n group_dm = 3\n guild_category = 4\n guild_news = 5\n guild_store = 6\n guild_news_thread = 10\n guild_public_thread = 11\n guild_private_thread = 12\n guild_stage_voice = 13\n guild_directory = 14\n guild_forum = 15"
}
] | import asyncio
import inspect
import logging
import secrets
import time
from typing import Union, Optional, TYPE_CHECKING, Callable
from .emoji import PartialEmoji
from .enums import ButtonStyles, ComponentType, TextStyles, ChannelType
from . import Snowflake
from .channel import BaseChannel
from .context import Context
from .message import Message
from .response import BaseResponse | 1,198 |
if TYPE_CHECKING:
    pass  # type-only imports elided by the crop

_log = logging.getLogger(__name__)
__all__ = (
"Button",
"ChannelSelect",
"Item",
"Link",
"MentionableSelect",
"Modal",
"RoleSelect",
"Select",
"UserSelect",
"View",
)
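# Discord rejects interactive components without a custom_id, so a random hex
# token is generated whenever the caller does not supply one.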
def _garbage_id() -> str:
""" `str`: Returns a random ID to satisfy Discord API """
return secrets.token_hex(16)
class Item:
def __init__(self, *, type: int, row: Optional[int] = None):
self.row: Optional[int] = row
self.type: int = type
def __repr__(self) -> str:
return f"<Item type={self.type} row={self.row}>"
def to_dict(self) -> dict:
""" `dict`: Returns a dict representation of the item """
raise NotImplementedError("to_dict not implemented")
class Button(Item):
def __init__(
self,
*,
label: Optional[str] = None,
style: Union[ButtonStyles, str, int] = ButtonStyles.primary,
disabled: bool = False,
row: Optional[int] = None,
custom_id: Optional[str] = None,
emoji: Optional[Union[str, dict]] = None,
url: Optional[str] = None
):
|
if TYPE_CHECKING:
    pass  # type-only imports elided by the crop

_log = logging.getLogger(__name__)
__all__ = (
"Button",
"ChannelSelect",
"Item",
"Link",
"MentionableSelect",
"Modal",
"RoleSelect",
"Select",
"UserSelect",
"View",
)
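# Discord rejects interactive components without a custom_id, so a random hex
# token is generated whenever the caller does not supply one.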
def _garbage_id() -> str:
""" `str`: Returns a random ID to satisfy Discord API """
return secrets.token_hex(16)
class Item:
def __init__(self, *, type: int, row: Optional[int] = None):
self.row: Optional[int] = row
self.type: int = type
def __repr__(self) -> str:
return f"<Item type={self.type} row={self.row}>"
def to_dict(self) -> dict:
""" `dict`: Returns a dict representation of the item """
raise NotImplementedError("to_dict not implemented")
class Button(Item):
def __init__(
self,
*,
label: Optional[str] = None,
style: Union[ButtonStyles, str, int] = ButtonStyles.primary,
disabled: bool = False,
row: Optional[int] = None,
custom_id: Optional[str] = None,
emoji: Optional[Union[str, dict]] = None,
url: Optional[str] = None
): | super().__init__(type=int(ComponentType.button), row=row) | 2 | 2023-11-14 12:50:42+00:00 | 2k |
catid/aiwebcam2 | app.py | [
{
"identifier": "logger",
"path": "utils.py",
"snippet": "class ColoredFormatter(logging.Formatter):\n def format(self, record):\ndef setup_colored_logging(level=logging.INFO):"
},
{
"identifier": "ASRServiceRunner",
"path": "service_asr.py",
"snippet": "class ASRServiceRunner:\n def __init__(self):\n self.lock = asyncio.Lock()\n self.command_queue = Queue()\n self.response_queue = Queue()\n self.service_process = Process(\n target=run_loop,\n args=(self.command_queue, self.response_queue))\n self.service_process.start()\n\n def close(self):\n logger.info(\"Stopping ASR service...\")\n self.command_queue.put(('stop',))\n self.service_process.join()\n self.command_queue.close()\n self.response_queue.close()\n logger.info(\"ASR service stopped.\")\n\n async def Transcribe(self, pcm_data_array, channels, sample_rate):\n async with self.lock:\n self.command_queue.put(('transcribe', pcm_data_array, channels, sample_rate))\n return await asyncio.get_running_loop().run_in_executor(None, self.response_queue.get)"
},
{
"identifier": "LLMServiceRunner",
"path": "service_llm.py",
"snippet": "class LLMServiceRunner:\n def __init__(self):\n self.lock = asyncio.Lock()\n self.command_queue = Queue()\n self.response_queue = Queue()\n self.service_process = Process(\n target=run_loop,\n args=(self.command_queue, self.response_queue))\n self.service_process.start()\n\n def close(self):\n logger.info(\"Stopping LLM service...\")\n self.command_queue.put(('stop',))\n self.service_process.join()\n self.command_queue.close()\n self.response_queue.close()\n logger.info(\"LLM service stopped.\")\n\n async def VisionCompletionBegin(self, prompt_messages):\n async with self.lock:\n self.command_queue.put(('vision_completion', prompt_messages))\n\n async def TextCompletionBegin(self, prompt_messages):\n async with self.lock:\n self.command_queue.put(('text_completion', prompt_messages))\n\n # Returns None on final one\n async def CompletionPoll(self):\n return await asyncio.get_running_loop().run_in_executor(None, self.response_queue.get)"
},
{
"identifier": "TTSServiceRunner",
"path": "service_tts.py",
"snippet": "class TTSServiceRunner:\n def __init__(self):\n self.lock = asyncio.Lock()\n self.command_queue = multiprocessing.Queue()\n self.response_queue = multiprocessing.Queue()\n self.service_process = multiprocessing.Process(\n target=run_loop,\n args=(self.command_queue, self.response_queue))\n self.service_process.start()\n\n self.silence_duration = 0.02\n\n self.next_pts = 0\n\n def close(self):\n logger.info(\"Stopping background TTS worker...\")\n self.command_queue.put(('stop',))\n self.service_process.join()\n logger.info(\"Closing command_queue...\")\n self.command_queue.close()\n logger.info(\"Closing response_queue...\")\n self.response_queue.close()\n logger.info(\"Stopped background TTS worker.\")\n\n def generate_silence_packet(self, duration_seconds):\n chunk = bytes.fromhex('f8 ff fe')\n\n packet = av.packet.Packet(chunk)\n packet.pts = self.next_pts\n packet.dts = self.next_pts\n packet.time_base = time_base_fraction\n\n pts_count = round(duration_seconds * time_base)\n self.next_pts += pts_count\n\n #logger.info(f\"silence pts_count = {pts_count}\")\n\n return packet\n\n # Grab either the next TTS Opus packet to play back,\n # or a silence packet if no data is available.\n def poll_packet(self):\n try:\n duration, pts_count, chunk = self.response_queue.get_nowait()\n\n packet = av.packet.Packet(chunk)\n packet.pts = self.next_pts\n packet.dts = self.next_pts\n packet.time_base = time_base_fraction\n\n self.next_pts += pts_count\n\n return packet, duration\n\n except:\n pass # Ignore Empty exception\n\n return self.generate_silence_packet(self.silence_duration), self.silence_duration\n\n\n async def Speak(self, text):\n async with self.lock:\n self.command_queue.put(('speak', text))"
}
] | from utils import logger
from service_asr import ASRServiceRunner
from service_llm import LLMServiceRunner
from service_tts import TTSServiceRunner
from aiortc import RTCIceCandidate, RTCSessionDescription, RTCPeerConnection
from aiortc.mediastreams import AudioStreamTrack, VideoStreamTrack, MediaStreamError, MediaStreamTrack
from queue import Queue
from fractions import Fraction
from PIL import Image
from zoneinfo import ZoneInfo
import socketio
import asyncio
import re
import io
import base64
import numpy as np
import time, datetime
import aiohttp.web
import ssl
import argparse | 1,591 | # Logging
sio = socketio.AsyncServer(cors_allowed_origins='*')
# Background services
asr_runner = ASRServiceRunner()
llm_runner = LLMServiceRunner()
# WebRTC peer listening for a single browser to connect
# We run each WebRTC peer in a separate process to avoid stalls in playback
# WebRTC Connection
class VideoReceiver(VideoStreamTrack):
kind = "video"
def __init__(self, track):
super().__init__() # Initialize the MediaStreamTrack
self.track = track
self.recording = False
self.recorded_frame = None
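    # One-shot frame grab: while recording, the first frame received is kept
    # until endRecording() hands it back for snapshot use.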
def startRecording(self):
self.recording = True
self.recorded_frame = None
def endRecording(self):
self.recording = False
image = self.recorded_frame
self.recorded_frame = None
return image
async def recv(self):
frame = await self.track.recv()
# Process the frame (e.g., save to a file, play audio, etc.)
if self.recording:
if not self.recorded_frame:
self.recorded_frame = frame
return frame
class CustomAudioStream(MediaStreamTrack):
kind = "audio"
def __init__(self):
super().__init__() # don't forget this!
self.tts = TTSServiceRunner()
self.stream_time = None
async def close(self):
super().stop()
self.tts.close()
async def recv(self):
packet, duration = self.tts.poll_packet()
#logger.info(f"opus duration={duration} pts={packet.pts}")
if self.stream_time is None:
self.stream_time = time.time()
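        # Pace playback in real time: sleep until this packet's scheduled start,
        # then advance the schedule by the packet's duration.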
wait = self.stream_time - time.time()
await asyncio.sleep(wait)
self.stream_time += duration
return packet
class WebRTCConnection:
def __init__(self, sid):
self.sid = sid
self.pc = RTCPeerConnection()
self.video_track = None
self.processing_audio = False
self.recording = False
self.opus_track = CustomAudioStream()
@self.pc.on("connectionstatechange")
async def on_connectionstatechange():
| # Logging
sio = socketio.AsyncServer(cors_allowed_origins='*')
# Background services
asr_runner = ASRServiceRunner()
llm_runner = LLMServiceRunner()
# WebRTC peer listening for a single browser to connect
# We run each WebRTC peer in a separate process to avoid stalls in playback
# WebRTC Connection
class VideoReceiver(VideoStreamTrack):
kind = "video"
def __init__(self, track):
super().__init__() # Initialize the MediaStreamTrack
self.track = track
self.recording = False
self.recorded_frame = None
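    # One-shot frame grab: while recording, the first frame received is kept
    # until endRecording() hands it back for snapshot use.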
def startRecording(self):
self.recording = True
self.recorded_frame = None
def endRecording(self):
self.recording = False
image = self.recorded_frame
self.recorded_frame = None
return image
async def recv(self):
frame = await self.track.recv()
# Process the frame (e.g., save to a file, play audio, etc.)
if self.recording:
if not self.recorded_frame:
self.recorded_frame = frame
return frame
class CustomAudioStream(MediaStreamTrack):
kind = "audio"
def __init__(self):
super().__init__() # don't forget this!
self.tts = TTSServiceRunner()
self.stream_time = None
async def close(self):
super().stop()
self.tts.close()
async def recv(self):
packet, duration = self.tts.poll_packet()
#logger.info(f"opus duration={duration} pts={packet.pts}")
if self.stream_time is None:
self.stream_time = time.time()
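        # Pace playback in real time: sleep until this packet's scheduled start,
        # then advance the schedule by the packet's duration.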
wait = self.stream_time - time.time()
await asyncio.sleep(wait)
self.stream_time += duration
return packet
class WebRTCConnection:
def __init__(self, sid):
self.sid = sid
self.pc = RTCPeerConnection()
self.video_track = None
self.processing_audio = False
self.recording = False
self.opus_track = CustomAudioStream()
@self.pc.on("connectionstatechange")
async def on_connectionstatechange(): | logger.info(f"self.pc.connectionState = {self.pc.connectionState}") | 0 | 2023-11-16 03:37:47+00:00 | 2k |
chziakas/backbone-learn | experiments/benchmark_decision_tree.py | [
{
"identifier": "BackboneDecisionTree",
"path": "backbone_learn/backbone/backbone_decision_tree.py",
"snippet": "class BackboneDecisionTree(BackboneSupervised):\n \"\"\"\n Specific implementation of the Backbone method for sparse regression.\n\n This class combines Pearson correlation for feature screening, L0BnB for exact solving, and Lasso for heuristic solving to construct a sparse regression model.\n\n Inherits from:\n BackboneBase (ABC): The abstract base class for backbone algorithms.\n \"\"\"\n\n def set_solvers(\n self,\n alpha=0.5,\n depth=3,\n time_limit=1000,\n _lambda=0.5,\n num_threads=None,\n obj_mode=\"acc\",\n n_bins=2,\n is_data_fit=False,\n ):\n \"\"\"\n Initializes the sparse regression method with specified components.\n\n Args:\n alpha (float): Proportion of features to retain after screening. Defaults to 0.5.\n depth (int, optional): Depth of BendersOCT tree. Defaults to 3.\n time_limit (int): Time limit for the optimization process.\n _lambda (float): Regularization parameter.\n num_threads (int or None): Number of threads for parallel processing.\n obj_mode (str): Objective mode, e.g., 'acc' for accuracy.\n n_bins (int): Number of bins for KBinsDiscretizer. Defaults to 2.\n is_data_fit (bool): Whether data are in the format required for OCT\n \"\"\"\n self.screen_selector = PearsonCorrelationSelector(alpha)\n self.exact_solver = BendersOCTDecisionTree(\n depth=depth,\n time_limit=time_limit,\n _lambda=_lambda,\n num_threads=num_threads,\n obj_mode=obj_mode,\n n_bins=n_bins,\n is_data_fit=is_data_fit,\n )\n self.heuristic_solver = CARTDecisionTree()"
},
{
"identifier": "CARTDecisionTree",
"path": "backbone_learn/heuristic_solvers/cart_decision_tree.py",
"snippet": "class CARTDecisionTree(HeuristicSolverBase):\n \"\"\"\n Implements a Classification And Regression Tree (CART) Decision Tree with cross-validation using AUC.\n This solver is a heuristic approach for fitting a decision tree model and identifying significant features.\n\n Attributes:\n _model (DecisionTreeClassifier): An instance of the sklearn DecisionTreeClassifier.\n _auc_score (float): The maximum AUC score obtained during cross-validation.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes the CARTDecisionTree with a DecisionTreeClassifier model.\n \"\"\"\n self._model = DecisionTreeClassifier()\n self._auc_score = None\n\n @property\n def auc_score(self) -> float:\n \"\"\"\n Returns the maximum AUC score obtained from cross-validation.\n\n Returns:\n float: The maximum AUC score.\n \"\"\"\n return self._auc_score\n\n def fit(self, X: np.ndarray, y: np.ndarray, cv_folds: int = 5, random_state: int = 0) -> None:\n \"\"\"\n Fits a CART Decision Tree model to the data using hyperparameter tuning with cross-validation and evaluates it using AUC.\n\n Args:\n X (np.ndarray): The input features as a NumPy array.\n y (np.ndarray): The target labels as a NumPy array.\n cv_folds (int): The number of folds to use for cross-validation.\n\n \"\"\"\n self._model.set_params(random_state=random_state)\n # Define the parameter grid for hyperparameter tuning\n param_grid = {\"max_depth\": [None, 5, 10, 20], \"min_samples_leaf\": [1, 2, 4]}\n\n # Initialize GridSearchCV with the model and parameter grid\n grid_search = GridSearchCV(\n self._model, param_grid, cv=cv_folds, scoring=\"roc_auc\", verbose=1\n )\n\n # Perform the grid search on the provided data\n grid_search.fit(X, y)\n\n # Update the model with the best found parameters\n self._model = grid_search.best_estimator_\n\n # Store the best AUC score\n self._auc_score = grid_search.best_score_\n\n def get_relevant_variables(self, threshold: float) -> np.ndarray:\n \"\"\"\n Identifies features with importance greater than a specified threshold.\n\n Args:\n threshold (float): The threshold for determining feature relevance.\n\n Returns:\n np.ndarray: An array of indices of relevant features.\n \"\"\"\n\n significant_indices = np.where(self._model.feature_importances_ > threshold)[0]\n return significant_indices\n\n def predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predicts the target labels for the given data.\n\n Args:\n X (np.ndarray): The input features as a NumPy array.\n\n Returns:\n np.ndarray: The predicted target labels.\n \"\"\"\n return self._model.predict(X)"
}
] | import time
from itertools import product
from sklearn.datasets import make_classification
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder
from utils import save_results
from backbone_learn.backbone.backbone_decision_tree import BackboneDecisionTree
from backbone_learn.heuristic_solvers.cart_decision_tree import CARTDecisionTree | 1,590 |
# Define parameter ranges for Backbone parameters
alpha_range = [0.1, 0.5]
beta_range = [0.5, 0.9]
num_subproblems_range = [5, 10]
num_iterations_range = [1]
# Define parameter ranges for FlowOCT parameters
depth_range = [2]
_lambda_range = [0.5]
# Define dataset parameters
n_informative = 4
n_bins = 5
n_features_range = [20]
n_samples = 500
n_classes = 2
random_state = 17
time_limit = 3600
log_filename = "decision_tree_results.json"
results = []
# Experiment loop
for n_features in n_features_range:
# Generate synthetic classification data
X, y = make_classification(
n_samples=n_samples,
n_informative=n_informative,
n_features=n_features,
n_classes=n_classes,
random_state=random_state,
)
# Convert features to binary
est_X = KBinsDiscretizer(
n_bins=n_bins, encode="ordinal", strategy="quantile", random_state=random_state
)
est_X.fit(X)
X_bin = est_X.transform(X)
enc = OneHotEncoder(handle_unknown="error", drop="if_binary")
X_cat_enc = enc.fit_transform(X_bin).toarray()
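    # Quantile binning followed by one-hot encoding produces the binary feature
    # matrix that the optimal-classification-tree (OCT) solver expects.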
# Split data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(
X_cat_enc, y, test_size=0.2, random_state=random_state
)
for depth in depth_range:
# CARTDecisionTree model iteration for heuristic_model
|
# Define parameter ranges for Backbone parameters
alpha_range = [0.1, 0.5]
beta_range = [0.5, 0.9]
num_subproblems_range = [5, 10]
num_iterations_range = [1]
# Define parameter ranges for FlowOCT parameters
depth_range = [2]
_lambda_range = [0.5]
# Define dataset parameters
n_informative = 4
n_bins = 5
n_features_range = [20]
n_samples = 500
n_classes = 2
random_state = 17
time_limit = 3600
log_filename = "decision_tree_results.json"
results = []
# Experiment loop
for n_features in n_features_range:
# Generate synthetic classification data
X, y = make_classification(
n_samples=n_samples,
n_informative=n_informative,
n_features=n_features,
n_classes=n_classes,
random_state=random_state,
)
# Convert features to binary
est_X = KBinsDiscretizer(
n_bins=n_bins, encode="ordinal", strategy="quantile", random_state=random_state
)
est_X.fit(X)
X_bin = est_X.transform(X)
enc = OneHotEncoder(handle_unknown="error", drop="if_binary")
X_cat_enc = enc.fit_transform(X_bin).toarray()
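    # Quantile binning followed by one-hot encoding produces the binary feature
    # matrix that the optimal-classification-tree (OCT) solver expects.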
# Split data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(
X_cat_enc, y, test_size=0.2, random_state=random_state
)
for depth in depth_range:
# CARTDecisionTree model iteration for heuristic_model | heuristic_model = CARTDecisionTree(max_depth=depth) | 1 | 2023-11-18 14:28:12+00:00 | 2k |
openclimatefix/Open-Source-Quartz-Solar-Forecast | tests/eval/test_pv.py | [
{
"identifier": "get_pv_truth",
"path": "quartz_solar_forecast/eval/pv.py",
"snippet": "def get_pv_truth(testset: pd.DataFrame):\n\n print('Loading PV data')\n\n # download from hugginface or load from cache\n cache_dir = \"data/pv\"\n metadata_file = f\"{cache_dir}/pv.netcdf\"\n if not os.path.exists(metadata_file):\n print('Loading from HF)')\n os.makedirs(cache_dir, exist_ok=True)\n fs.get(\"datasets/openclimatefix/uk_pv/pv.netcdf\", metadata_file)\n\n # Load in the dataset\n pv_ds = xr.open_dataset(metadata_file, engine=\"h5netcdf\")\n\n combined_data = []\n for index, row in testset.iterrows():\n print(f'Processing {index} of {len(testset)}')\n pv_id = str(row[\"pv_id\"])\n base_datetime = pd.to_datetime(row[\"timestamp\"])\n\n # Calculate future timestamps up to the max horizon\n for i in range(0, 49): # 48 hours in steps of 1 hour\n future_datetime = base_datetime + pd.DateOffset(hours=i)\n horizon = i # horizon in hours\n\n try:\n # Attempt to select data for the future datetime\n selected_data = pv_ds[pv_id].sel(datetime=future_datetime)\n value = selected_data.values.item()\n value = value / 1000 # to convert from w to kw\n except KeyError:\n # If data is not found for the future datetime, set value as NaN\n value = np.nan\n\n # Add the data to the DataFrame\n combined_data.append(pd.DataFrame(\n {\"pv_id\": pv_id, \"timestamp\": future_datetime, \"value\": value, \"horizon_hour\": horizon}, index=[i])\n )\n combined_data = pd.concat(combined_data)\n return combined_data"
},
{
"identifier": "get_pv_metadata",
"path": "quartz_solar_forecast/eval/pv.py",
"snippet": "def get_pv_metadata(testset: pd.DataFrame):\n\n # download from hugginface or load from cache\n cache_dir = \"data/pv\"\n metadata_file = f\"{cache_dir}/metadata.csv\"\n if not os.path.exists(metadata_file):\n os.makedirs(cache_dir, exist_ok=True)\n fs.get(\"datasets/openclimatefix/uk_pv/metadata.csv\", metadata_file)\n\n # Load in the dataset\n metadata_df = pd.read_csv(metadata_file)\n\n # join metadata with testset\n metadata_df = metadata_df.rename(columns={\"ss_id\": \"pv_id\"})\n combined_data = testset.merge(metadata_df, on=\"pv_id\", how=\"left\")\n\n # only keep the columns we need\n combined_data = combined_data[\n [\"pv_id\", \"timestamp\", \"latitude_rounded\", \"longitude_rounded\", \"kwp\"]\n ]\n\n # rename latitude_rounded to latitude and longitude_rounded to longitude\n combined_data = combined_data.rename(\n columns={\n \"latitude_rounded\": \"latitude\",\n \"longitude_rounded\": \"longitude\",\n \"kwp\": \"capacity\",\n }\n )\n\n # format datetime\n combined_data[\"timestamp\"] = pd.to_datetime(combined_data[\"timestamp\"])\n\n return combined_data"
}
] | from quartz_solar_forecast.eval.pv import get_pv_truth, get_pv_metadata
import pandas as pd | 840 |
def test_get_pv_metadata():
test_set_df = pd.DataFrame(
[
{
"timestamp": pd.Timestamp("2021-01-26 01:15:00"),
"pv_id": 8215,
}
]
)
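    # Single-row test set; get_pv_metadata joins it against the PV metadata
    # downloaded from the openclimatefix/uk_pv dataset.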
|
def test_get_pv_metadata():
test_set_df = pd.DataFrame(
[
{
"timestamp": pd.Timestamp("2021-01-26 01:15:00"),
"pv_id": 8215,
}
]
)
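    # Single-row test set; get_pv_metadata joins it against the PV metadata
    # downloaded from the openclimatefix/uk_pv dataset.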
| metadata_df = get_pv_metadata(test_set_df) | 1 | 2023-11-16 07:37:42+00:00 | 2k |
newcastleuniversity/DISPEL | dispel/providers/generic/tasks/sbt_utt/sbt_func.py | [
{
"identifier": "MIN_MOTION_DUR",
"path": "dispel/providers/generic/tasks/sbt_utt/const.py",
"snippet": "MIN_MOTION_DUR = 1"
},
{
"identifier": "signal_duration",
"path": "dispel/signal/core.py",
"snippet": "def signal_duration(data: Union[pd.Series, pd.DataFrame]) -> float:\n \"\"\"Get signal duration from time-based indices.\n\n Parameters\n ----------\n data\n The signal of which we want to compute the duration based on its index. The\n index has to be either a TimedeltaIndex or DatetimeIndex.\n\n Returns\n -------\n float\n The duration of the signal (in seconds) from the index.\n \"\"\"\n assert isinstance(data.index, (pd.TimedeltaIndex, pd.DatetimeIndex))\n return (data.index.max() - data.index.min()).total_seconds()"
},
{
"identifier": "extract_ellipse_axes",
"path": "dispel/signal/geometric.py",
"snippet": "def extract_ellipse_axes(comps: pd.DataFrame) -> Tuple[float, float]:\n \"\"\"Extract length of the axes of an ellipse covering 95-percentile of data.\n\n Parameters\n ----------\n comps\n A pd.DataFrame with a 2-dimensional timeseries\n\n Returns\n -------\n Union[int, int]\n Tuple containing:\n major_axis : float\n The length of the major axis of an ellipse\n minor_axis : float\n The length of the minor axis of an ellipse\n \"\"\"\n # Extract PCA components of the 2-dimensional planar timeseries\n pca = PCA(n_components=2)\n pca = pca.fit(comps)\n\n # Transform distribution to canonical cartesian axes\n data_transformed = pca.transform(comps)\n data_transformed_df = pd.DataFrame(data_transformed, columns=[\"ap\", \"ml\"])\n\n # Compute the min and max boundaries of 95% of data covered by the ellipse\n ml_min = np.quantile(data_transformed_df.ml, 0.05)\n ml_max = np.quantile(data_transformed_df.ml, 0.95)\n ap_min = np.quantile(data_transformed_df.ap, 0.05)\n ap_max = np.quantile(data_transformed_df.ap, 0.95)\n\n # Compute the range of each axes (i.e., ml and ap)\n rang_ml = abs(ml_max - ml_min)\n rang_ap = abs(ap_max - ap_min)\n\n # Select the minor and major axes\n major_axis = max([rang_ml, rang_ap])\n minor_axis = min([rang_ml, rang_ap])\n\n return major_axis, minor_axis"
},
{
"identifier": "mean_norm_planar",
"path": "dispel/signal/vectorial.py",
"snippet": "def mean_norm_planar(comp1: pd.Series, comp2: pd.Series) -> float:\n \"\"\"Compute the mean norm of a 2-dimensional timeseries.\n\n The mean norm of a 2-dimensional timeseries is referred to as the Average\n Acceleration Amplitude eq. A2 of Martinez(2012)\n https://doi.org/10.1080/10255842.2011.565753\n\n Parameters\n ----------\n comp1\n The first component of the signal\n comp2\n The second component of the signal\n\n Returns\n -------\n float\n The average value of the norm of a 2 dimensional timeseries\n \"\"\"\n return resultant_norm_planar(comp1, comp2).mean()"
},
{
"identifier": "resultant_norm_planar",
"path": "dispel/signal/vectorial.py",
"snippet": "def resultant_norm_planar(comp1: pd.Series, comp2: pd.Series) -> pd.Series:\n \"\"\"Compute the norm of the resultant of a 2-dimensional vector on a plane.\n\n The norm of the resultant of 2-components represents the magnitude of a\n 2-dimensional vector.\n\n Parameters\n ----------\n comp1\n The first component of the signal\n comp2\n The second component of the signal\n\n Returns\n -------\n pd.Series\n A series comprising the norm values of the resultant of 2-dimensional\n vectorial timeseries\n \"\"\"\n return np.sqrt(comp1**2 + comp2**2)"
},
{
"identifier": "rms_planar",
"path": "dispel/signal/vectorial.py",
"snippet": "def rms_planar(comp1: pd.Series, comp2: pd.Series) -> float:\n \"\"\"Compute the RMS of a 2-dimensional timeseries.\n\n The Root-Mean-Square of a 2-dimensional timeseries as presented in eq. A4 of\n Martinez(2012) https://doi.org/10.1080/10255842.2011.565753\n\n Parameters\n ----------\n comp1\n The first component of the signal\n comp2\n The second component of the signal\n\n Returns\n -------\n float\n The RMS value of a 2-dimensional timeseries\n \"\"\"\n return np.sqrt(np.mean(comp1**2 + comp2**2))"
}
] | import numpy as np
import pandas as pd
from dispel.providers.generic.tasks.sbt_utt.const import MIN_MOTION_DUR
from dispel.signal.core import signal_duration
from dispel.signal.geometric import extract_ellipse_axes
from dispel.signal.vectorial import mean_norm_planar, resultant_norm_planar, rms_planar | 1,471 | """Functionality implemented in SBT.steps module."""
def label_bouts(data: pd.Series) -> pd.Series:
"""Label each valid and invalid chunk as a bout.
Parameters
----------
data
A Series that contains one column including the flag continuous
signal
Returns
-------
Series
A labelled pd.Series where each valid/invalid bout is assigned an
increasing integer number
"""
    # We increase a counter number every time the flag changes (solution
    # inspired by the Stack Overflow community)
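    # e.g. [True, True, False, False, True] -> bouts [0, 0, 1, 1, 2]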
return data.astype(bool).diff().fillna(method="bfill").cumsum()
def reject_short_bouts(bout_mask: pd.Series, flag: pd.Series) -> pd.Series:
"""Reject bouts whose duration is less than MIN_MOTION_DUR seconds.
Parameters
----------
bout_mask
A Series containing a flag_signal and a bout_number.
flag
        A Series containing the flag signal whose short bouts are inverted.
Returns
-------
Series
A Series with a flag_signal where the valence has been inverted
in case its duration is below MIN_MOTION_DUR seconds.
"""
flag = flag.astype(bool)
for _, bout in bout_mask.groupby(bout_mask):
| """Functionality implemented in SBT.steps module."""
def label_bouts(data: pd.Series) -> pd.Series:
"""Label each valid and invalid chunk as a bout.
Parameters
----------
data
A Series that contains one column including the flag continuous
signal
Returns
-------
Series
A labelled pd.Series where each valid/invalid bout is assigned an
increasing integer number
"""
    # We increase a counter number every time the flag changes (solution
    # inspired by the Stack Overflow community)
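    # e.g. [True, True, False, False, True] -> bouts [0, 0, 1, 1, 2]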
return data.astype(bool).diff().fillna(method="bfill").cumsum()
def reject_short_bouts(bout_mask: pd.Series, flag: pd.Series) -> pd.Series:
"""Reject bouts whose duration is less than MIN_MOTION_DUR seconds.
Parameters
----------
bout_mask
A Series containing a flag_signal and a bout_number.
flag
        A Series containing the flag signal whose short bouts are inverted.
Returns
-------
Series
A Series with a flag_signal where the valence has been inverted
in case its duration is below MIN_MOTION_DUR seconds.
"""
flag = flag.astype(bool)
for _, bout in bout_mask.groupby(bout_mask): | if signal_duration(bout) < MIN_MOTION_DUR: | 0 | 2023-11-14 10:06:46+00:00 | 2k |
runDMCA/home-assistant-mazda | custom_components/mazda/pymazda/sensordata/system_info.py | [
{
"identifier": "AndroidBuilds",
"path": "custom_components/mazda/pymazda/sensordata/android_builds.py",
"snippet": "class AndroidBuilds: # noqa: D101\n def __init__(self): # noqa: D107\n self.builds = None\n\n def get_builds(self): # noqa: D102\n if self.builds is None:\n self.builds = json.loads(ANDROID_BUILDS_JSON)\n\n return self.builds"
},
{
"identifier": "percent_encode",
"path": "custom_components/mazda/pymazda/sensordata/sensor_data_util.py",
"snippet": "def percent_encode(str): # noqa: D100, D103\n if str is None:\n return \"\"\n\n result_str = \"\"\n for char in str.encode():\n if (\n char >= 33\n and char <= 0x7E\n and char != 34\n and char != 37\n and char != 39\n and char != 44\n and char != 92\n ):\n result_str += chr(char)\n else:\n result_str += \"%\"\n result_str += format(char, \"x\").upper()\n return result_str"
},
{
"identifier": "sum_char_codes",
"path": "custom_components/mazda/pymazda/sensordata/sensor_data_util.py",
"snippet": "def sum_char_codes(str): # noqa: D103\n sum = 0\n for char in str.encode():\n if char < 0x80:\n sum += char\n return sum"
}
] | import random # noqa: D100
import secrets
from .android_builds import AndroidBuilds
from .sensor_data_util import percent_encode, sum_char_codes | 1,250 |
SCREEN_SIZES = [[1280, 720], [1920, 1080], [2560, 1440]]
ANDROID_VERSION_TO_SDK_VERSION = {
"11": 30,
"10": 29,
"9": 28,
"8.1.0": 27,
"8.0.0": 26,
"7.1": 25,
"7.0": 24,
}
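# Build fingerprints generated below follow the AOSP pattern:
# brand/product/device:version/build-id/incremental:type/tags.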
class SystemInfo: # noqa: D101
def __init__(self): # noqa: D107
self.android_builds = AndroidBuilds()
def randomize(self): # noqa: D102
device_model, device = random.choice(
list(self.android_builds.get_builds().items())
)
codename = device["codename"]
build = random.choice(device["builds"])
build_version_incremental = random.randrange(1000000, 9999999)
self.screen_height, self.screen_width = random.choice(SCREEN_SIZES)
self.battery_charging = random.randrange(0, 10) <= 1
self.battery_level = random.randrange(10, 90)
self.orientation = 1
self.language = "en"
self.android_version = build["version"]
self.rotation_lock = "1" if random.randrange(0, 10) > 1 else "0"
self.build_model = device_model
self.build_bootloader = str(random.randrange(1000000, 9999999))
self.build_hardware = codename
self.package_name = "com.interrait.mymazda"
self.android_id = secrets.token_bytes(8).hex()
self.keyboard = 0
self.adb_enabled = False
self.build_version_codename = "REL"
self.build_version_incremental = build_version_incremental
self.build_version_sdk = ANDROID_VERSION_TO_SDK_VERSION.get(build["version"])
self.build_manufacturer = "Google"
self.build_product = codename
self.build_tags = "release-keys"
self.build_type = "user"
self.build_user = "android-build"
self.build_display = build["buildId"]
self.build_board = codename
self.build_brand = "google"
self.build_device = codename
self.build_fingerprint = f"google/{codename}/{codename}:{build['version']}/{build['buildId']}/{build_version_incremental}:user/release-keys"
self.build_host = f"abfarm-{random.randrange(10000, 99999)}"
self.build_id = build["buildId"]
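    # to_string() serializes these fields in the fixed, comma-separated order the
    # sensor-data payload expects; literal "-1" entries mark intentionally unset slots.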
def to_string(self): # noqa: D102
return ",".join(
[
"-1",
"uaend",
"-1",
str(self.screen_height),
str(self.screen_width),
("1" if self.battery_charging else "0"),
str(self.battery_level),
str(self.orientation),
percent_encode(self.language),
percent_encode(self.android_version),
self.rotation_lock,
percent_encode(self.build_model),
percent_encode(self.build_bootloader),
percent_encode(self.build_hardware),
"-1",
self.package_name,
"-1",
"-1",
self.android_id,
"-1",
str(self.keyboard),
"1" if self.adb_enabled else "0",
percent_encode(self.build_version_codename),
percent_encode(str(self.build_version_incremental)),
str(self.build_version_sdk),
percent_encode(self.build_manufacturer),
percent_encode(self.build_product),
percent_encode(self.build_tags),
percent_encode(self.build_type),
percent_encode(self.build_user),
percent_encode(self.build_display),
percent_encode(self.build_board),
percent_encode(self.build_brand),
percent_encode(self.build_device),
percent_encode(self.build_fingerprint),
percent_encode(self.build_host),
percent_encode(self.build_id),
]
)
def get_char_code_sum(self): # noqa: D102
|
SCREEN_SIZES = [[1280, 720], [1920, 1080], [2560, 1440]]
ANDROID_VERSION_TO_SDK_VERSION = {
"11": 30,
"10": 29,
"9": 28,
"8.1.0": 27,
"8.0.0": 26,
"7.1": 25,
"7.0": 24,
}
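# Build fingerprints generated below follow the AOSP pattern:
# brand/product/device:version/build-id/incremental:type/tags.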
class SystemInfo: # noqa: D101
def __init__(self): # noqa: D107
self.android_builds = AndroidBuilds()
def randomize(self): # noqa: D102
device_model, device = random.choice(
list(self.android_builds.get_builds().items())
)
codename = device["codename"]
build = random.choice(device["builds"])
build_version_incremental = random.randrange(1000000, 9999999)
self.screen_height, self.screen_width = random.choice(SCREEN_SIZES)
self.battery_charging = random.randrange(0, 10) <= 1
self.battery_level = random.randrange(10, 90)
self.orientation = 1
self.language = "en"
self.android_version = build["version"]
self.rotation_lock = "1" if random.randrange(0, 10) > 1 else "0"
self.build_model = device_model
self.build_bootloader = str(random.randrange(1000000, 9999999))
self.build_hardware = codename
self.package_name = "com.interrait.mymazda"
self.android_id = secrets.token_bytes(8).hex()
self.keyboard = 0
self.adb_enabled = False
self.build_version_codename = "REL"
self.build_version_incremental = build_version_incremental
self.build_version_sdk = ANDROID_VERSION_TO_SDK_VERSION.get(build["version"])
self.build_manufacturer = "Google"
self.build_product = codename
self.build_tags = "release-keys"
self.build_type = "user"
self.build_user = "android-build"
self.build_display = build["buildId"]
self.build_board = codename
self.build_brand = "google"
self.build_device = codename
self.build_fingerprint = f"google/{codename}/{codename}:{build['version']}/{build['buildId']}/{build_version_incremental}:user/release-keys"
self.build_host = f"abfarm-{random.randrange(10000, 99999)}"
self.build_id = build["buildId"]
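    # to_string() serializes these fields in the fixed, comma-separated order the
    # sensor-data payload expects; literal "-1" entries mark intentionally unset slots.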
def to_string(self): # noqa: D102
return ",".join(
[
"-1",
"uaend",
"-1",
str(self.screen_height),
str(self.screen_width),
("1" if self.battery_charging else "0"),
str(self.battery_level),
str(self.orientation),
percent_encode(self.language),
percent_encode(self.android_version),
self.rotation_lock,
percent_encode(self.build_model),
percent_encode(self.build_bootloader),
percent_encode(self.build_hardware),
"-1",
self.package_name,
"-1",
"-1",
self.android_id,
"-1",
str(self.keyboard),
"1" if self.adb_enabled else "0",
percent_encode(self.build_version_codename),
percent_encode(str(self.build_version_incremental)),
str(self.build_version_sdk),
percent_encode(self.build_manufacturer),
percent_encode(self.build_product),
percent_encode(self.build_tags),
percent_encode(self.build_type),
percent_encode(self.build_user),
percent_encode(self.build_display),
percent_encode(self.build_board),
percent_encode(self.build_brand),
percent_encode(self.build_device),
percent_encode(self.build_fingerprint),
percent_encode(self.build_host),
percent_encode(self.build_id),
]
)
def get_char_code_sum(self): # noqa: D102 | return sum_char_codes(self.to_string()) | 2 | 2023-11-14 01:42:43+00:00 | 2k |
uysalserkan/url-shorter | app.py | [
{
"identifier": "URLS",
"path": "models/urls.py",
"snippet": "class URLS(SQLModel, table=True):\n id: Optional[int] = Field(default=None, primary_key=True)\n long_url: str = Field(nullable=False)\n generated_url: str = Field(nullable=True)\n created_date: int = datetime.utcnow().timestamp()\n expire_date: int = Field(nullable=False)\n\n @classmethod\n def generate_randoms(cls):\n \"\"\"Docstring.\"\"\"\n characters = string.ascii_letters + string.digits\n\n return ''.join(random.choice(characters) for _ in range(10))"
},
{
"identifier": "URLController",
"path": "controller/url_c.py",
"snippet": "class URLController:\n \"\"\"Universal URL Controller.\"\"\"\n @classmethod\n def get(cls, url_id):\n \"\"\"Get URL object with id field.\"\"\"\n try:\n url_obj = DB_engine.get(statement=select(URLS).where(URLS.id == url_id), first=True)\n\n return url_obj\n\n except Exception as exc:\n print(\"ERROR:\", exc)\n\n @classmethod\n def delete(cls, url_id):\n \"\"\"Delete a url with id field.\"\"\"\n try:\n url_obj = cls.get(url_id=url_id)\n status = DB_engine.delete(obj=url_obj, batch=False)\n if not status:\n raise Exception(\"Did not delete.\")\n\n except Exception as exc:\n print(\"ERROR:\", exc)"
},
{
"identifier": "settings",
"path": "config.py",
"snippet": ""
},
{
"identifier": "DatabaseEngine",
"path": "engines.py",
"snippet": "class DatabaseEngine(Singlethon):\n \"\"\"Database Engine.\"\"\"\n def __init__(self):\n sql_file_path = os.path.join(settings.DATABASE.FOLDER_PATH, settings.DATABASE.NAME)\n sqlite_url = f\"sqlite:///{sql_file_path}\"\n\n self.engine = create_engine(sqlite_url, echo=False)\n\n def get(self, statement, first: bool):\n \"\"\"Get elements of the sql statement.\"\"\"\n with Session(self.engine) as sess:\n results = sess.exec(\n statement=statement\n ).all()\n\n return results[0] if first else results\n\n def add(self, obj, batch: bool = False):\n \"\"\"Add object.\"\"\"\n if not batch:\n with Session(self.engine) as sess:\n sess.add(obj)\n sess.commit()\n sess.refresh(obj)\n\n def delete(self, obj, batch: bool = False) -> bool:\n \"\"\"Delete object.\"\"\"\n if not batch:\n with Session(self.engine) as sess:\n if not obj:\n return False\n\n sess.delete(obj)\n sess.commit()\n\n return True"
},
{
"identifier": "MinIOEngine",
"path": "engines.py",
"snippet": "class MinIOEngine(Singlethon):\n \"\"\"MinIO Engine\"\"\"\n def __init__(self):\n self.client = Minio(\n endpoint=settings.BUCKET.MINIO_SERVER,\n access_key=secrets.development.MINIO_USERNAME,\n secret_key=secrets.development.MINIO_PASSWORD,\n secure=False\n )\n self.bucket_name = settings.BUCKET.MINIO_BUCKET\n\n def add(self, file: UploadFile, short_name: str):\n try:\n _ = self.put_object(\n bucket_name=self.bucket_name,\n object_name=short_name,\n data=file.file,\n length=file.size,\n metadata={\n 'filename': file.filename,\n 'content_type': file.content_type,\n 'headers': file.headers,\n 'size': file.size\n }\n )\n\n except Exception as exc:\n print(\"Error:\", exc)\n\n def get(self, short_name: str):\n try:\n return self.client.get_object(\n bucket_name=self.bucket_name,\n object_name=short_name\n )\n\n except Exception as exc:\n print(\"Error\", exc)\n\n def create_bucket(self):\n try:\n if not self.client.bucket_exists(self.bucket_name):\n self.client.make_bucket(self.bucket_name)\n\n except Exception as exc:\n print(exc)\n\n def delete(self, short_url):\n try:\n self.client.remove_object(\n bucket_name=self.bucket_name,\n object_name=short_url\n )\n\n except Exception as exc:\n print(exc)"
}
] | import multiprocessing
import time
from datetime import datetime, timedelta
from fastapi import FastAPI, Response, UploadFile, Request
from fastapi.responses import RedirectResponse, JSONResponse
from sqlmodel import select
from prometheus_fastapi_instrumentator import Instrumentator
from models.urls import URLS
from controller.url_c import URLController
from config import settings, secrets
from engines import DatabaseEngine, MinIOEngine
from validators import url_validation | 1,116 | """URL Shorter API."""
app = FastAPI(
title="URL Shorter Service",
description="Short your long url links.",
)
Instrumentator().instrument(app).expose(app)
| """URL Shorter API."""
app = FastAPI(
title="URL Shorter Service",
description="Short your long url links.",
)
Instrumentator().instrument(app).expose(app)
| DB_engine = DatabaseEngine() | 3 | 2023-11-16 10:43:45+00:00 | 2k |
logicalroot/gpt-4v-demos | pages/3_📋_Quality_Control.py | [
{
"identifier": "show_code",
"path": "utils.py",
"snippet": "def show_code(code):\n \"\"\"Showing the code of the demo.\"\"\"\n show_code = st.sidebar.checkbox(\"Show code\", False)\n if show_code:\n st.markdown(\"## Code\")\n for function in code:\n # Showing the code of the demo.\n sourcelines, _ = inspect.getsourcelines(function)\n st.code(textwrap.dedent(\"\".join(sourcelines[0:])))"
},
{
"identifier": "extract_json",
"path": "parsers.py",
"snippet": "def extract_json(string):\n \"\"\"\n This function extracts the first valid JSON object from a given string.\n\n Parameters:\n string (str): The string from which to extract the JSON object.\n\n Returns:\n obj: The first valid JSON object found in the string.\n\n Raises:\n ValueError: If no valid JSON object is found in the string.\n \"\"\"\n start_positions = [pos for pos, char in enumerate(string) if char == \"{\"]\n end_positions = [pos for pos, char in enumerate(string) if char == \"}\"]\n\n for start in start_positions:\n for end in reversed(end_positions):\n if start < end:\n try:\n obj = json.loads(string[start : end + 1])\n return json.dumps(obj, indent=4, ensure_ascii=False)\n except JSONDecodeError:\n continue\n\n return \"{}\""
}
] | import streamlit as st
import base64
import requests
import json
import components
from utils import show_code
from parsers import extract_json | 762 |
def submit(image, api_key, issue_attributes):
headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
base64_image = base64.b64encode(image).decode("utf-8")
payload = {
"model": "gpt-4-vision-preview",
"messages": [
{
"role": "system",
"content": "You are an expert quality control inspector for leading manufacturers.",
},
{
"role": "user",
"content": [
{
"type": "text",
"text": (
"Inspect this image and write a report in the following format:\n\n"
"```json\n"
"{\n"
' "issues": [\n'
" {\n"
f"{issue_attributes}\n"
" }\n"
" ]\n"
"}\n"
"```\n\n"
"If you see any signs of quality deterioration of any kind, such as corrosion, "
"physical damage, decay, or contamination, add them as separate issues in the "
"`issues` array. If there are no issues, the `issues` array should be empty. "
"Your response should contain only valid JSON."
),
},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
},
],
},
],
"max_tokens": 1024,
"temperature": 0.1,
# Response format not yet supported by GPT-4V
# "response_format": {"type": "json_object"},
}
try:
response = requests.post(
"https://api.openai.com/v1/chat/completions", headers=headers, json=payload
)
response.raise_for_status()
|
def submit(image, api_key, issue_attributes):
headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
base64_image = base64.b64encode(image).decode("utf-8")
payload = {
"model": "gpt-4-vision-preview",
"messages": [
{
"role": "system",
"content": "You are an expert quality control inspector for leading manufacturers.",
},
{
"role": "user",
"content": [
{
"type": "text",
"text": (
"Inspect this image and write a report in the following format:\n\n"
"```json\n"
"{\n"
' "issues": [\n'
" {\n"
f"{issue_attributes}\n"
" }\n"
" ]\n"
"}\n"
"```\n\n"
"If you see any signs of quality deterioration of any kind, such as corrosion, "
"physical damage, decay, or contamination, add them as separate issues in the "
"`issues` array. If there are no issues, the `issues` array should be empty. "
"Your response should contain only valid JSON."
),
},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
},
],
},
],
"max_tokens": 1024,
"temperature": 0.1,
# Response format not yet supported by GPT-4V
# "response_format": {"type": "json_object"},
}
try:
response = requests.post(
"https://api.openai.com/v1/chat/completions", headers=headers, json=payload
)
response.raise_for_status()
| text = extract_json(response.json()["choices"][0]["message"]["content"]) | 1 | 2023-11-14 21:29:43+00:00 | 2k |
intel/llm-on-ray | inference/api_server_openai.py | [
{
"identifier": "RouterQueryClient",
"path": "inference/api_openai_backend/query_client.py",
"snippet": "class RouterQueryClient():\n def __init__(self, serve_deployments):\n self.serve_deployments = serve_deployments\n\n async def query(self, model: str, prompt: Prompt, request_id: str):\n response_stream = self.stream(\n model,\n prompt,\n request_id,\n )\n responses = [resp async for resp in response_stream]\n return ModelResponse.merge_stream(*responses)\n\n async def stream(\n self, model: str, prompt: Prompt, request_id: str\n ):\n if model in self.serve_deployments:\n deploy_handle = self.serve_deployments[model]\n else:\n raise HTTPException(404, f\"Could not find model with id {model}\")\n\n prompt_content = prompt.prompt\n request_config = prompt.parameters\n temperature = request_config.get(\"temperature\", 1.0)\n top_p = request_config.get(\"top_p\", 1.0)\n max_new_tokens = request_config.get(\"max_tokens\", None)\n gen_config = {\n \"max_new_tokens\": max_new_tokens,\n \"temperature\": temperature,\n \"top_p\": top_p,\n }\n if temperature != 1.0 or top_p != 1.0:\n gen_config.update({\"do_sample\": True})\n\n async for x in handle_request(\n model=model,\n prompt=prompt,\n request_id=request_id,\n async_iterator=deploy_handle.options(stream=True).stream_response.options(stream=True, use_new_handle_api=True).remote(prompt_content, gen_config)\n ):\n yield x\n\n async def model(self, model_id: str) -> ModelCard:\n \"\"\"Get configurations for a supported model\"\"\"\n return ModelCard(\n id=model_id,\n root=model_id,\n )\n\n async def models(self) -> Dict[str, ModelCard]:\n \"\"\"Get configurations for supported models\"\"\"\n metadatas = {}\n for model_id in self.serve_deployments:\n metadatas[model_id] = await self.model(model_id)\n return metadatas"
},
{
"identifier": "Router",
"path": "inference/api_openai_backend/router_app.py",
"snippet": "TIMEOUT = float(os.environ.get(\"ROUTER_HTTP_TIMEOUT\", 600))\ndef init() -> FastAPI:\nasync def _completions_wrapper(\n completion_id: str,\n body: CompletionRequest,\n response: Response,\n generator: AsyncGenerator[ModelResponse, None],\n) -> AsyncGenerator[str, None]:\nasync def _chat_completions_wrapper(\n completion_id: str,\n body: ChatCompletionRequest,\n response: Response,\n generator: AsyncGenerator[ModelResponse, None],\n) -> AsyncGenerator[str, None]:\n def __init__(\n self,\n query_client: RouterQueryClient,\n ) -> None:\n async def models(self) -> ModelList:\n async def model_data(self, model: str) -> ModelCard:\n async def completions(\n self,\n body: CompletionRequest,\n response: FastAPIResponse,\n ):\n async def chat(\n self,\n body: ChatCompletionRequest,\n response: FastAPIResponse,\n ):\n async def health_check(self) -> bool:\nclass Router:"
}
] | import os
from ray import serve
from inference.api_openai_backend.query_client import RouterQueryClient
from inference.api_openai_backend.router_app import Router, router_app | 1,317 | #
# Copyright 2023 The LLM-on-Ray Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ===========================================================================
#
# This file is adapted from
# https://github.com/ray-project/ray-llm/blob/b3560aa55dadf6978f0de0a6f8f91002a5d2bed1/aviary/backend/server/run.py
# Copyright 2023 Anyscale
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def router_application(deployments):
"""Create a Router Deployment.
Router Deployment will point to a Serve Deployment for each specified base model,
and have a client to query each one.
"""
merged_client = RouterQueryClient(deployments)
RouterDeployment = serve.deployment(
route_prefix="/",
autoscaling_config={
"min_replicas": int(os.environ.get("ROUTER_MIN_REPLICAS", 2)),
"initial_replicas": int(os.environ.get("ROUTER_INITIAL_REPLICAS", 2)),
"max_replicas": int(os.environ.get("ROUTER_MAX_REPLICAS", 16)),
"target_num_ongoing_requests_per_replica": int(
os.environ.get("ROUTER_TARGET_NUM_ONGOING_REQUESTS_PER_REPLICA", 200)
),
},
max_concurrent_queries=1000, # Maximum backlog for a single replica
| #
# Copyright 2023 The LLM-on-Ray Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ===========================================================================
#
# This file is adapted from
# https://github.com/ray-project/ray-llm/blob/b3560aa55dadf6978f0de0a6f8f91002a5d2bed1/aviary/backend/server/run.py
# Copyright 2023 Anyscale
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def router_application(deployments):
"""Create a Router Deployment.
Router Deployment will point to a Serve Deployment for each specified base model,
and have a client to query each one.
"""
merged_client = RouterQueryClient(deployments)
RouterDeployment = serve.deployment(
route_prefix="/",
autoscaling_config={
"min_replicas": int(os.environ.get("ROUTER_MIN_REPLICAS", 2)),
"initial_replicas": int(os.environ.get("ROUTER_INITIAL_REPLICAS", 2)),
"max_replicas": int(os.environ.get("ROUTER_MAX_REPLICAS", 16)),
"target_num_ongoing_requests_per_replica": int(
os.environ.get("ROUTER_TARGET_NUM_ONGOING_REQUESTS_PER_REPLICA", 200)
),
},
max_concurrent_queries=1000, # Maximum backlog for a single replica | )(serve.ingress(router_app)(Router)) | 1 | 2023-11-13 05:08:21+00:00 | 2k |
carlhampuswall/smartknob_ha | custom_components/smartknob/store.py | [
{
"identifier": "DATA_REGISTRY",
"path": "custom_components/smartknob/const.py",
"snippet": "DATA_REGISTRY = f\"{DOMAIN}_storage\""
},
{
"identifier": "SAVE_DELAY",
"path": "custom_components/smartknob/const.py",
"snippet": "SAVE_DELAY = 10"
},
{
"identifier": "STORAGE_KEY",
"path": "custom_components/smartknob/const.py",
"snippet": "STORAGE_KEY = f\"{DOMAIN}.storage\""
},
{
"identifier": "_LOGGER",
"path": "custom_components/smartknob/logger.py",
"snippet": "_LOGGER = logging.getLogger(__name__)"
}
] | from collections import OrderedDict
from collections.abc import MutableMapping
from typing import Dict, cast
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.storage import Store
from homeassistant.loader import bind_hass
from .const import DATA_REGISTRY, SAVE_DELAY, STORAGE_KEY
from .logger import _LOGGER
import attr | 758 |
@attr.s(slots=True, frozen=True)
class AppEntry:
"""App storage entry."""
app_id = attr.ib(type=str, default=None)
app_slug_id = attr.ib(type=str, default=None)
entity_id = attr.ib(type=str, default=None)
friendly_name = attr.ib(type=str, default=None)
@attr.s(slots=True, frozen=True)
class SmartknobConfig:
"""Smartknob device configuration, storage entry."""
mac_address = attr.ib(type=str, default=None)
apps = attr.ib(type=list[AppEntry], default=None)
class SmartknobStorage:
"""Class to hold Smartknob storage."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize the Smartknob storage."""
self.hass = hass
self.config: MutableMapping[
str, str
] = {} #! ADD SMARTKNOB DEVICE SPECIFIC CONFIG HERE
self.knobs: MutableMapping[str, SmartknobConfig] = {}
self._store = Store(hass, 1, STORAGE_KEY)
async def async_load(self) -> None:
"""Load the registry of Smartknob."""
data = await self._store.async_load()
knobs: "OrderedDict[str, AppEntry]" = OrderedDict()
if data is None:
return
if "knobs" in data:
for knob in data["knobs"]:
apps = [
AppEntry(
app_id=app["app_id"],
app_slug_id=app["app_slug_id"],
entity_id=app["entity_id"],
friendly_name=app["friendly_name"],
)
for (app) in knob["apps"]
]
knobs[knob["mac_address"]] = SmartknobConfig(
mac_address=knob["mac_address"], apps=apps
)
self.knobs = knobs
# TODO ADD CHECK IF NO APPS
# if not apps:
# await self.async_factory_default()
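    # Saves are debounced: async_schedule_save defers the disk write by SAVE_DELAY
    # seconds instead of writing on every change.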
@callback
def async_schedule_save(self) -> None:
"""Schedule saving the registry of alarmo."""
self._store.async_delay_save(self._data_to_save, SAVE_DELAY)
async def async_save(self) -> None:
"""Save the registry of Smartknob."""
await self._store.async_save(self._data_to_save())
@callback
def _data_to_save(self) -> dict:
store_data = {"knobs": [attr.asdict(entry) for entry in self.knobs.values()]}
# EXAMPLE OF ADDING MORE DATA TO STORE
# store_data["apps"] = [attr.asdict(entry) for entry in self.areas.values()]
return store_data
async def async_delete(self):
"""Delete all registry data."""
|
@attr.s(slots=True, frozen=True)
class AppEntry:
"""App storage entry."""
app_id = attr.ib(type=str, default=None)
app_slug_id = attr.ib(type=str, default=None)
entity_id = attr.ib(type=str, default=None)
friendly_name = attr.ib(type=str, default=None)
@attr.s(slots=True, frozen=True)
class SmartknobConfig:
"""Smartknob device configuration, storage entry."""
mac_address = attr.ib(type=str, default=None)
apps = attr.ib(type=list[AppEntry], default=None)
class SmartknobStorage:
"""Class to hold Smartknob storage."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize the Smartknob storage."""
self.hass = hass
self.config: MutableMapping[
str, str
] = {} #! ADD SMARTKNOB DEVICE SPECIFIC CONFIG HERE
self.knobs: MutableMapping[str, SmartknobConfig] = {}
self._store = Store(hass, 1, STORAGE_KEY)
async def async_load(self) -> None:
"""Load the registry of Smartknob."""
data = await self._store.async_load()
knobs: "OrderedDict[str, AppEntry]" = OrderedDict()
if data is None:
return
if "knobs" in data:
for knob in data["knobs"]:
apps = [
AppEntry(
app_id=app["app_id"],
app_slug_id=app["app_slug_id"],
entity_id=app["entity_id"],
friendly_name=app["friendly_name"],
)
for (app) in knob["apps"]
]
knobs[knob["mac_address"]] = SmartknobConfig(
mac_address=knob["mac_address"], apps=apps
)
self.knobs = knobs
# TODO ADD CHECK IF NO APPS
# if not apps:
# await self.async_factory_default()
@callback
def async_schedule_save(self) -> None:
"""Schedule saving the registry of alarmo."""
self._store.async_delay_save(self._data_to_save, SAVE_DELAY)
async def async_save(self) -> None:
"""Save the registry of Smartknob."""
await self._store.async_save(self._data_to_save())
@callback
def _data_to_save(self) -> dict:
store_data = {"knobs": [attr.asdict(entry) for entry in self.knobs.values()]}
# EXAMPLE OF ADDING MORE DATA TO STORE
# store_data["apps"] = [attr.asdict(entry) for entry in self.areas.values()]
return store_data
async def async_delete(self):
"""Delete all registry data.""" | _LOGGER.warning("Removing Smartknob configuration data!") | 3 | 2023-11-13 16:37:20+00:00 | 2k |
chuzhumin98/LLM_Eval | PRE/eval.py | [
{
"identifier": "DataLoader",
"path": "PRE/data.py",
"snippet": "class DataLoader:\n '''\n The loader to load for evaluated task, with given prompt template to generate a series of prompts feeding for each LLM\n '''\n def __init__(self, args):\n self.path_data = args['path_data'] # the load path for the data\n self.format = args['format'] # the data format, csv (need a title line) or json (each line is a single data item)\n self.path_prompt = args['path_prompt'] if 'path_prompt' in args else None # the path of prompt template. In the prompt template, using {{key}} for the replacement of the key. For example, in the prompt \"You need answer a question: {{question}}\", the \"question\" field need to be included in the data\n if not os.path.exists(self.path_data):\n raise FileExistsError(\"Load task data failed: file not exist!\")\n assert self.format in ['csv', 'json']\n \n \n def generate_reader(self):\n if self.format == 'csv':\n with open(self.path_data, encoding='utf-8') as f:\n gen = csv.DictReader(f, skipinitialspace=True)\n elif self.format == 'json':\n gen = open(self.path_data, encoding='utf-8')\n else:\n raise Exception(\"Invalid data format\")\n return gen\n \n def get_prompt(self):\n if self.path_prompt is None:\n raise Exception(\"Exception: missing argument path_prompt\")\n if not os.path.exists(self.path_prompt):\n raise FileExistsError(\"Load task prompt template failed: file not exist!\")\n self.template_prompt = open(self.path_prompt, encoding='utf-8').read().strip()\n \n gen = self.generate_reader()\n \n for row in gen:\n if self.format == 'json':\n item = json.loads(row.strip())\n else:\n item = row\n \n prompt = self.template_prompt\n for key in item:\n prompt = prompt.replace(\"{{\" + key + \"}}\", item[key])\n yield prompt # a generator to return each prompt\n \n def get_task_items(self):\n data_list = []\n gen = self.generate_reader()\n for row in gen:\n if self.format == 'json':\n item = json.loads(row.strip())\n elif self.format == 'csv':\n item = dict(row)\n data_list.append(item)\n return data_list"
},
{
"identifier": "Auto_API",
"path": "PRE/api.py",
"snippet": "class Auto_API:\n @staticmethod\n def instantiate_api(api_type, args) -> LLM_API:\n for at, _API in API_type2class_list:\n if api_type == at:\n return _API(args)\n raise Exception(f\"Invalid api_type: {api_type}\")"
},
{
"identifier": "parse_response",
"path": "PRE/utils.py",
"snippet": "def parse_response(response, parse_type, nominal_list=None, nominal_ticks=None):\n '''\n parse_type: int, float or str\n if parse_type = str, then required parameter nominal_list and nominal_ticks\n nominal_list: a series of nominal types, its name\n nomianl_ticks: the corresponding nominal number (int)\n '''\n assert parse_type in ['int', 'float', 'str']\n if parse_type == 'int':\n nums = re.findall(r\"-?\\d+\", response)\n if len(nums) == 0:\n return None\n return int(nums[0])\n elif parse_type == 'float':\n nums = re.findall(r\"-?\\d+\\.?\\d*\", response)\n if len(nums) == 0:\n return None\n return int(nums[0])\n elif parse_type == 'str':\n appear_pos, cur_idx = math.inf, -1\n response = response.lower()\n for idx, label in enumerate(nominal_list):\n pos = response.find(label.lower())\n if pos != -1: # really appear!\n if pos < appear_pos:\n appear_pos, cur_idx = pos, idx\n if cur_idx == -1:\n return None\n else:\n return nominal_ticks[cur_idx]"
}
] | import os
import yaml
import warnings
import json
import copy
import sys
import numpy as np
from PRE.data import DataLoader
from PRE.api import Auto_API
from PRE.utils import parse_response | 1,253 | '''
The implementation of the peer review and result aggregation module
'''
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_dir)
class PEER_REVIEW:
'''
Conduct peer review, process for one prompt (pairwise or pointwise)
'''
def __init__(self, args) -> None:
self.parser_type = args['parser_type'] # int, float, str
self.task_name = args['task_name']
self.save_dir = args['save_dir']
if self.parser_type == 'str':
self.nominal_list = [nn.strip() for nn in args['nominal_list'].split(',')]
self.nominal_ticks = [int(nn.strip()) for nn in args['nominal_ticks'].split(',')]
else:
self.nominal_list, self.nominal_ticks = None, None
def peer_review_single_round(self, reviewers, prompts):
'''
used in gaming sampling strategy
reviewers: LLM config list
prompts: an array, each item is a dict with key "prompt"
        return a dict to denote the results of each evaluated task under all the reviewers; key: reviewer model name, value: the original response of this reviewer
'''
| '''
The implementation of the peer review and result aggregation module
'''
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_dir)
class PEER_REVIEW:
'''
Conduct peer review, process for one prompt (pairwise or pointwise)
'''
def __init__(self, args) -> None:
self.parser_type = args['parser_type'] # int, float, str
self.task_name = args['task_name']
self.save_dir = args['save_dir']
if self.parser_type == 'str':
self.nominal_list = [nn.strip() for nn in args['nominal_list'].split(',')]
self.nominal_ticks = [int(nn.strip()) for nn in args['nominal_ticks'].split(',')]
else:
self.nominal_list, self.nominal_ticks = None, None
def peer_review_single_round(self, reviewers, prompts):
'''
used in gaming sampling strategy
reviewers: LLM config list
prompts: an array, each item is a dict with key "prompt"
        return a dict to denote the results of each evaluated task under all the reviewers; key: reviewer model name, value: the original response of this reviewer
''' | apis_reviewer = [Auto_API.instantiate_api(config_api['api_type'], config_api) for config_api in reviewers] | 1 | 2023-11-16 18:40:23+00:00 | 2k |
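# --- Editor-added sketch (not part of the record above): a trimmed, runnable
# re-implementation of the parse_response helper quoted in the record's context,
# showing how raw reviewer responses become scores. Only the 'int' and 'str'
# branches are kept; the 'float' branch follows the same shape.
import re

def parse_response_demo(response, parse_type, nominal_list=None, nominal_ticks=None):
    if parse_type == "int":
        nums = re.findall(r"-?\d+", response)
        return int(nums[0]) if nums else None
    if parse_type == "str":
        response = response.lower()
        hits = [(response.find(label.lower()), tick)
                for label, tick in zip(nominal_list, nominal_ticks)
                if label.lower() in response]
        return min(hits)[1] if hits else None  # earliest-appearing label wins

assert parse_response_demo("I would rate it 4 out of 5.", "int") == 4
assert parse_response_demo("The answer is Good overall.", "str", ["good", "bad"], [1, 0]) == 1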
tahaafarooq/werkzeug-hash-cracker | cracker.py | [
{
"identifier": "SimplifierSingle",
"path": "simplifiers/simplifier.py",
"snippet": "class SimplifierSingle(object):\n def __init__(self, hasho, wordlist):\n self.hasho = hasho\n self.wordlist = wordlist\n\n def crack_single_hash(self):\n with open(self.wordlist, \"r\", encoding=\"latin-1\") as wordlist_file:\n for word in wordlist_file:\n words = word.strip().split()\n for line in words:\n check_hash = check_password_hash(self.hasho, line)\n if check_hash:\n print(f\"Hash: {self.hasho} Has Password {line}\")\n exit(0)\n else:\n continue"
},
{
"identifier": "SimplifierFile",
"path": "simplifiers/simplifier.py",
"snippet": "class SimplifierFile(object):\n def __init__(self, hash_file, wordlist):\n self.hash_file = hash_file\n self.wordlist = wordlist\n self.hashes = {}\n self.hashes_cracked = {}\n\n def interprete_hash_file(self):\n with open(self.hash_file, \"r\", encoding=\"latin-1\") as hashs:\n for hasho in hashs:\n words = hasho.strip().split()\n for line in words:\n self.hashes[line] = True\n return \"Saved The Hashes\"\n\n def crack_hash_file(self):\n with open(self.hash_file, \"r\") as hasho:\n hasho = hasho.read().split()\n\n with open(self.wordlist, \"r\", encoding=\"latin-1\") as wordlist_file:\n raw_words = wordlist_file.read().split()\n words = Queue()\n\n for word in raw_words:\n words.put(word)\n\n while not words.empty():\n for i in range(0, len(hasho)):\n password = words.get()\n if check_password_hash(hasho[i], password):\n print(f\"Hash: {hasho[i]} Has Password {password}\")\n break\n else:\n continue\n break\n exit(0)\n\n def check_results(self):\n if self.hashes_cracked is not None:\n return self.hashes_cracked"
}
] | import argparse
from simplifiers.simplifier import SimplifierSingle, SimplifierFile | 778 |
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Werkzeug Security Hash Cracker :: @tahaafarooq")
parser.add_argument('--single', nargs=2, metavar=('hash', 'wordlist'), help='Crack a single hash string')
parser.add_argument('--file', nargs=2, metavar=('hashfile', 'wordlist'), help='Crack a file with multiple hashes')
parser.add_argument('--about', action='store_true', help='Print core information about the script and developer')
args = parser.parse_args()
if args.about:
about = """
        Werkzeug Hash Cracker: A minimal script that cracks hashes generated by the werkzeug.security library in Python\n
        About Developer: Tahaa Farooq is a cybersecurity professional with a passion for programming. Check his GitHub for more information (https://github.com/tahaafarooq)"""
print(about)
elif args.single:
hash_string, wordlist_file = args.single
simple_crack = SimplifierSingle(hash_string, wordlist_file)
simple_crack.crack_single_hash()
elif args.file:
hash_file, wordlist_file = args.file
|
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Werkzeug Security Hash Cracker :: @tahaafarooq")
parser.add_argument('--single', nargs=2, metavar=('hash', 'wordlist'), help='Crack a single hash string')
parser.add_argument('--file', nargs=2, metavar=('hashfile', 'wordlist'), help='Crack a file with multiple hashes')
parser.add_argument('--about', action='store_true', help='Print core information about the script and developer')
args = parser.parse_args()
if args.about:
about = """
        Werkzeug Hash Cracker: A minimal script that cracks hashes generated by the werkzeug.security library in Python\n
        About Developer: Tahaa Farooq is a cybersecurity professional with a passion for programming. Check his GitHub for more information (https://github.com/tahaafarooq)"""
print(about)
elif args.single:
hash_string, wordlist_file = args.single
simple_crack = SimplifierSingle(hash_string, wordlist_file)
simple_crack.crack_single_hash()
elif args.file:
hash_file, wordlist_file = args.file | simple_crack = SimplifierFile(hash_file, wordlist_file) | 1 | 2023-11-10 01:29:15+00:00 | 2k |
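# --- Editor-added sketch (not part of the record above): the core check both
# Simplifier classes rely on. werkzeug's check_password_hash compares a
# candidate against a salted hash produced by generate_password_hash.
from werkzeug.security import check_password_hash, generate_password_hash

h = generate_password_hash("hunter2")
for candidate in ["123456", "password", "hunter2"]:
    if check_password_hash(h, candidate):
        print(f"Hash: {h} Has Password {candidate}")
        break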
victor0089/AirBnB_clone_v2 | models/engine/db_storage.py | [
{
"identifier": "Base",
"path": "models/base_model.py",
"snippet": "class BaseModel:\n def __init__(self, *args, **kwargs):\n def __str__(self):\n def __repr__(self):\n def save(self):\n def to_dict(self):\n def delete(self):"
},
{
"identifier": "State",
"path": "models/state.py",
"snippet": "class State(BaseModel, Base):\n \"\"\"This is the class for State\n Attributes:\n name: input name\n \"\"\"\n __tablename__ = \"states\"\n name = Column(String(128), nullable=False)\n cities = relationship(\"City\", cascade='all, delete, delete-orphan',\n backref=\"state\")\n\n @property\n def cities(self):\n var = models.storage.all()\n lista = []\n result = []\n for key in var:\n city = key.replace('.', ' ')\n city = shlex.split(city)\n if (city[0] == 'City'):\n lista.append(var[key])\n for elem in lista:\n if (elem.state_id == self.id):\n result.append(elem)\n return (result)"
},
{
"identifier": "City",
"path": "models/city.py",
"snippet": "class City(BaseModel, Base):\n \"\"\"This is the class for City\n Attributes:\n state_id: The state id\n name: input name\n \"\"\"\n __tablename__ = \"cities\"\n name = Column(String(128), nullable=False)\n state_id = Column(String(60), ForeignKey('states.id'), nullable=False)\n places = relationship(\"Place\", cascade='all, delete, delete-orphan',\n backref=\"cities\")"
},
{
"identifier": "User",
"path": "models/user.py",
"snippet": "class User(BaseModel, Base):\n \"\"\"This is the class for user\n Attributes:\n email: email address\n password: password for you login\n first_name: first name\n last_name: last name\n \"\"\"\n __tablename__ = \"users\"\n email = Column(String(128), nullable=False)\n password = Column(String(128), nullable=False)\n first_name = Column(String(128))\n last_name = Column(String(128))\n places = relationship(\"Place\", cascade='all, delete, delete-orphan',\n backref=\"user\")\n reviews = relationship(\"Review\", cascade='all, delete, delete-orphan',\n backref=\"user\")"
},
{
"identifier": "Place",
"path": "models/place.py",
"snippet": "class Place(BaseModel, Base):\n \"\"\"This is the class for Place\n Attributes:\n city_id: city id\n user_id: user id\n name: name input\n description: string of description\n number_rooms: number of room in int\n number_bathrooms: number of bathrooms in int\n max_guest: maximum guest in int\n price_by_night:: pice for a staying in int\n latitude: latitude in flaot\n longitude: longitude in float\n amenity_ids: list of Amenity ids\n \"\"\"\n __tablename__ = \"places\"\n city_id = Column(String(60), ForeignKey(\"cities.id\"), nullable=False)\n user_id = Column(String(60), ForeignKey(\"users.id\"), nullable=False)\n name = Column(String(128), nullable=False)\n description = Column(String(1024))\n number_rooms = Column(Integer, nullable=False, default=0)\n number_bathrooms = Column(Integer, nullable=False, default=0)\n max_guest = Column(Integer, nullable=False, default=0)\n price_by_night = Column(Integer, nullable=False, default=0)\n latitude = Column(Float)\n longitude = Column(Float)\n amenity_ids = []\n\n if getenv(\"HBNB_TYPE_STORAGE\") == \"db\":\n reviews = relationship(\"Review\", cascade='all, delete, delete-orphan',\n backref=\"place\")\n\n amenities = relationship(\"Amenity\", secondary=place_amenity,\n viewonly=False,\n back_populates=\"place_amenities\")\n else:\n @property\n def reviews(self):\n \"\"\" Returns list of reviews.id \"\"\"\n var = models.storage.all()\n lista = []\n result = []\n for key in var:\n review = key.replace('.', ' ')\n review = shlex.split(review)\n if (review[0] == 'Review'):\n lista.append(var[key])\n for elem in lista:\n if (elem.place_id == self.id):\n result.append(elem)\n return (result)\n\n @property\n def amenities(self):\n \"\"\" Returns list of amenity ids \"\"\"\n return self.amenity_ids\n\n @amenities.setter\n def amenities(self, obj=None):\n \"\"\" Appends amenity ids to the attribute \"\"\"\n if type(obj) is Amenity and obj.id not in self.amenity_ids:\n self.amenity_ids.append(obj.id)"
},
{
"identifier": "Review",
"path": "models/review.py",
"snippet": "class Review(BaseModel, Base):\n \"\"\"This is the class for Review\n Attributes:\n place_id: place id\n user_id: user id\n text: review description\n \"\"\"\n __tablename__ = \"reviews\"\n text = Column(String(1024), nullable=False)\n place_id = Column(String(60), ForeignKey(\"places.id\"), nullable=False)\n user_id = Column(String(60), ForeignKey(\"users.id\"), nullable=False)"
},
{
"identifier": "Amenity",
"path": "models/amenity.py",
"snippet": "class Amenity(BaseModel, Base):\n \"\"\"This is the class for Amenity\n Attributes:\n name: input name\n \"\"\"\n __tablename__ = \"amenities\"\n name = Column(String(128), nullable=False)\n place_amenities = relationship(\"Place\", secondary=place_amenity)"
}
] | from os import getenv
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy import (create_engine)
from sqlalchemy.ext.declarative import declarative_base
from models.base_model import Base
from models.state import State
from models.city import City
from models.user import User
from models.place import Place
from models.review import Review
from models.amenity import Amenity | 1,599 | #!/usr/bin/python3
""" new class for sqlAlchemy """
class DBStorage:
""" create tables in environmental"""
__engine = None
__session = None
def __init__(self):
'''instantiate new dbstorage instance'''
HBNB_MYSQL_USER = getenv('HBNB_MYSQL_USER')
HBNB_MYSQL_PWD = getenv('HBNB_MYSQL_PWD')
HBNB_MYSQL_HOST = getenv('HBNB_MYSQL_HOST')
HBNB_MYSQL_DB = getenv('HBNB_MYSQL_DB')
HBNB_ENV = getenv('HBNB_ENV')
self.__engine = create_engine(
'mysql+mysqldb://{}:{}@{}/{}'.format(
HBNB_MYSQL_USER,
HBNB_MYSQL_PWD,
HBNB_MYSQL_HOST,
HBNB_MYSQL_DB
), pool_pre_ping=True)
if HBNB_ENV == 'test':
| #!/usr/bin/python3
""" new class for sqlAlchemy """
class DBStorage:
""" create tables in environmental"""
__engine = None
__session = None
def __init__(self):
'''instantiate new dbstorage instance'''
HBNB_MYSQL_USER = getenv('HBNB_MYSQL_USER')
HBNB_MYSQL_PWD = getenv('HBNB_MYSQL_PWD')
HBNB_MYSQL_HOST = getenv('HBNB_MYSQL_HOST')
HBNB_MYSQL_DB = getenv('HBNB_MYSQL_DB')
HBNB_ENV = getenv('HBNB_ENV')
self.__engine = create_engine(
'mysql+mysqldb://{}:{}@{}/{}'.format(
HBNB_MYSQL_USER,
HBNB_MYSQL_PWD,
HBNB_MYSQL_HOST,
HBNB_MYSQL_DB
), pool_pre_ping=True)
if HBNB_ENV == 'test': | Base.metadata.drop_all(self.__engine) | 0 | 2023-11-17 07:59:13+00:00 | 2k |
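# --- Editor-added sketch (not part of the record above): what the completed
# 'test' branch does. Base.metadata.drop_all(engine) removes all mapped tables;
# an in-memory SQLite engine stands in for the MySQL URL built in __init__.
from sqlalchemy import Column, String, create_engine
from sqlalchemy.ext.declarative import declarative_base

DemoBase = declarative_base()

class DemoState(DemoBase):
    __tablename__ = "states"
    id = Column(String(60), primary_key=True)
    name = Column(String(128), nullable=False)

engine = create_engine("sqlite://", pool_pre_ping=True)
DemoBase.metadata.create_all(engine)
DemoBase.metadata.drop_all(engine)  # the HBNB_ENV == 'test' cleanup step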
believethehype/nostrdvm | examples/ollama_dvm/main.py | [
{
"identifier": "TextGenerationLLMLite",
"path": "nostr_dvm/tasks/textgeneration_llmlite.py",
"snippet": "class TextGenerationLLMLite(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_GENERATE_TEXT\n TASK: str = \"text-to-text\"\n FIX_COST: float = 0\n dependencies = [(\"nostr-dvm\", \"nostr-dvm\"),\n (\"litellm\", \"litellm==1.12.3\")]\n\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n dvm_config.SCRIPT = os.path.abspath(__file__)\n super().__init__(name, dvm_config, nip89config, admin_config, options)\n\n def is_input_supported(self, tags, client=None, dvm_config=None):\n for tag in tags:\n if tag.as_vec()[0] == 'i':\n input_value = tag.as_vec()[1]\n input_type = tag.as_vec()[2]\n if input_type != \"text\":\n return False\n\n return True\n\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n request_form = {\"jobID\": event.id().to_hex() + \"_\" + self.NAME.replace(\" \", \"\")}\n prompt = \"\"\n if self.options.get(\"default_model\") and self.options.get(\"default_model\") != \"\":\n model = self.options['default_model']\n else:\n model = \"gpt-3.5-turbo\" # \"gpt-4-1106-preview\" # This will call chatgpt and requires an OpenAI API Key set in .env\n if self.options.get(\"server\") and self.options.get(\"server\") != \"\":\n server = self.options['server']\n else:\n server = \"http://localhost:11434\" # default ollama server. This will only be used for ollama models.\n\n for tag in event.tags():\n if tag.as_vec()[0] == 'i':\n input_type = tag.as_vec()[2]\n if input_type == \"text\":\n prompt = tag.as_vec()[1]\n\n options = {\n \"prompt\": prompt,\n \"model\": model,\n \"server\": server\n }\n request_form['options'] = json.dumps(options)\n\n return request_form\n\n def process(self, request_form):\n from litellm import completion\n\n options = DVMTaskInterface.set_options(request_form)\n\n try:\n if options[\"model\"].startswith(\"ollama\"):\n response = completion(\n model=options[\"model\"],\n messages=[{\"content\": options[\"prompt\"], \"role\": \"user\"}],\n api_base=options[\"server\"],\n stream=False\n )\n print(response.choices[0].message.content)\n return response.choices[0].message.content\n else:\n response = completion(\n model=options[\"model\"],\n messages=[{\"content\": options[\"prompt\"], \"role\": \"user\"}],\n )\n print(response.choices[0].message.content)\n return response.choices[0].message.content\n\n except Exception as e:\n print(\"Error in Module: \" + str(e))\n raise Exception(e)"
},
{
"identifier": "AdminConfig",
"path": "nostr_dvm/utils/admin_utils.py",
"snippet": "class AdminConfig:\n REBROADCAST_NIP89: bool = False\n UPDATE_PROFILE: bool = False\n DELETE_NIP89: bool = False\n WHITELISTUSER: bool = False\n UNWHITELISTUSER: bool = False\n BLACKLISTUSER: bool = False\n DELETEUSER: bool = False\n LISTDATABASE: bool = False\n ClEANDB: bool = False\n\n USERNPUB: str = \"\"\n LUD16: str = \"\"\n\n EVENTID: str = \"\"\n PRIVKEY: str = \"\""
},
{
"identifier": "build_default_config",
"path": "nostr_dvm/utils/dvmconfig.py",
"snippet": "def build_default_config(identifier):\n dvm_config = DVMConfig()\n dvm_config.PRIVATE_KEY = check_and_set_private_key(identifier)\n dvm_config.IDENTIFIER = identifier\n npub = Keys.from_sk_str(dvm_config.PRIVATE_KEY).public_key().to_bech32()\n invoice_key, admin_key, wallet_id, user_id, lnaddress = check_and_set_ln_bits_keys(identifier, npub)\n dvm_config.LNBITS_INVOICE_KEY = invoice_key\n dvm_config.LNBITS_ADMIN_KEY = admin_key # The dvm might pay failed jobs back\n dvm_config.LNBITS_URL = os.getenv(\"LNBITS_HOST\")\n dvm_config.LN_ADDRESS = lnaddress\n return dvm_config"
},
{
"identifier": "NIP89Config",
"path": "nostr_dvm/utils/nip89_utils.py",
"snippet": "class NIP89Config:\n DTAG: str = \"\"\n NAME: str = \"\"\n KIND: int = None\n PK: str = \"\"\n CONTENT: str = \"\""
},
{
"identifier": "check_and_set_d_tag",
"path": "nostr_dvm/utils/nip89_utils.py",
"snippet": "def check_and_set_d_tag(identifier, name, pk, imageurl):\n if not os.getenv(\"NIP89_DTAG_\" + identifier.upper()):\n new_dtag = nip89_create_d_tag(name, Keys.from_sk_str(pk).public_key().to_hex(),\n imageurl)\n nip89_add_dtag_to_env_file(\"NIP89_DTAG_\" + identifier.upper(), new_dtag)\n print(\"Some new dtag:\" + new_dtag)\n return new_dtag\n else:\n return os.getenv(\"NIP89_DTAG_\" + identifier.upper())"
}
] | import json
import dotenv
from pathlib import Path
from nostr_dvm.tasks.textgeneration_llmlite import TextGenerationLLMLite
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.dvmconfig import build_default_config
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag | 1,406 |
def main():
identifier = "llama2"
name = "Ollama"
|
def main():
identifier = "llama2"
name = "Ollama"
| dvm_config = build_default_config(identifier) | 2 | 2023-11-17 18:32:56+00:00 | 2k |
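# --- Editor-added sketch (not part of the record above): a hypothetical
# stand-in mirroring the shape of build_default_config from the record's
# context. The real helper also provisions keys and LNbits wallets; this
# version only reads environment variables, and the env names are assumed.
import os
from dataclasses import dataclass

@dataclass
class DVMConfigDemo:
    IDENTIFIER: str = ""
    PRIVATE_KEY: str = ""
    LNBITS_URL: str = ""

def build_default_config_demo(identifier: str) -> DVMConfigDemo:
    cfg = DVMConfigDemo(IDENTIFIER=identifier)
    cfg.PRIVATE_KEY = os.getenv(f"DVM_PRIVATE_KEY_{identifier.upper()}", "")
    cfg.LNBITS_URL = os.getenv("LNBITS_HOST", "")
    return cfg

dvm_config = build_default_config_demo("llama2")  # mirrors the gold completion's call site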
zouXH-god/meme_web | meme_generator/manager.py | [
{
"identifier": "meme_config",
"path": "meme_generator/config.py",
"snippet": "class MemeConfig(BaseModel):\nclass ResourceConfig(BaseModel):\nclass GifConfig(BaseModel):\nclass TranslatorConfig(BaseModel):\nclass ServerConfig(BaseModel):\nclass LogConfig(BaseModel):\nclass Config(BaseModel, extra=Extra.ignore):\n def load(cls) -> \"Config\":\n def dump(self):"
},
{
"identifier": "NoSuchMeme",
"path": "meme_generator/exception.py",
"snippet": "class NoSuchMeme(MemeGeneratorException):\n status_code: int = 531\n\n def __init__(self, meme_key: str):\n self.meme_key = meme_key\n message = f'No such meme with key=\"{self.meme_key}\"'\n super().__init__(message)"
},
{
"identifier": "logger",
"path": "meme_generator/log.py",
"snippet": "class LoguruHandler(logging.Handler):\n def emit(self, record: logging.LogRecord):\ndef setup_logger():\n def default_filter(record: \"Record\"):\nLOGGING_CONFIG = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\n \"default\": {\n \"class\": \"meme_generator.log.LoguruHandler\",\n },\n },\n \"loggers\": {\n \"uvicorn.error\": {\"handlers\": [\"default\"], \"level\": \"INFO\"},\n \"uvicorn.access\": {\n \"handlers\": [\"default\"],\n \"level\": \"INFO\",\n },\n },\n}"
},
{
"identifier": "Meme",
"path": "meme_generator/meme.py",
"snippet": "class UserInfo(BaseModel):\nclass MemeArgsModel(BaseModel):\nclass MemeArgsParser(ArgumentParser):\nclass MemeArgsType:\nclass MemeParamsType:\nclass Meme:\n def _print_message(self, message: str, file: Optional[IO[str]] = None):\n def exit(self, status: int = 0, message: Optional[str] = None):\n async def __call__(\n self,\n *,\n images: Union[List[str], List[Path], List[bytes], List[BytesIO]] = [],\n texts: List[str] = [],\n args: Dict[str, Any] = {},\n ) -> BytesIO:\n def parse_args(self, args: List[str] = []) -> Dict[str, Any]:\n async def generate_preview(self, *, args: Dict[str, Any] = {}) -> BytesIO:\n async def _generate_preview(images: List[BytesIO], texts: List[str]):"
}
] | import importlib
import importlib.util
import pkgutil
from pathlib import Path
from typing import Dict, List, Optional, Union
from .config import meme_config
from .exception import NoSuchMeme
from .log import logger
from .meme import Meme, MemeArgsType, MemeFunction, MemeParamsType | 942 |
_memes: Dict[str, Meme] = {}
def path_to_module_name(path: Path) -> str:
rel_path = path.resolve().relative_to(Path.cwd().resolve())
if rel_path.stem == "__init__":
return ".".join(rel_path.parts[:-1])
else:
return ".".join(rel_path.parts[:-1] + (rel_path.stem,))
def load_meme(module_path: Union[str, Path]):
module_name = (
path_to_module_name(module_path)
if isinstance(module_path, Path)
else module_path
)
try:
importlib.import_module(module_name)
except Exception as e:
logger.opt(colors=True, exception=e).error(f"Failed to import {module_path}!")
def load_memes(dir_path: Union[str, Path]):
if isinstance(dir_path, Path):
dir_path = str(dir_path.resolve())
for module_info in pkgutil.iter_modules([dir_path]):
if module_info.name.startswith("_"):
continue
if not (
module_spec := module_info.module_finder.find_spec(module_info.name, None)
):
continue
if not (module_path := module_spec.origin):
continue
if not (module_loader := module_spec.loader):
continue
try:
module = importlib.util.module_from_spec(module_spec)
module_loader.exec_module(module)
except Exception as e:
logger.opt(colors=True, exception=e).error(
f"Failed to import {module_path}!"
)
def add_meme(
key: str,
|
_memes: Dict[str, Meme] = {}
def path_to_module_name(path: Path) -> str:
rel_path = path.resolve().relative_to(Path.cwd().resolve())
if rel_path.stem == "__init__":
return ".".join(rel_path.parts[:-1])
else:
return ".".join(rel_path.parts[:-1] + (rel_path.stem,))
def load_meme(module_path: Union[str, Path]):
module_name = (
path_to_module_name(module_path)
if isinstance(module_path, Path)
else module_path
)
try:
importlib.import_module(module_name)
except Exception as e:
logger.opt(colors=True, exception=e).error(f"Failed to import {module_path}!")
def load_memes(dir_path: Union[str, Path]):
if isinstance(dir_path, Path):
dir_path = str(dir_path.resolve())
for module_info in pkgutil.iter_modules([dir_path]):
if module_info.name.startswith("_"):
continue
if not (
module_spec := module_info.module_finder.find_spec(module_info.name, None)
):
continue
if not (module_path := module_spec.origin):
continue
if not (module_loader := module_spec.loader):
continue
try:
module = importlib.util.module_from_spec(module_spec)
module_loader.exec_module(module)
except Exception as e:
logger.opt(colors=True, exception=e).error(
f"Failed to import {module_path}!"
)
def add_meme(
key: str, | function: MemeFunction, | 3 | 2023-11-12 12:31:53+00:00 | 2k |
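# --- Editor-added sketch (not part of the record above): the discovery loop
# used by load_memes, reduced to its essentials -- iterate a directory's
# modules, resolve each spec, and execute it so module-level registration
# calls (add_meme) run as a side effect.
import importlib.util
import pkgutil

def load_plugins(dir_path: str) -> list:
    modules = []
    for info in pkgutil.iter_modules([dir_path]):
        if info.name.startswith("_"):
            continue  # skip private modules, as the original loader does
        spec = info.module_finder.find_spec(info.name, None)
        if spec and spec.origin and spec.loader:
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            modules.append(module)
    return modules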
embrake/Aquilify | aquilify/middlewares/dispatcher.py | [
{
"identifier": "ASGIApp",
"path": "aquilify/types.py",
"snippet": "T = typing.TypeVar(\"T\")"
},
{
"identifier": "JsonResponse",
"path": "aquilify/responses.py",
"snippet": "class JsonResponse(BaseResponse):\n def __init__(\n self,\n content: Union[Dict, Callable, None] = {},\n status: Optional[int] = 200,\n headers: Optional[Dict[str, Union[str, int]]] = None,\n content_type: str = 'application/json',\n encoding: Optional[str] = 'utf-8',\n validate: Optional[bool] = False,\n ) -> None:\n \"\"\"\n Create a JSON response.\n\n Args:\n content (Union[Dict, Callable, None]): The response content (as a dictionary).\n status (Optional[int]): The HTTP status code (default is 200).\n headers (Optional[Dict[str, Union[str, int]]]): Additional headers for the response.\n content_type (str): The content type for the response (default is 'application/json').\n encoding (Optional[str]): The character encoding for JSON content (default is 'utf-8').\n validate (Optional[bool]): Whether to validate the JSON data (default is False).\n\n \"\"\"\n if validate:\n try:\n json.dumps(content)\n except ValueError:\n raise ValueError(\"Invalid JSON content\")\n\n super().__init__(json.dumps(content, ensure_ascii=False), status, headers)\n self.headers.setdefault('Content-Type', f'{content_type}; charset={encoding}')"
}
] | import logging
from typing import Awaitable, Callable, Dict, Optional, Union
from ..types import ASGIApp, Receive, Scope, Send
from ..responses import JsonResponse | 1,306 | class Dispatcher:
"""
Dispatches incoming requests to different mounted ASGI apps based on the URL path.
Usage:
```python
# Create the main application
main_app = Aquilify()
# Create instances of the mounted apps
app1 = Aquilify()
app2 = Aquilify()
# Create the Dispatcher instance
dispatcher = Dispatcher(main_app, {})
# Map app1 to /app1 and app2 to /app2
dispatcher.map_url('/app1', app1)
dispatcher.map_url('/app2', app2)
# Define error handlers if necessary
async def error_handler1(scope, receive, send, exc):
# Custom error handling logic for app1
pass
async def error_handler2(scope, receive, send, exc):
# Custom error handling logic for app2
pass
dispatcher.map_url('/app1', app1, error_handler1)
dispatcher.map_url('/app2', app2, error_handler2)
# Run the dispatcher
@app.route("/")
async def homepage(request):
return JsonResponse({"message": "Hello, world!"})
@app.route("/app1")
async def app1_homepage(request):
        return JsonResponse({"message": "App 1 homepage"})
@app.route("/app2")
async def app2_homepage(request):
        return JsonResponse({"message": "App 2 homepage"})
```
"""
def __init__(self, main_app: ASGIApp, mounts: Dict[str, ASGIApp]) -> None:
"""
Initializes the Dispatcher instance.
Args:
main_app (ASGIApp): The main ASGI app to handle the requests.
mounts (Dict[str, ASGIApp]): A dictionary containing mounted apps.
Usage:
```python
main_app = Aquilify() # create a main app instance
app2 = Aquilify() #sub app for mounting in main_app
dispatcher = Dispatcher(main_app, {
'/app2': app2
        })
        ```
        Run:
$ netix --debug main:dispatcher
---------- or -----------
$ uvicorn main:dispatcher
"""
self.main_app: ASGIApp = main_app
self.mounts: Dict[str, ASGIApp] = mounts
self.error_handlers: Dict[str, Optional[Callable[..., Awaitable[None]]]] = {
mount_point: None for mount_point in mounts
}
self.logger = logging.getLogger(__name__)
def map_url(self, mount_point: str, app: ASGIApp,
error_handler: Optional[Callable[..., Awaitable[None]]] = None) -> None:
"""
Maps a URL mount point to a specified ASGI app.
Args:
mount_point (str): The URL mount point.
app (ASGIApp): The ASGI app to mount at the specified point.
error_handler (Optional[Callable[..., Awaitable[None]]]): Error handler for this mounted app.
"""
self.mounts[mount_point] = app
self.error_handlers[mount_point] = error_handler
def unmap_url(self, mount_point: str) -> None:
"""
Unmaps a URL mount point, removing the mounted app.
Args:
mount_point (str): The URL mount point to unmap.
"""
if mount_point in self.mounts:
del self.mounts[mount_point]
del self.error_handlers[mount_point]
async def conditional_mount(self, mount_point: str, app: ASGIApp,
condition: Union[Callable, Awaitable[bool]],
error_handler: Optional[Callable[..., Awaitable[None]]] = None) -> None:
"""
Mounts an ASGI app based on a specified condition.
Args:
mount_point (str): The URL mount point.
app (ASGIApp): The ASGI app to mount at the specified point.
condition (Union[Callable, Awaitable[bool]]): Condition to decide the mounting.
error_handler (Optional[Callable[..., Awaitable[None]]]): Error handler for this mounted app.
"""
if callable(condition):
condition = await condition()
if condition:
self.mounts[mount_point] = app
self.error_handlers[mount_point] = error_handler
|
class Dispatcher:
"""
Dispatches incoming requests to different mounted ASGI apps based on the URL path.
Usage:
```python
# Create the main application
main_app = Aquilify()
# Create instances of the mounted apps
app1 = Aquilify()
app2 = Aquilify()
# Create the Dispatcher instance
dispatcher = Dispatcher(main_app, {})
# Map app1 to /app1 and app2 to /app2
dispatcher.map_url('/app1', app1)
dispatcher.map_url('/app2', app2)
# Define error handlers if necessary
async def error_handler1(scope, receive, send, exc):
# Custom error handling logic for app1
pass
async def error_handler2(scope, receive, send, exc):
# Custom error handling logic for app2
pass
dispatcher.map_url('/app1', app1, error_handler1)
dispatcher.map_url('/app2', app2, error_handler2)
# Run the dispatcher
@app.route("/")
async def homepage(request):
return JsonResponse({"message": "Hello, world!"})
@app.route("/app1")
async def app1_homepage(request):
        return JsonResponse({"message": "App 1 homepage"})
@app.route("/app2")
async def app2_homepage(request):
        return JsonResponse({"message": "App 2 homepage"})
```
"""
def __init__(self, main_app: ASGIApp, mounts: Dict[str, ASGIApp]) -> None:
"""
Initializes the Dispatcher instance.
Args:
main_app (ASGIApp): The main ASGI app to handle the requests.
mounts (Dict[str, ASGIApp]): A dictionary containing mounted apps.
Usage:
```python
main_app = Aquilify() # create a main app instance
app2 = Aquilify() #sub app for mounting in main_app
dispatcher = Dispatcher(main_app, {
'/app2': app2
        })
        ```
        Run:
$ netix --debug main:dispatcher
---------- or -----------
$ uvicorn main:dispatcher
"""
self.main_app: ASGIApp = main_app
self.mounts: Dict[str, ASGIApp] = mounts
self.error_handlers: Dict[str, Optional[Callable[..., Awaitable[None]]]] = {
mount_point: None for mount_point in mounts
}
self.logger = logging.getLogger(__name__)
def map_url(self, mount_point: str, app: ASGIApp,
error_handler: Optional[Callable[..., Awaitable[None]]] = None) -> None:
"""
Maps a URL mount point to a specified ASGI app.
Args:
mount_point (str): The URL mount point.
app (ASGIApp): The ASGI app to mount at the specified point.
error_handler (Optional[Callable[..., Awaitable[None]]]): Error handler for this mounted app.
"""
self.mounts[mount_point] = app
self.error_handlers[mount_point] = error_handler
def unmap_url(self, mount_point: str) -> None:
"""
Unmaps a URL mount point, removing the mounted app.
Args:
mount_point (str): The URL mount point to unmap.
"""
if mount_point in self.mounts:
del self.mounts[mount_point]
del self.error_handlers[mount_point]
async def conditional_mount(self, mount_point: str, app: ASGIApp,
condition: Union[Callable, Awaitable[bool]],
error_handler: Optional[Callable[..., Awaitable[None]]] = None) -> None:
"""
Mounts an ASGI app based on a specified condition.
Args:
mount_point (str): The URL mount point.
app (ASGIApp): The ASGI app to mount at the specified point.
condition (Union[Callable, Awaitable[bool]]): Condition to decide the mounting.
error_handler (Optional[Callable[..., Awaitable[None]]]): Error handler for this mounted app.
"""
if callable(condition):
condition = await condition()
if condition:
self.mounts[mount_point] = app
self.error_handlers[mount_point] = error_handler
| async def dispatch(self, scope: Scope, receive: Receive, send: Send) -> None: | 0 | 2023-11-16 08:26:02+00:00 | 2k |
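# --- Editor-added sketch (not part of the record above): how the gold
# completion's dispatch() plausibly routes a request -- longest-prefix match
# over the mounts dict, falling back to the main app. Plain ASGI callables,
# no framework required; the exact Aquilify behavior may differ.
async def dispatch_demo(scope, receive, send, main_app, mounts):
    path = scope.get("path", "")
    for mount_point, app in sorted(mounts.items(), key=lambda kv: -len(kv[0])):
        if path.startswith(mount_point):
            child_scope = dict(scope, path=path[len(mount_point):] or "/")
            await app(child_scope, receive, send)
            return
    await main_app(scope, receive, send)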
Viicos/django-autotyping | src/django_autotyping/app_settings.py | [
{
"identifier": "Self",
"path": "src/django_autotyping/_compat.py",
"snippet": "def is_relative_to(path: Path, other: Path) -> bool:"
},
{
"identifier": "AutotypingSettingsDict",
"path": "src/django_autotyping/typing.py",
"snippet": "class AutotypingSettingsDict(TypedDict, total=False):\nclass StubsGenerationSettingsDict(TypedDict, total=False):\nclass CodeGenerationSettingsDict(TypedDict, total=False):\n IGNORE: list[RulesT]\n STUBS_GENERATION: StubsGenerationSettingsDict\n CODE_GENERATION: CodeGenerationSettingsDict\n LOCAL_STUBS_DIR: Path | None\n SOURCE_STUBS_DIR: Path | None\n ALLOW_PLAIN_MODEL_REFERENCES: bool\n ALLOW_NONE_SET_TYPE: bool\n MODEL_FIELDS_OPTIONAL: bool\n ALLOW_REVERSE_ARGS: bool\n PROJECT_DIR: Path | None\n DIFF: bool\n TYPE_CHECKING_BLOCK: bool\n ASSUME_CLASS_GETITEM: bool"
}
] | from copy import deepcopy
from dataclasses import dataclass, field
from pathlib import Path
from django.conf import LazySettings
from ._compat import Self
from .typing import AutotypingSettingsDict, RulesT | 1,178 | from __future__ import annotations
@dataclass
class CodeGenerationSettings:
"""Configuration for adding type annotations to Django user code."""
PROJECT_DIR: Path | None = None
"""The directory of the project, where code modifications should be applied."""
DIFF: bool = False
"""Show changes to be applied instead of modifying existing files."""
TYPE_CHECKING_BLOCK: bool = True
"""Whether newly added imports should be in an `if TYPE_CHECKING` block (avoids circular imports)."""
ASSUME_CLASS_GETITEM: bool = False
"""Whether generic classes in stubs files but not at runtime should be assumed to have a
`__class_getitem__` method. This can be achieved by using `django-stubs-ext` or manually.
Affected rules: `DJA001`.
"""
@dataclass
class StubsGenerationSettings:
"""Configuration for dynamic stubs generation."""
LOCAL_STUBS_DIR: Path | None = None
"""The directory of the local type stubs. If not set, this setting must be set as a CLI argument."""
SOURCE_STUBS_DIR: Path | None = None
"""The directory of the source `django-stubs` to be used. Will default
to the first entry in site packages.
"""
ALLOW_PLAIN_MODEL_REFERENCES: bool = True
"""Whether string references in the form of `{model_name}` should be generated in overloads.
If set to `True`, both `{model_name}` and `{model_name}.{app_label}` are allowed
(unless the model name has a duplicate in a different app).
Affected rules: `DJAS001`.
"""
ALLOW_NONE_SET_TYPE: bool = False
"""Whether to allow having the `__set__` type variable set to `None`, even if the field is not nullable.
While Django allows setting most model instance fields to any value (before saving),
it is generally a bad practice to do so. However, it might be beneficial to allow `None`
    to be set temporarily.
This also works for foreign fields, where unlike standard fields, the Django descriptor used
only allows model instances and `None` to be set.
Affected rules: `DJAS001`.
"""
MODEL_FIELDS_OPTIONAL: bool = True
"""Whether all model fields should be considered optional when creating model instances.
This affects the following signatures:
- [`Manager.create/acreate`][django.db.models.Manager]
- `__init__` methods of models
A lot can happen behind the scenes when instantiating models. Even if a field doesn't have
a default value provided, the database could have triggers implemented that would provide one.
This is why, by default, this configuration attribute defaults to `True`. If set to `False`,
`django-autotyping` will try its best to determine required fields, namely by checking if:
- the field can be [`null`][django.db.models.Field.null]
- the field has a default or a database default value set
- the field is a subclass of [`DateField`][django.db.models.DateField] and has
[`auto_now`][django.db.models.DateField.auto_now] or [`auto_now_add`][django.db.models.DateField.auto_now_add]
set to `True`.
Affected rules: `DJAS002`.
"""
ALLOW_REVERSE_ARGS: bool = False
"""Whether type checking should be added to the `args` argument of [`reverse`][django.urls.reverse].
By default, this is set to `False` to avoid having too many overloads being generated.
Moreover, only tuples can be type checked, and most people are using lists for this argument.
Instead, it is recommended to use the `kwargs` argument.
Affected rules: `DJAS011`.
"""
@dataclass
class AutotypingSettings:
"""A class holding the django-autotyping configuration."""
IGNORE: list[RulesT] = field(default_factory=list)
"""A list of ignored rules."""
STUBS_GENERATION: StubsGenerationSettings = field(default_factory=StubsGenerationSettings)
"""Stub related settings."""
CODE_GENERATION: CodeGenerationSettings = field(default_factory=CodeGenerationSettings)
"""Code generation related settings."""
@classmethod
| from __future__ import annotations
@dataclass
class CodeGenerationSettings:
"""Configuration for adding type annotations to Django user code."""
PROJECT_DIR: Path | None = None
"""The directory of the project, where code modifications should be applied."""
DIFF: bool = False
"""Show changes to be applied instead of modifying existing files."""
TYPE_CHECKING_BLOCK: bool = True
"""Whether newly added imports should be in an `if TYPE_CHECKING` block (avoids circular imports)."""
ASSUME_CLASS_GETITEM: bool = False
"""Whether generic classes in stubs files but not at runtime should be assumed to have a
`__class_getitem__` method. This can be achieved by using `django-stubs-ext` or manually.
Affected rules: `DJA001`.
"""
@dataclass
class StubsGenerationSettings:
"""Configuration for dynamic stubs generation."""
LOCAL_STUBS_DIR: Path | None = None
"""The directory of the local type stubs. If not set, this setting must be set as a CLI argument."""
SOURCE_STUBS_DIR: Path | None = None
"""The directory of the source `django-stubs` to be used. Will default
to the first entry in site packages.
"""
ALLOW_PLAIN_MODEL_REFERENCES: bool = True
"""Whether string references in the form of `{model_name}` should be generated in overloads.
If set to `True`, both `{model_name}` and `{model_name}.{app_label}` are allowed
(unless the model name has a duplicate in a different app).
Affected rules: `DJAS001`.
"""
ALLOW_NONE_SET_TYPE: bool = False
"""Whether to allow having the `__set__` type variable set to `None`, even if the field is not nullable.
While Django allows setting most model instance fields to any value (before saving),
it is generally a bad practice to do so. However, it might be beneficial to allow `None`
to be set temporarly.
This also works for foreign fields, where unlike standard fields, the Django descriptor used
only allows model instances and `None` to be set.
Affected rules: `DJAS001`.
"""
MODEL_FIELDS_OPTIONAL: bool = True
"""Whether all model fields should be considered optional when creating model instances.
This affects the following signatures:
- [`Manager.create/acreate`][django.db.models.Manager]
- `__init__` methods of models
A lot can happen behind the scenes when instantiating models. Even if a field doesn't have
a default value provided, the database could have triggers implemented that would provide one.
This is why, by default, this configuration attribute defaults to `True`. If set to `False`,
`django-autotyping` will try its best to determine required fields, namely by checking if:
- the field can be [`null`][django.db.models.Field.null]
- the field has a default or a database default value set
- the field is a subclass of [`DateField`][django.db.models.DateField] and has
[`auto_now`][django.db.models.DateField.auto_now] or [`auto_now_add`][django.db.models.DateField.auto_now_add]
set to `True`.
Affected rules: `DJAS002`.
"""
ALLOW_REVERSE_ARGS: bool = False
"""Whether type checking should be added to the `args` argument of [`reverse`][django.urls.reverse].
By default, this is set to `False` to avoid having too many overloads being generated.
Moreover, only tuples can be type checked, and most people are using lists for this argument.
Instead, it is recommended to use the `kwargs` argument.
Affected rules: `DJAS011`.
"""
@dataclass
class AutotypingSettings:
"""A class holding the django-autotyping configuration."""
IGNORE: list[RulesT] = field(default_factory=list)
"""A list of ignored rules."""
STUBS_GENERATION: StubsGenerationSettings = field(default_factory=StubsGenerationSettings)
"""Stub related settings."""
CODE_GENERATION: CodeGenerationSettings = field(default_factory=CodeGenerationSettings)
"""Code generation related settings."""
@classmethod | def from_django_settings(cls, settings: LazySettings) -> Self: | 0 | 2023-11-11 20:42:05+00:00 | 2k |
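# --- Editor-added sketch (not part of the record above): a hedged guess at
# what a from_django_settings constructor does -- read a dict off the settings
# object and keep only known dataclass fields. FakeSettings and the AUTOTYPING
# attribute name are illustrative assumptions, not the library's actual API.
from dataclasses import dataclass, field, fields

@dataclass
class SettingsDemo:
    IGNORE: list = field(default_factory=list)

    @classmethod
    def from_settings(cls, settings: object) -> "SettingsDemo":
        raw = dict(getattr(settings, "AUTOTYPING", {}))
        known = {f.name for f in fields(cls)}
        return cls(**{k: v for k, v in raw.items() if k in known})

class FakeSettings:
    AUTOTYPING = {"IGNORE": ["DJA001"]}

print(SettingsDemo.from_settings(FakeSettings()))  # SettingsDemo(IGNORE=['DJA001'])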
IBM/oper8 | oper8/cmd/setup_vcs_cmd.py | [
{
"identifier": "DEFAULT_DEST",
"path": "oper8/setup_vcs.py",
"snippet": "DEFAULT_DEST = \"oper8_vcs\""
},
{
"identifier": "DEFAULT_TAG_EXPR",
"path": "oper8/setup_vcs.py",
"snippet": "DEFAULT_TAG_EXPR = r\"[0-9]+\\.[0-9]+\\.[0-9]+\""
},
{
"identifier": "setup_vcs",
"path": "oper8/setup_vcs.py",
"snippet": "def setup_vcs(\n source: str,\n destination: Optional[str] = None,\n branch_expr: Optional[List[str]] = None,\n tag_expr: Optional[List[str]] = __UNSET__,\n force: bool = False,\n):\n \"\"\"This utility will initialize an operator's VCS directory for use with\n oper8's VCS versioning.\n\n Args:\n source (str): The path to the source repository on disk\n destination (Optional[str]): The path where the VCS repo should be\n created\n branch_expr (Optional[List[str]]): Regular expression(s) to use to\n identify branches to retain in the VCS repo\n tag_expr (Optional[List[str]]): Regular expression(s) to use to\n identify tags to retain in the VCS repo\n force (bool): Force overwrite existing destination\n \"\"\"\n initializer = VCSRepoInitializer(\n source=source, destination=destination or DEFAULT_DEST, force=force\n )\n initializer.initialize_branches(\n branch_expr=branch_expr,\n tag_expr=tag_expr if tag_expr is not __UNSET__ else [DEFAULT_TAG_EXPR],\n )\n initializer.clean_up()"
},
{
"identifier": "CmdBase",
"path": "oper8/cmd/base.py",
"snippet": "class CmdBase(abc.ABC):\n __doc__ = __doc__\n\n @abc.abstractmethod\n def add_subparser(\n self,\n subparsers: argparse._SubParsersAction,\n ) -> argparse.ArgumentParser:\n \"\"\"Add this command's argument parser subcommand\n\n Args:\n subparsers (argparse._SubParsersAction): The subparser section for\n the central main parser\n\n Returns:\n subparser (argparse.ArgumentParser): The configured parser for this\n command\n \"\"\"\n\n @abc.abstractmethod\n def cmd(self, args: argparse.Namespace):\n \"\"\"Execute the command with the parsed arguments\n\n Args:\n args (argparse.Namespace): The parsed command line arguments\n \"\"\""
}
] | import argparse
import alog
from ..setup_vcs import DEFAULT_DEST, DEFAULT_TAG_EXPR, setup_vcs
from .base import CmdBase | 750 | """
CLI command for setting up a VCS version repo
"""
# Standard
# First Party
# Local
log = alog.use_channel("CMD-VCS")
class SetupVCSCmd(CmdBase):
__doc__ = __doc__
def add_subparser(
self,
subparsers: argparse._SubParsersAction,
) -> argparse.ArgumentParser:
"""Add the subparser for this command"""
parser = subparsers.add_parser(
"setup-vcs",
help="Initialize a clean git repo to use with VCS versioning",
)
command_args = parser.add_argument_group("Command Arguments")
command_args.add_argument(
"--source",
"-s",
required=True,
help="Source repo to seed the clean git history",
)
command_args.add_argument(
"--destination",
"-d",
| """
CLI command for setting up a VCS version repo
"""
# Standard
# First Party
# Local
log = alog.use_channel("CMD-VCS")
class SetupVCSCmd(CmdBase):
__doc__ = __doc__
def add_subparser(
self,
subparsers: argparse._SubParsersAction,
) -> argparse.ArgumentParser:
"""Add the subparser for this command"""
parser = subparsers.add_parser(
"setup-vcs",
help="Initialize a clean git repo to use with VCS versioning",
)
command_args = parser.add_argument_group("Command Arguments")
command_args.add_argument(
"--source",
"-s",
required=True,
help="Source repo to seed the clean git history",
)
command_args.add_argument(
"--destination",
"-d", | default=DEFAULT_DEST, | 0 | 2023-11-15 16:43:29+00:00 | 2k |
ariebovenberg/whenever | tests/test_naive_datetime.py | [
{
"identifier": "AlwaysEqual",
"path": "tests/common.py",
"snippet": "class AlwaysEqual:\n def __eq__(self, other):\n return True"
},
{
"identifier": "AlwaysLarger",
"path": "tests/common.py",
"snippet": "class AlwaysLarger:\n def __lt__(self, other):\n return False\n\n def __le__(self, other):\n return False\n\n def __gt__(self, other):\n return True\n\n def __ge__(self, other):\n return True"
},
{
"identifier": "AlwaysSmaller",
"path": "tests/common.py",
"snippet": "class AlwaysSmaller:\n def __lt__(self, other):\n return True\n\n def __le__(self, other):\n return True\n\n def __gt__(self, other):\n return False\n\n def __ge__(self, other):\n return False"
},
{
"identifier": "NeverEqual",
"path": "tests/common.py",
"snippet": "class NeverEqual:\n def __eq__(self, other):\n return False"
},
{
"identifier": "local_ams_tz",
"path": "tests/common.py",
"snippet": "@contextmanager\ndef local_ams_tz():\n with patch.dict(os.environ, {\"TZ\": \"Europe/Amsterdam\"}):\n tzset()\n yield"
}
] | import pickle
import weakref
import pytest
from datetime import datetime as py_datetime
from datetime import timedelta, timezone
from hypothesis import given
from hypothesis.strategies import text
from whenever import InvalidFormat, NaiveDateTime
from .common import (
AlwaysEqual,
AlwaysLarger,
AlwaysSmaller,
NeverEqual,
local_ams_tz,
) | 1,181 |
def test_minimal():
d = NaiveDateTime(2020, 8, 15, 5, 12, 30, 450)
assert d.year == 2020
assert d.month == 8
assert d.day == 15
assert d.hour == 5
assert d.minute == 12
assert d.second == 30
assert d.microsecond == 450
assert (
NaiveDateTime(2020, 8, 15, 12)
== NaiveDateTime(2020, 8, 15, 12, 0)
== NaiveDateTime(2020, 8, 15, 12, 0, 0)
== NaiveDateTime(2020, 8, 15, 12, 0, 0, 0)
)
def test_immutable():
d = NaiveDateTime(2020, 8, 15)
with pytest.raises(AttributeError):
d.year = 2021 # type: ignore[misc]
class TestFromCanonicalStr:
def test_valid(self):
assert NaiveDateTime.from_canonical_str(
"2020-08-15T12:08:30"
) == NaiveDateTime(2020, 8, 15, 12, 8, 30)
def test_valid_three_fractions(self):
assert NaiveDateTime.from_canonical_str(
"2020-08-15T12:08:30.349"
) == NaiveDateTime(2020, 8, 15, 12, 8, 30, 349_000)
def test_valid_six_fractions(self):
assert NaiveDateTime.from_canonical_str(
"2020-08-15T12:08:30.349123"
) == NaiveDateTime(2020, 8, 15, 12, 8, 30, 349_123)
def test_single_space_instead_of_T(self):
assert NaiveDateTime.from_canonical_str(
"2020-08-15 12:08:30"
) == NaiveDateTime(2020, 8, 15, 12, 8, 30)
def test_unpadded(self):
with pytest.raises(InvalidFormat):
NaiveDateTime.from_canonical_str("2020-8-15T12:8:30")
def test_overly_precise_fraction(self):
with pytest.raises(InvalidFormat):
NaiveDateTime.from_canonical_str(
"2020-08-15T12:08:30.123456789123"
)
def test_trailing_z(self):
with pytest.raises(InvalidFormat):
NaiveDateTime.from_canonical_str("2020-08-15T12:08:30Z")
def test_no_seconds(self):
with pytest.raises(InvalidFormat):
NaiveDateTime.from_canonical_str("2020-08-15T12:08")
def test_empty(self):
with pytest.raises(InvalidFormat):
NaiveDateTime.from_canonical_str("")
def test_garbage(self):
with pytest.raises(InvalidFormat):
NaiveDateTime.from_canonical_str("garbage")
@given(text())
def test_fuzzing(self, s: str):
with pytest.raises(InvalidFormat):
NaiveDateTime.from_canonical_str(s)
def test_equality():
d = NaiveDateTime(2020, 8, 15)
different = NaiveDateTime(2020, 8, 16)
same = NaiveDateTime(2020, 8, 15)
assert d == same
assert d != different
assert not d == different
assert not d != same
assert hash(d) == hash(same)
assert hash(d) != hash(different)
|
def test_minimal():
d = NaiveDateTime(2020, 8, 15, 5, 12, 30, 450)
assert d.year == 2020
assert d.month == 8
assert d.day == 15
assert d.hour == 5
assert d.minute == 12
assert d.second == 30
assert d.microsecond == 450
assert (
NaiveDateTime(2020, 8, 15, 12)
== NaiveDateTime(2020, 8, 15, 12, 0)
== NaiveDateTime(2020, 8, 15, 12, 0, 0)
== NaiveDateTime(2020, 8, 15, 12, 0, 0, 0)
)
def test_immutable():
d = NaiveDateTime(2020, 8, 15)
with pytest.raises(AttributeError):
d.year = 2021 # type: ignore[misc]
class TestFromCanonicalStr:
def test_valid(self):
assert NaiveDateTime.from_canonical_str(
"2020-08-15T12:08:30"
) == NaiveDateTime(2020, 8, 15, 12, 8, 30)
def test_valid_three_fractions(self):
assert NaiveDateTime.from_canonical_str(
"2020-08-15T12:08:30.349"
) == NaiveDateTime(2020, 8, 15, 12, 8, 30, 349_000)
def test_valid_six_fractions(self):
assert NaiveDateTime.from_canonical_str(
"2020-08-15T12:08:30.349123"
) == NaiveDateTime(2020, 8, 15, 12, 8, 30, 349_123)
def test_single_space_instead_of_T(self):
assert NaiveDateTime.from_canonical_str(
"2020-08-15 12:08:30"
) == NaiveDateTime(2020, 8, 15, 12, 8, 30)
def test_unpadded(self):
with pytest.raises(InvalidFormat):
NaiveDateTime.from_canonical_str("2020-8-15T12:8:30")
def test_overly_precise_fraction(self):
with pytest.raises(InvalidFormat):
NaiveDateTime.from_canonical_str(
"2020-08-15T12:08:30.123456789123"
)
def test_trailing_z(self):
with pytest.raises(InvalidFormat):
NaiveDateTime.from_canonical_str("2020-08-15T12:08:30Z")
def test_no_seconds(self):
with pytest.raises(InvalidFormat):
NaiveDateTime.from_canonical_str("2020-08-15T12:08")
def test_empty(self):
with pytest.raises(InvalidFormat):
NaiveDateTime.from_canonical_str("")
def test_garbage(self):
with pytest.raises(InvalidFormat):
NaiveDateTime.from_canonical_str("garbage")
@given(text())
def test_fuzzing(self, s: str):
with pytest.raises(InvalidFormat):
NaiveDateTime.from_canonical_str(s)
def test_equality():
d = NaiveDateTime(2020, 8, 15)
different = NaiveDateTime(2020, 8, 16)
same = NaiveDateTime(2020, 8, 15)
assert d == same
assert d != different
assert not d == different
assert not d != same
assert hash(d) == hash(same)
assert hash(d) != hash(different)
| assert d == AlwaysEqual() | 0 | 2023-11-10 21:08:49+00:00 | 2k |
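# --- Editor-added sketch (not part of the record above): why the gold
# completion `assert d == AlwaysEqual()` holds. object.__eq__ returns
# NotImplemented for an unrelated type, so Python falls back to the reflected
# __eq__ defined on the helper class.
class AlwaysEqualDemo:
    def __eq__(self, other):
        return True

class NeverEqualDemo:
    def __eq__(self, other):
        return False

d = object()
assert d == AlwaysEqualDemo()       # reflected __eq__ -> True
assert not (d == NeverEqualDemo())  # reflected __eq__ -> False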
DataWizual/Raycasting | drawing.py | [
{
"identifier": "ray_casting",
"path": "ray_casting.py",
"snippet": "def ray_casting(sc, player_pos, player_angle):\r\n ox, oy = player_pos\r\n xm, ym = mapping(ox, oy)\r\n cur_angle = player_angle - HALF_FOV\r\n for ray in range(NUM_RAYS):\r\n sin_a = math.sin(cur_angle)\r\n cos_a = math.cos(cur_angle)\r\n sin_a = sin_a if sin_a else 0.000001\r\n cos_a = cos_a if cos_a else 0.000001\r\n\r\n # verticals\r\n x, dx = (xm + TILE, 1) if cos_a >= 0 else (xm, -1)\r\n for i in range(0, WIDTH, TILE):\r\n depth_v = (x - ox) / cos_a\r\n y = oy + depth_v * sin_a\r\n if mapping(x + dx, y) in world_map:\r\n break\r\n x += dx * TILE\r\n\r\n # horizontals\r\n y, dy = (ym + TILE, 1) if sin_a >= 0 else (ym, -1)\r\n for i in range(0, HEIGHT, TILE):\r\n depth_h = (y - oy) / sin_a\r\n x = ox + depth_h * cos_a\r\n if mapping(x, y + dy) in world_map:\r\n break\r\n y += dy * TILE\r\n\r\n # projection\r\n depth = depth_v if depth_v < depth_h else depth_h\r\n depth *= math.cos(player_angle - cur_angle)\r\n proj_height = PROJ_COEFF / depth\r\n c = 255 / (1 + depth * depth * 0.00002)\r\n color = (63 + c // 2, 63 + c // 2, 63 + c // 2)\r\n pygame.draw.rect(sc, color, (ray * SCALE, HALF_HEIGHT - proj_height // 2, SCALE, proj_height))\r\n cur_angle += DELTA_ANGLE"
},
{
"identifier": "mini_map",
"path": "map.py",
"snippet": ""
}
] | import pygame
from settings import *
from ray_casting import ray_casting
from map import mini_map
| 650 |
class Drawing:
def __init__(self, sc, sc_map):
self.sc = sc
self.sc_map = sc_map
self.font = pygame.font.SysFont('Arial', 36, bold=True)
def background(self):
pygame.draw.rect(self.sc, SKYBLUE, (0, 0, WIDTH, HALF_HEIGHT))
pygame.draw.rect(self.sc, DARKGREY, (0, HALF_HEIGHT, WIDTH, HALF_HEIGHT))
def world(self, player_pos, player_angle):
ray_casting(self.sc, player_pos, player_angle)
def fps(self, clock):
display_fps = str(int(clock.get_fps()))
render = self.font.render(display_fps, 0, CRED)
self.sc.blit(render, FPS_POS)
|
class Drawing:
def __init__(self, sc, sc_map):
self.sc = sc
self.sc_map = sc_map
self.font = pygame.font.SysFont('Arial', 36, bold=True)
def background(self):
pygame.draw.rect(self.sc, SKYBLUE, (0, 0, WIDTH, HALF_HEIGHT))
pygame.draw.rect(self.sc, DARKGREY, (0, HALF_HEIGHT, WIDTH, HALF_HEIGHT))
def world(self, player_pos, player_angle):
ray_casting(self.sc, player_pos, player_angle)
def fps(self, clock):
display_fps = str(int(clock.get_fps()))
render = self.font.render(display_fps, 0, CRED)
self.sc.blit(render, FPS_POS)
| def mini_map(self, player):
| 1 | 2023-11-15 12:18:25+00:00 | 2k |
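The gold mini_map snippet in the record above is empty, so only the method signature is known from next_line. A hypothetical illustration of a minimap drawer in the same pygame style; MAP_SCALE, MAP_TILE, MAP_POS, the color names, and the player.x/player.y attributes are all assumptions, not the repo's actual code:

def mini_map(self, player):
    # Hypothetical sketch: constants are assumed to come from settings.py
    # (imported with *), and mini_map is the wall-coordinate set from map.py.
    self.sc_map.fill(BLACK)
    map_x, map_y = player.x // MAP_SCALE, player.y // MAP_SCALE
    pygame.draw.circle(self.sc_map, RED, (int(map_x), int(map_y)), 5)
    for x, y in mini_map:
        pygame.draw.rect(self.sc_map, DARKBROWN, (x, y, MAP_TILE, MAP_TILE))
    self.sc.blit(self.sc_map, MAP_POS)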
CV-Reimplementation/Ucolor-Reimplementation | train.py | [
{
"identifier": "Config",
"path": "config/config.py",
"snippet": "class Config(object):\n r\"\"\"\n A collection of all the required configuration parameters. This class is a nested dict-like\n structure, with nested keys accessible as attributes. It contains sensible default values for\n all the parameters, which may be overriden by (first) through a YAML file and (second) through\n a list of attributes and values.\n\n Extended Summary\n ----------------\n This class definition contains default values corresponding to ``joint_training`` phase, as it\n is the final training phase and uses almost all the configuration parameters. Modification of\n any parameter after instantiating this class is not possible, so you must override required\n parameter values in either through ``config_yaml`` file or ``config_override`` list.\n\n Parameters\n ----------\n config_yaml: str\n Path to a YAML file containing configuration parameters to override.\n config_override: List[Any], optional (default= [])\n A list of sequential attributes and values of parameters to override. This happens after\n overriding from YAML file.\n\n Examples\n --------\n Let a YAML file named \"config.yaml\" specify these parameters to override::\n\n ALPHA: 1000.0\n BETA: 0.5\n\n >>> _C = Config(\"config.yaml\", [\"OPTIM.BATCH_SIZE\", 2048, \"BETA\", 0.7])\n >>> _C.ALPHA # default: 100.0\n 1000.0\n >>> _C.BATCH_SIZE # default: 256\n 2048\n >>> _C.BETA # default: 0.1\n 0.7\n\n Attributes\n ----------\n \"\"\"\n\n def __init__(self, config_yaml: str, config_override: List[Any] = []):\n self._C = CN()\n self._C.GPU = [0]\n self._C.VERBOSE = False\n\n self._C.MODEL = CN()\n self._C.MODEL.SESSION = 'LUT'\n self._C.MODEL.INPUT = 'input'\n self._C.MODEL.TARGET = 'target'\n\n self._C.OPTIM = CN()\n self._C.OPTIM.BATCH_SIZE = 1\n self._C.OPTIM.SEED = 3407\n self._C.OPTIM.NUM_EPOCHS = 100\n self._C.OPTIM.NEPOCH_DECAY = [100]\n self._C.OPTIM.LR_INITIAL = 0.0002\n self._C.OPTIM.LR_MIN = 0.0002\n self._C.OPTIM.BETA1 = 0.5\n self._C.OPTIM.WANDB = False\n\n self._C.TRAINING = CN()\n self._C.TRAINING.VAL_AFTER_EVERY = 3\n self._C.TRAINING.RESUME = False\n self._C.TRAINING.TRAIN_DIR = '../dataset/Jung/train'\n self._C.TRAINING.VAL_DIR = '../dataset/Jung/test'\n self._C.TRAINING.SAVE_DIR = 'checkpoints'\n self._C.TRAINING.PS_W = 512\n self._C.TRAINING.PS_H = 512\n self._C.TRAINING.ORI = False\n\n self._C.TESTING = CN()\n self._C.TESTING.WEIGHT = None\n self._C.TESTING.SAVE_IMAGES = False\n\n # Override parameter values from YAML file first, then from override list.\n self._C.merge_from_file(config_yaml)\n self._C.merge_from_list(config_override)\n\n # Make an instantiated object of this class immutable.\n self._C.freeze()\n\n def dump(self, file_path: str):\n r\"\"\"Save config at the specified file path.\n\n Parameters\n ----------\n file_path: str\n (YAML) path to save config at.\n \"\"\"\n self._C.dump(stream=open(file_path, \"w\"))\n\n def __getattr__(self, attr: str):\n return self._C.__getattr__(attr)\n\n def __repr__(self):\n return self._C.__repr__()"
},
{
"identifier": "get_training_data",
"path": "data/data_RGB.py",
"snippet": "def get_training_data(rgb_dir, inp, target, img_options):\n assert os.path.exists(rgb_dir)\n return DataLoaderTrain(rgb_dir, inp, target, img_options)"
},
{
"identifier": "get_validation_data",
"path": "data/data_RGB.py",
"snippet": "def get_validation_data(rgb_dir, inp, target, img_options):\n assert os.path.exists(rgb_dir)\n return DataLoaderVal(rgb_dir, inp, target, img_options)"
}
] | import warnings
import torch.optim as optim
from accelerate import Accelerator
from pytorch_msssim import SSIM
from torch.utils.data import DataLoader
from torchmetrics.functional import peak_signal_noise_ratio, structural_similarity_index_measure
from tqdm import tqdm
from config import Config
from data import get_training_data, get_validation_data
from models import *
from utils import * | 1,388 |
warnings.filterwarnings('ignore')
opt = Config('config.yml')
seed_everything(opt.OPTIM.SEED)
def train():
# Accelerate
accelerator = Accelerator(log_with='wandb') if opt.OPTIM.WANDB else Accelerator()
device = accelerator.device
config = {
"dataset": opt.TRAINING.TRAIN_DIR
}
accelerator.init_trackers("shadow", config=config)
if accelerator.is_local_main_process:
os.makedirs(opt.TRAINING.SAVE_DIR, exist_ok=True)
# Data Loader
train_dir = opt.TRAINING.TRAIN_DIR
val_dir = opt.TRAINING.VAL_DIR
train_dataset = get_training_data(train_dir, opt.MODEL.INPUT, opt.MODEL.TARGET, {'w': opt.TRAINING.PS_W, 'h': opt.TRAINING.PS_H})
train_loader = DataLoader(dataset=train_dataset, batch_size=opt.OPTIM.BATCH_SIZE, shuffle=True, num_workers=16,
drop_last=False, pin_memory=True)
|
warnings.filterwarnings('ignore')
opt = Config('config.yml')
seed_everything(opt.OPTIM.SEED)
def train():
# Accelerate
accelerator = Accelerator(log_with='wandb') if opt.OPTIM.WANDB else Accelerator()
device = accelerator.device
config = {
"dataset": opt.TRAINING.TRAIN_DIR
}
accelerator.init_trackers("shadow", config=config)
if accelerator.is_local_main_process:
os.makedirs(opt.TRAINING.SAVE_DIR, exist_ok=True)
# Data Loader
train_dir = opt.TRAINING.TRAIN_DIR
val_dir = opt.TRAINING.VAL_DIR
train_dataset = get_training_data(train_dir, opt.MODEL.INPUT, opt.MODEL.TARGET, {'w': opt.TRAINING.PS_W, 'h': opt.TRAINING.PS_H})
train_loader = DataLoader(dataset=train_dataset, batch_size=opt.OPTIM.BATCH_SIZE, shuffle=True, num_workers=16,
drop_last=False, pin_memory=True) | val_dataset = get_validation_data(val_dir, opt.MODEL.INPUT, opt.MODEL.TARGET, {'w': opt.TRAINING.PS_W, 'h': opt.TRAINING.PS_H, 'ori': opt.TRAINING.ORI}) | 2 | 2023-11-14 05:40:54+00:00 | 2k |
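The Config snippet in this record's context documents a YAML-plus-override pattern; a minimal usage sketch based on the docstring's own example (the file name and override keys are illustrative):

# Defaults come from the YAML file; the override list is applied last.
opt = Config('config.yml', ['OPTIM.BATCH_SIZE', 4, 'TRAINING.PS_W', 256])
print(opt.OPTIM.BATCH_SIZE)      # 4, overriding the default of 1
opt.dump('resolved_config.yml')  # snapshot the frozen, immutable configuration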
ottuco/multi-api-mocker | multi_api_mocker/contrib/pytest_plugin.py | [
{
"identifier": "group_by_url",
"path": "multi_api_mocker/utils.py",
"snippet": "def group_by_url(api_mocks: List[MockAPIResponse]) -> List[MockConfiguration]:\n \"\"\"\n Organizes a list of MockAPIResponse objects by their URL and method, grouping\n them into lists of responses for each endpoint. This grouping is necessary for\n requests-mock when multiple responses for the same endpoint are required, as it\n allows requests-mock to cycle through the responses in order for each subsequent\n call to the same URL.\n\n Parameters:\n api_mocks (List[MockConfiguration]): A list of MockAPIResponse objects\n representing the expected responses\n for different API calls.\n\n Returns:\n List[MockConfiguration]: A list of MockConfiguration objects where each object\n contains the URL, method, and a list of responses to be\n used by requests-mock to simulate API interactions.\n \"\"\"\n\n grouped_mocks = defaultdict(list)\n for mock in api_mocks:\n # Create an instance of ResponseKwargs\n response_kwargs = ResponseKwargs(\n text=mock.text if not mock.exc else None,\n status_code=mock.status_code if not mock.exc else None,\n json=mock.json if not mock.exc else None,\n exc=mock.exc if mock.exc else None,\n )\n\n # Add the ResponseKwargs instance, not the dict\n grouped_mocks[(mock.url, mock.method)].append(response_kwargs)\n\n output = []\n for (url, method), kwargs_list in grouped_mocks.items():\n # Convert each ResponseKwargs instance to a dict\n responses = [kwargs.to_dict() for kwargs in kwargs_list]\n config = MockConfiguration(url=url, method=method.upper(), responses=responses)\n output.append(config)\n\n return output"
},
{
"identifier": "MockSet",
"path": "multi_api_mocker/utils.py",
"snippet": "class MockSet:\n \"\"\"\n A collection class that manages MockAPIResponse objects and integrates with the\n requests_mock fixture. This class provides efficient access and iteration over\n grouped API responses by their endpoint names, simplifying the process of setting\n up and managing multiple mock responses in tests. It also stores and allows access\n to the requests_mock adapter's _Matcher objects associated with each mock response,\n enabling advanced interactions and assertions in tests.\n\n Parameters:\n api_responses (List[MockAPIResponse]): A list of MockAPIResponse objects, each\n representing a specific API response.\n requests_mock (Mocker): The requests_mock fixture instance used for registering\n the mock API responses.\n matchers (Dict[str, _Matcher]): A dictionary mapping endpoint names to their\n respective requests_mock adapter _Matcher\n objects.\n\n Attributes:\n _response_registry (Dict[str, MockAPIResponse]): A dictionary mapping endpoint\n names to their respective\n MockAPIResponse objects.\n requests_mock (Optional[Mocker]): The requests_mock fixture instance.\n matchers (Dict[str, _Matcher]): A dictionary of _Matcher objects, providing\n detailed control and inspection capabilities\n for the registered mock API responses.\n\n Methods:\n get_matcher(endpoint_name: str) -> _Matcher: Returns the _Matcher object\n associated with the given\n endpoint name.\n \"\"\"\n\n def __init__(\n self,\n api_responses: List[MockAPIResponse],\n requests_mock: Mocker = None,\n matchers: Dict[str, _Matcher] = None,\n ):\n self._response_registry = {\n response.endpoint_name: response for response in api_responses\n }\n self.requests_mock = requests_mock\n self.matchers = matchers or {}\n\n def __getitem__(self, endpoint_name: str) -> MockAPIResponse:\n return self._response_registry[endpoint_name]\n\n def __iter__(self):\n return iter(self._response_registry.values())\n\n def __len__(self):\n return len(self._response_registry)\n\n def __repr__(self):\n endpoint_names = \", \".join(self._response_registry.keys())\n return f\"<{self.__class__.__name__} with endpoints: {endpoint_names}>\"\n\n def get_matcher(self, endpoint_name: str) -> _Matcher:\n return self.matchers.get(endpoint_name)"
}
] | import pytest
from requests_mock import Mocker
from ..utils import group_by_url, MockSet | 1,383 |
@pytest.fixture(scope="function")
def setup_api_mocks(requests_mock: Mocker, request) -> MockSet:
"""
A pytest fixture for configuring mock API responses in a test environment.
It takes instances of MockAPIResponse subclasses, each representing a unique
API call configuration. These instances facilitate the creation of simple or
complex response flows, simulating real-world API interactions.
Parameters:
requests_mock (Mocker): The pytest requests_mock fixture.
request: The pytest request object containing parametrized test data.
Returns:
MockSet: An instance of MockSet containing the organized MockAPIResponse
objects, ready for use in tests.
The fixture supports multiple test scenarios, allowing for thorough
testing of varying API response conditions. This is especially useful
for simulating sequences of API calls like Fork, Commit, and Push
in a version control system context.
Example Usage:
- Single API Call Test:
@pytest.mark.parametrize("setup_api_mocks", [([Fork()])], indirect=True)
- Multi-call Sequence Test:
@pytest.mark.parametrize(
"setup_api_mocks", [([Fork(), Commit(), Push()])], indirect=True
)
- Testing Multiple Scenarios:
@pytest.mark.parametrize(
"setup_api_mocks",
[([Fork(), Commit(), Push()]), ([Fork(), Commit(), ForcePush()])],
indirect=True
)
This fixture converts the list of MockAPIResponse subclasses into MockConfiguration
instances, registers them with requests_mock, and returns a MockSet object, which
allows querying each mock by its endpoint name.
"""
# Convert the incoming parameter to a list of MockConfiguration instances
|
@pytest.fixture(scope="function")
def setup_api_mocks(requests_mock: Mocker, request) -> MockSet:
"""
A pytest fixture for configuring mock API responses in a test environment.
It takes instances of MockAPIResponse subclasses, each representing a unique
API call configuration. These instances facilitate the creation of simple or
complex response flows, simulating real-world API interactions.
Parameters:
requests_mock (Mocker): The pytest requests_mock fixture.
request: The pytest request object containing parametrized test data.
Returns:
MockSet: An instance of MockSet containing the organized MockAPIResponse
objects, ready for use in tests.
The fixture supports multiple test scenarios, allowing for thorough
testing of varying API response conditions. This is especially useful
for simulating sequences of API calls like Fork, Commit, and Push
in a version control system context.
Example Usage:
- Single API Call Test:
@pytest.mark.parametrize("setup_api_mocks", [([Fork()])], indirect=True)
- Multi-call Sequence Test:
@pytest.mark.parametrize(
"setup_api_mocks", [([Fork(), Commit(), Push()])], indirect=True
)
- Testing Multiple Scenarios:
@pytest.mark.parametrize(
"setup_api_mocks",
[([Fork(), Commit(), Push()]), ([Fork(), Commit(), ForcePush()])],
indirect=True
)
This fixture converts the list of MockAPIResponse subclasses into MockConfiguration
instances, registers them with requests_mock, and returns a MockSet object, which
allows querying each mock by its endpoint name.
"""
# Convert the incoming parameter to a list of MockConfiguration instances | api_mocks_configurations = group_by_url(request.param) | 0 | 2023-11-12 08:01:06+00:00 | 2k |
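Putting the fixture and MockSet together, an illustrative test in the style of the docstring's examples. Fork and Commit are user-defined MockAPIResponse subclasses assumed to exist, the URLs are placeholders matching their definitions, and each subclass's endpoint_name is assumed to equal its class name:

import requests

@pytest.mark.parametrize("setup_api_mocks", [([Fork(), Commit()])], indirect=True)
def test_fork_then_commit(setup_api_mocks):
    requests.post("https://api.example.com/fork")    # assumed Fork URL
    requests.post("https://api.example.com/commit")  # assumed Commit URL
    # MockSet exposes the requests_mock matcher for each endpoint by name.
    assert setup_api_mocks.get_matcher("Fork").called
    assert len(setup_api_mocks) == 2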
Jisencc/yolov5_dual_weighting | utils/segment/augmentations.py | [
{
"identifier": "box_candidates",
"path": "utils/augmentations.py",
"snippet": "def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)\n # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio\n w1, h1 = box1[2] - box1[0], box1[3] - box1[1]\n w2, h2 = box2[2] - box2[0], box2[3] - box2[1]\n ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio\n return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates"
},
{
"identifier": "resample_segments",
"path": "utils/general.py",
"snippet": "def resample_segments(segments, n=1000):\n # Up-sample an (n,2) segment\n for i, s in enumerate(segments):\n s = np.concatenate((s, s[0:1, :]), axis=0)\n x = np.linspace(0, len(s) - 1, n)\n xp = np.arange(len(s))\n segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy\n return segments"
},
{
"identifier": "segment2box",
"path": "utils/general.py",
"snippet": "def segment2box(segment, width=640, height=640):\n # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)\n x, y = segment.T # segment xy\n inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)\n x, y, = x[inside], y[inside]\n return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy"
}
] | import math
import random
import cv2
import numpy as np
from ..augmentations import box_candidates
from ..general import resample_segments, segment2box | 1,500 | # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Image augmentation functions
"""
def mixup(im, labels, segments, im2, labels2, segments2):
# Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf
r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0
im = (im * r + im2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
segments = np.concatenate((segments, segments2), 0)
return im, labels, segments
def random_perspective(im,
targets=(),
segments=(),
degrees=10,
translate=.1,
scale=.1,
shear=10,
perspective=0.0,
border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = im.shape[0] + border[0] * 2 # shape(h,w,c)
width = im.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -im.shape[1] / 2 # x translation (pixels)
C[1, 2] = -im.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width) # x translation (pixels)
T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height) # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(im[:, :, ::-1]) # base
# ax[1].imshow(im2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
new_segments = []
if n:
new = np.zeros((n, 4))
| # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Image augmentation functions
"""
def mixup(im, labels, segments, im2, labels2, segments2):
# Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf
r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0
im = (im * r + im2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
segments = np.concatenate((segments, segments2), 0)
return im, labels, segments
def random_perspective(im,
targets=(),
segments=(),
degrees=10,
translate=.1,
scale=.1,
shear=10,
perspective=0.0,
border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = im.shape[0] + border[0] * 2 # shape(h,w,c)
width = im.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -im.shape[1] / 2 # x translation (pixels)
C[1, 2] = -im.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width) # x translation (pixels)
T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height) # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(im[:, :, ::-1]) # base
# ax[1].imshow(im2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
new_segments = []
if n:
new = np.zeros((n, 4)) | segments = resample_segments(segments) # upsample | 1 | 2023-11-12 13:28:26+00:00 | 2k |
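The record cuts off at the segment-resampling call. A sketch of the continuation, reconstructed from the resample_segments, segment2box, and box_candidates helpers shown in this record's context (the repo's exact code may differ):

segments = resample_segments(segments)  # upsample each polygon to n points
for i, segment in enumerate(segments):
    xy = np.ones((len(segment), 3))
    xy[:, :2] = segment
    xy = xy @ M.T  # transform points with the combined matrix
    xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]  # perspective divide
    # clip each warped segment to the image and reduce it to an xyxy box
    new[i] = segment2box(xy, width, height)
    new_segments.append(xy)

# keep only boxes that survive the warp with reasonable size and aspect ratio
i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01)
targets = targets[i]
targets[:, 1:5] = new[i]
new_segments = np.array(new_segments)[i]
return im, targets, new_segments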