Spaces:
Sleeping
Sleeping
billusanda007
committed on
Upload 2 files
Browse files- app.py +162 -0
- requirements.txt +207 -0
app.py
ADDED
@@ -0,0 +1,162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
from transformers import GPT2LMHeadModel, GPT2Tokenizer
|
3 |
+
import gradio as gr
|
4 |
+
import pandas as pd
|
5 |
+
from collections import Counter, defaultdict
|
6 |
+
import os
|
7 |
+
from huggingface_hub import login
|
8 |
+
|
9 |
+
# Get the token from the environment variable
# NOTE(review): `login` is imported above but never called with this token —
# confirm whether `login(api_token)` was intended (needed only for gated models;
# plain "gpt2" downloads anonymously).
api_token = os.getenv('HF_TOKEN')

# Load pre-trained model and tokenizer
# The 124M-parameter GPT-2 checkpoint is downloaded (or read from the HF cache)
# at import time, so the app blocks here on first start-up.
model_name = "gpt2"
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
# Inference-only: disable dropout etc.; gradients are also suppressed per-call
# via torch.no_grad() in the generation function below.
model.eval()
17 |
+
|
18 |
+
def create_ngrams(tokens, n):
    """Return every contiguous n-gram of *tokens* as a tuple, in order.

    Yields an empty list when fewer than *n* tokens are available.
    """
    grams = []
    for start in range(len(tokens) - n + 1):
        grams.append(tuple(tokens[start:start + n]))
    return grams
|
20 |
+
|
21 |
+
def calculate_probabilities(four_gram_counts, three_gram_counts):
    """Maximum-likelihood conditional probabilities from raw 4-gram counts.

    Maps each 3-gram history to {next_token: count(4gram) / count(3gram)}.
    Assumes *three_gram_counts* covers every history seen in *four_gram_counts*.
    """
    table = defaultdict(lambda: defaultdict(float))
    for gram, freq in four_gram_counts.items():
        history = gram[:-1]
        word = gram[-1]
        table[history][word] = freq / three_gram_counts[history]
    return table
|
27 |
+
|
28 |
+
def kneser_ney_smoothing(ngram_counts, lower_order_counts, discount=0.75):
    """Interpolated Kneser-Ney smoothed probabilities P(word | history).

    Fixes over the previous version: the continuation counter was keyed by
    suffix tuples (``ngram[1:]``) but queried with a single token, so the
    smoothing term was always zero; the back-off weight used the global type
    count instead of the per-history follow-type count; and an unused
    ``lower_counts`` counter was built.

    Args:
        ngram_counts: mapping of n-gram tuples to frequencies.
        lower_order_counts: mapping of (n-1)-gram history tuples to
            frequencies; must cover every ``ngram[:-1]`` in *ngram_counts*.
        discount: absolute discount d subtracted from each n-gram count.

    Returns:
        defaultdict mapping history tuple -> {word: smoothed probability}.
        For each observed history the stored probabilities sum to 1 over the
        words that have nonzero continuation counts.
    """
    # N1+(. w): number of distinct histories each final word follows.
    continuation_counts = Counter()
    # N1+(h .): number of distinct words observed after each history.
    follow_type_counts = Counter()
    for ngram in ngram_counts:
        continuation_counts[ngram[-1]] += 1
        follow_type_counts[ngram[:-1]] += 1

    # Total distinct n-gram types normalises the continuation distribution.
    total_types = len(ngram_counts)

    def continuation_probability(word):
        # P_cont(w) = N1+(. w) / total n-gram types; sums to 1 over the vocab.
        return continuation_counts[word] / total_types if total_types else 0.0

    probabilities = defaultdict(lambda: defaultdict(float))
    for ngram, count in ngram_counts.items():
        history = ngram[:-1]
        history_count = lower_order_counts[history]
        discounted_count = max(count - discount, 0)
        # lambda(h) = d * N1+(h .) / c(h): the probability mass freed by
        # discounting, redistributed according to P_cont.
        lambda_factor = (discount / history_count) * follow_type_counts[history]
        probabilities[history][ngram[-1]] = (
            discounted_count / history_count
            + lambda_factor * continuation_probability(ngram[-1])
        )

    return probabilities
|
47 |
+
|
48 |
+
def generate_text_with_probs(initial_context, top_p, max_length, top_k):
    """Sample up to *max_length* tokens from GPT-2 with nucleus (top-p)
    filtering, recording a per-step table of candidate probabilities.

    Args:
        initial_context: prompt string fed to the tokenizer.
        top_p: cumulative-probability cutoff for nucleus sampling (0..1).
        max_length: maximum number of NEW tokens to sample.
        top_k: number of highest-ranked candidates shown in each step's table.

    Returns:
        (generated_text, token_tables) where token_tables is a list of
        (label_string, DataFrame) pairs, one per sampled token.
    """
    input_ids = tokenizer.encode(initial_context, return_tensors="pt")
    generated_text = initial_context  # overwritten by the final decode below
    token_tables = []

    with torch.no_grad():
        for _ in range(max_length):
            # Full forward pass each step (no KV cache); logits for the last
            # position give the next-token distribution.
            outputs = model(input_ids=input_ids)
            next_token_logits = outputs.logits[:, -1, :]

            # Apply top-p (nucleus) sampling: sort descending, find where the
            # cumulative probability exceeds top_p.
            sorted_logits, sorted_indices = torch.sort(next_token_logits, descending=True)
            cumulative_probs = torch.cumsum(torch.softmax(sorted_logits, dim=-1), dim=-1)
            sorted_indices_to_remove = cumulative_probs > top_p
            # Shift right so the first token crossing the threshold is kept,
            # and never remove the single most probable token.
            sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
            sorted_indices_to_remove[..., 0] = 0

            # Convert boolean mask to indices to set logits to -inf.
            # NOTE(review): this flat boolean indexing is only correct for
            # batch size 1 (the case here) — confirm if batching is ever added.
            indices_to_remove = sorted_indices[sorted_indices_to_remove]
            next_token_logits[:, indices_to_remove] = -float('Inf')

            # Renormalise over the surviving nucleus.
            probabilities = torch.softmax(next_token_logits, dim=-1)

            # Draw the next token from the filtered distribution (stochastic).
            next_token = torch.multinomial(probabilities, num_samples=1)

            # Chosen token's post-filter probability and its decoded text.
            next_token_prob = probabilities[0, next_token].item()
            next_token_text = tokenizer.decode(next_token.item())

            # Top-k candidates by pre-filter rank, with post-filter
            # probabilities (filtered-out entries therefore show 0).
            top_tokens = sorted_indices[0, :top_k]  # Get top k tokens
            top_probs = probabilities[0, top_tokens]
            top_token_probs = [(tokenizer.decode([token.item()]), prob.item()) for token, prob in zip(top_tokens, top_probs)]

            # Create DataFrame for current token's top-k probabilities.
            df = pd.DataFrame(top_token_probs, columns=["Token", "Probability"])
            df.index = df.index + 1  # 1-based row numbering for display
            token_tables.append((f"Next token: {next_token_text} (Probability: {next_token_prob:.4f})", df))

            # Append the sampled token and continue autoregressively.
            input_ids = torch.cat([input_ids, next_token], dim=-1)

            # Stop early on end-of-sequence.
            if next_token.item() == tokenizer.eos_token_id:
                break

    # Decode the full sequence (prompt + generated tokens).
    generated_text = tokenizer.decode(input_ids[0], skip_special_tokens=True)

    return generated_text, token_tables
|
99 |
+
|
100 |
+
def predict_next_token_ngram(input_text, context_text, max_length):
    """Greedily extend *input_text* with a Kneser-Ney smoothed 4-gram model
    built on the fly from *context_text*.

    Change from the previous version: a dead call to
    ``calculate_probabilities`` (whose result was never used) is removed —
    only the smoothed table drives generation.

    Args:
        input_text: seed string to extend.
        context_text: corpus used to estimate the 4-gram counts.
        max_length: cap on the TOTAL number of tokens (seed included).

    Returns:
        (generated_text, token_tables) where token_tables is a list of
        (label_string, DataFrame) pairs with the top-4 candidates per step.
    """
    # Estimate the 4-gram model from the user-supplied context.
    context_tokens = tokenizer.tokenize(context_text)
    four_grams = create_ngrams(context_tokens, 4)
    four_gram_counts = Counter(four_grams)
    three_gram_counts = Counter([gram[:-1] for gram in four_grams])
    probs = kneser_ney_smoothing(four_gram_counts, three_gram_counts)

    input_tokens = tokenizer.tokenize(input_text)
    generated_text = input_text
    token_tables = []

    # Nothing to generate if the seed already reaches max_length tokens.
    if len(input_tokens) >= max_length:
        generated_text = tokenizer.convert_tokens_to_string(input_tokens)
        return generated_text, token_tables

    while len(input_tokens) < max_length:
        input_3_gram = tuple(input_tokens[-3:])
        next_token_probs = probs.get(input_3_gram, {})
        if not next_token_probs:
            # Unseen history: the n-gram model cannot continue.
            break
        # Greedy decoding: always take the most probable continuation.
        next_token = max(next_token_probs, key=next_token_probs.get)
        input_tokens.append(next_token)

        # Record the top-4 candidates for this step.
        top_k = 4
        top_k_tokens = sorted(next_token_probs.items(), key=lambda x: x[1], reverse=True)[:top_k]
        top_k_tokens_df = pd.DataFrame(top_k_tokens, columns=["Token", "Probability"])
        top_k_tokens_df.index = top_k_tokens_df.index + 1  # 1-based rows for display
        # Convert raw BPE tokens to readable text in the table.
        top_k_tokens_df["Token"] = top_k_tokens_df["Token"].apply(lambda x: tokenizer.convert_tokens_to_string([x]))

        token_tables.append((f"Next token: {next_token} (Predicted)", top_k_tokens_df))

    generated_text = tokenizer.convert_tokens_to_string(input_tokens)
    return generated_text, token_tables
|
135 |
+
|
136 |
+
def combined_model_predictions(context_text, initial_context, top_p, max_length, top_k):
    """Run the GPT-2 sampler and the n-gram model, returning both results.

    Returns a 4-tuple:
        (gpt2_text, gpt2_token_tables, ngram_text, ngram_token_tables)
    matching the four Gradio output components.
    """
    llm_text, llm_tables = generate_text_with_probs(
        initial_context, top_p, max_length, top_k
    )
    ngram_text, ngram_tables = predict_next_token_ngram(
        initial_context, context_text, max_length
    )
    return llm_text, llm_tables, ngram_text, ngram_tables
|
141 |
+
|
142 |
+
# Gradio UI: the two textboxes and three sliders map positionally onto
# combined_model_predictions(context_text, initial_context, top_p,
# max_length, top_k).
iface = gr.Interface(
    fn=combined_model_predictions,
    inputs=[
        gr.Textbox(lines=4, placeholder="Enter context for N-gram model..."),
        gr.Textbox(lines=2, placeholder="Enter initial context here..."),
        gr.Slider(0, 1, step=0.01, value=0.9, label="Top-p (nucleus) sampling"),
        gr.Slider(1, 100, step=1, value=50, label="Max length"),
        gr.Slider(1, 50, step=1, value=10, label="Top-k"),
    ],
    outputs=[
        gr.Textbox(label="Generated Text"),
        # NOTE(review): the function returns a *list* of (label, DataFrame)
        # pairs for this slot, not a single frame — confirm gr.Dataframe
        # renders that; it may need a single concatenated DataFrame instead.
        gr.Dataframe(label="LLM Token Probabilities"),
        gr.Textbox(label="N-gram Generated Text"),
        # NOTE(review): same list-of-pairs concern as above.
        gr.Dataframe(label="N-gram Token Predictions"),
    ],
    title="Next Token Visualizer (GPT-2 - 124M param.)",
    description="Generate text using GPT-2 with top-p (nucleus) sampling and see the probabilities of generated tokens in tables, along with N-gram model predictions.",
)

# Launch the Gradio app (blocking; serves the web UI).
iface.launch()
|
requirements.txt
ADDED
@@ -0,0 +1,207 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
absl-py==2.1.0
|
2 |
+
aiofiles==23.2.1
|
3 |
+
annotated-types==0.5.0
|
4 |
+
anyio @ file:///home/conda/feedstock_root/build_artifacts/anyio_1688651106312/work/dist
|
5 |
+
appnope @ file:///home/conda/feedstock_root/build_artifacts/appnope_1649077682618/work
|
6 |
+
argon2-cffi @ file:///home/conda/feedstock_root/build_artifacts/argon2-cffi_1640817743617/work
|
7 |
+
argon2-cffi-bindings @ file:///Users/runner/miniforge3/conda-bld/argon2-cffi-bindings_1666850912616/work
|
8 |
+
arrow @ file:///home/conda/feedstock_root/build_artifacts/arrow_1662382474514/work
|
9 |
+
asttokens @ file:///home/conda/feedstock_root/build_artifacts/asttokens_1670263926556/work
|
10 |
+
async-lru @ file:///home/conda/feedstock_root/build_artifacts/async-lru_1690563019058/work
|
11 |
+
attrs @ file:///home/conda/feedstock_root/build_artifacts/attrs_1683424013410/work
|
12 |
+
Babel @ file:///home/conda/feedstock_root/build_artifacts/babel_1677767029043/work
|
13 |
+
backcall @ file:///home/conda/feedstock_root/build_artifacts/backcall_1592338393461/work
|
14 |
+
backports.functools-lru-cache @ file:///home/conda/feedstock_root/build_artifacts/backports.functools_lru_cache_1687772187254/work
|
15 |
+
beautifulsoup4 @ file:///home/conda/feedstock_root/build_artifacts/beautifulsoup4_1680888073205/work
|
16 |
+
bitsandbytes==0.42.0
|
17 |
+
bleach @ file:///home/conda/feedstock_root/build_artifacts/bleach_1674535352125/work
|
18 |
+
blis==0.7.10
|
19 |
+
Brotli @ file:///Users/runner/miniforge3/conda-bld/brotli-split_1687884292427/work
|
20 |
+
cached-property @ file:///home/conda/feedstock_root/build_artifacts/cached_property_1615209429212/work
|
21 |
+
catalogue==2.0.9
|
22 |
+
certifi==2023.7.22
|
23 |
+
cffi @ file:///Users/runner/miniforge3/conda-bld/cffi_1671179605388/work
|
24 |
+
charset-normalizer @ file:///home/conda/feedstock_root/build_artifacts/charset-normalizer_1688813409104/work
|
25 |
+
chex==0.1.7
|
26 |
+
click==8.1.7
|
27 |
+
colorama @ file:///home/conda/feedstock_root/build_artifacts/colorama_1666700638685/work
|
28 |
+
comm @ file:///home/conda/feedstock_root/build_artifacts/comm_1691044910542/work
|
29 |
+
confection==0.1.3
|
30 |
+
contourpy @ file:///Users/runner/miniforge3/conda-bld/contourpy_1686734165903/work
|
31 |
+
cycler @ file:///home/conda/feedstock_root/build_artifacts/cycler_1635519461629/work
|
32 |
+
cymem==2.0.8
|
33 |
+
debugpy @ file:///Users/runner/miniforge3/conda-bld/debugpy_1691021385546/work
|
34 |
+
decorator @ file:///home/conda/feedstock_root/build_artifacts/decorator_1641555617451/work
|
35 |
+
defusedxml @ file:///home/conda/feedstock_root/build_artifacts/defusedxml_1615232257335/work
|
36 |
+
dm-tree==0.1.8
|
37 |
+
dnspython==2.6.1
|
38 |
+
email_validator==2.2.0
|
39 |
+
entrypoints @ file:///home/conda/feedstock_root/build_artifacts/entrypoints_1643888246732/work
|
40 |
+
etils==1.3.0
|
41 |
+
exceptiongroup @ file:///home/conda/feedstock_root/build_artifacts/exceptiongroup_1688381075899/work
|
42 |
+
executing @ file:///home/conda/feedstock_root/build_artifacts/executing_1667317341051/work
|
43 |
+
faiss-cpu==1.7.4
|
44 |
+
fastapi==0.111.1
|
45 |
+
fastapi-cli==0.0.4
|
46 |
+
fastjsonschema @ file:///home/conda/feedstock_root/build_artifacts/python-fastjsonschema_1690055433477/work/dist
|
47 |
+
ffmpy==0.4.0
|
48 |
+
filelock==3.12.2
|
49 |
+
flax==0.7.2
|
50 |
+
flit_core @ file:///home/conda/feedstock_root/build_artifacts/flit-core_1684084314667/work/source/flit_core
|
51 |
+
fonttools @ file:///Users/runner/miniforge3/conda-bld/fonttools_1691006833604/work
|
52 |
+
fqdn @ file:///home/conda/feedstock_root/build_artifacts/fqdn_1638810296540/work/dist
|
53 |
+
fsspec==2023.9.1
|
54 |
+
gradio==4.39.0
|
55 |
+
gradio_client==1.1.1
|
56 |
+
h11==0.14.0
|
57 |
+
httpcore==1.0.5
|
58 |
+
httptools==0.6.1
|
59 |
+
httpx==0.27.0
|
60 |
+
huggingface-hub==0.24.2
|
61 |
+
idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1663625384323/work
|
62 |
+
importlib-metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1688754491823/work
|
63 |
+
importlib-resources @ file:///home/conda/feedstock_root/build_artifacts/importlib_resources_1691408075105/work
|
64 |
+
ipykernel @ file:///Users/runner/miniforge3/conda-bld/ipykernel_1691424478312/work
|
65 |
+
ipython @ file:///Users/runner/miniforge3/conda-bld/ipython_1683289207929/work
|
66 |
+
ipywidgets @ file:///home/conda/feedstock_root/build_artifacts/ipywidgets_1690877070294/work
|
67 |
+
isoduration @ file:///home/conda/feedstock_root/build_artifacts/isoduration_1638811571363/work/dist
|
68 |
+
jax==0.4.13
|
69 |
+
jaxlib==0.4.13
|
70 |
+
jedi @ file:///home/conda/feedstock_root/build_artifacts/jedi_1690896916983/work
|
71 |
+
Jinja2 @ file:///home/conda/feedstock_root/build_artifacts/jinja2_1654302431367/work
|
72 |
+
joblib @ file:///home/conda/feedstock_root/build_artifacts/joblib_1691577114857/work
|
73 |
+
json5 @ file:///home/conda/feedstock_root/build_artifacts/json5_1688248289187/work
|
74 |
+
jsonpointer==2.0
|
75 |
+
jsonschema @ file:///home/conda/feedstock_root/build_artifacts/jsonschema-meta_1691407964752/work
|
76 |
+
jsonschema-specifications @ file:///home/conda/feedstock_root/build_artifacts/jsonschema-specifications_1689701150890/work
|
77 |
+
jupyter @ file:///Users/runner/miniforge3/conda-bld/jupyter_1670249744855/work
|
78 |
+
jupyter-console @ file:///home/conda/feedstock_root/build_artifacts/jupyter_console_1678118109161/work
|
79 |
+
jupyter-events @ file:///home/conda/feedstock_root/build_artifacts/jupyter_events_1691505939576/work
|
80 |
+
jupyter-lsp @ file:///home/conda/feedstock_root/build_artifacts/jupyter-lsp-meta_1685453365113/work/jupyter-lsp
|
81 |
+
jupyter_client @ file:///home/conda/feedstock_root/build_artifacts/jupyter_client_1687700988094/work
|
82 |
+
jupyter_core @ file:///Users/runner/miniforge3/conda-bld/jupyter_core_1686775732472/work
|
83 |
+
jupyter_server @ file:///home/conda/feedstock_root/build_artifacts/jupyter_server_1687869799272/work
|
84 |
+
jupyter_server_terminals @ file:///home/conda/feedstock_root/build_artifacts/jupyter_server_terminals_1673491454549/work
|
85 |
+
jupyterlab @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_1691085900640/work
|
86 |
+
jupyterlab-pygments @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_pygments_1649936611996/work
|
87 |
+
jupyterlab-widgets @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_widgets_1688489450369/work
|
88 |
+
jupyterlab_server @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_server_1690205927615/work
|
89 |
+
kiwisolver @ file:///Users/runner/miniforge3/conda-bld/kiwisolver_1666805848360/work
|
90 |
+
langcodes==3.3.0
|
91 |
+
markdown-it-py==3.0.0
|
92 |
+
MarkupSafe @ file:///Users/runner/miniforge3/conda-bld/markupsafe_1685769224358/work
|
93 |
+
matplotlib @ file:///Users/runner/miniforge3/conda-bld/matplotlib-suite_1688684899624/work
|
94 |
+
matplotlib-inline @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-inline_1660814786464/work
|
95 |
+
mdurl==0.1.2
|
96 |
+
micrograd==0.1.0
|
97 |
+
mistune @ file:///home/conda/feedstock_root/build_artifacts/mistune_1686313613819/work/dist
|
98 |
+
ml-dtypes==0.2.0
|
99 |
+
mpmath==1.3.0
|
100 |
+
msgpack==1.0.8
|
101 |
+
munkres==1.1.4
|
102 |
+
murmurhash==1.0.10
|
103 |
+
nbclient @ file:///home/conda/feedstock_root/build_artifacts/nbclient_1684790896106/work
|
104 |
+
nbconvert @ file:///home/conda/feedstock_root/build_artifacts/nbconvert-meta_1690313290323/work
|
105 |
+
nbformat @ file:///home/conda/feedstock_root/build_artifacts/nbformat_1690814868471/work
|
106 |
+
nest-asyncio @ file:///home/conda/feedstock_root/build_artifacts/nest-asyncio_1664684991461/work
|
107 |
+
networkx==3.1
|
108 |
+
nltk==3.8.1
|
109 |
+
notebook @ file:///home/conda/feedstock_root/build_artifacts/notebook_1691139191280/work
|
110 |
+
notebook_shim @ file:///home/conda/feedstock_root/build_artifacts/notebook-shim_1682360583588/work
|
111 |
+
numpy @ file:///Users/runner/miniforge3/conda-bld/numpy_1687808433287/work
|
112 |
+
opt-einsum==3.3.0
|
113 |
+
optax==0.1.8
|
114 |
+
orbax-checkpoint==0.2.3
|
115 |
+
orjson==3.10.6
|
116 |
+
overrides @ file:///home/conda/feedstock_root/build_artifacts/overrides_1691338815398/work
|
117 |
+
packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1681337016113/work
|
118 |
+
pandas @ file:///Users/runner/miniforge3/conda-bld/pandas_1688740593370/work
|
119 |
+
pandocfilters @ file:///home/conda/feedstock_root/build_artifacts/pandocfilters_1631603243851/work
|
120 |
+
parso @ file:///home/conda/feedstock_root/build_artifacts/parso_1638334955874/work
|
121 |
+
pathy==0.10.2
|
122 |
+
pexpect @ file:///home/conda/feedstock_root/build_artifacts/pexpect_1667297516076/work
|
123 |
+
pickleshare @ file:///home/conda/feedstock_root/build_artifacts/pickleshare_1602536217715/work
|
124 |
+
Pillow @ file:///Users/runner/miniforge3/conda-bld/pillow_1688255913792/work
|
125 |
+
pkgutil_resolve_name @ file:///home/conda/feedstock_root/build_artifacts/pkgutil-resolve-name_1633981968097/work
|
126 |
+
platformdirs @ file:///home/conda/feedstock_root/build_artifacts/platformdirs_1690813113769/work
|
127 |
+
pooch @ file:///home/conda/feedstock_root/build_artifacts/pooch_1679580333621/work
|
128 |
+
preshed==3.0.9
|
129 |
+
prometheus-client @ file:///home/conda/feedstock_root/build_artifacts/prometheus_client_1689032443210/work
|
130 |
+
prompt-toolkit @ file:///home/conda/feedstock_root/build_artifacts/prompt-toolkit_1688565951714/work
|
131 |
+
psutil @ file:///Users/runner/miniforge3/conda-bld/psutil_1681775366577/work
|
132 |
+
ptyprocess @ file:///home/conda/feedstock_root/build_artifacts/ptyprocess_1609419310487/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl
|
133 |
+
pure-eval @ file:///home/conda/feedstock_root/build_artifacts/pure_eval_1642875951954/work
|
134 |
+
pycparser @ file:///home/conda/feedstock_root/build_artifacts/pycparser_1636257122734/work
|
135 |
+
pydantic==2.3.0
|
136 |
+
pydantic_core==2.6.3
|
137 |
+
pydub==0.25.1
|
138 |
+
Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1691408637400/work
|
139 |
+
pyparsing @ file:///home/conda/feedstock_root/build_artifacts/pyparsing_1652235407899/work
|
140 |
+
PySocks @ file:///home/conda/feedstock_root/build_artifacts/pysocks_1661604839144/work
|
141 |
+
python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1626286286081/work
|
142 |
+
python-dotenv==1.0.1
|
143 |
+
python-json-logger @ file:///home/conda/feedstock_root/build_artifacts/python-json-logger_1677079630776/work
|
144 |
+
python-multipart==0.0.9
|
145 |
+
pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1680088766131/work
|
146 |
+
PyYAML @ file:///Users/runner/miniforge3/conda-bld/pyyaml_1666772523114/work
|
147 |
+
pyzmq @ file:///Users/runner/miniforge3/conda-bld/pyzmq_1691667741827/work
|
148 |
+
referencing @ file:///home/conda/feedstock_root/build_artifacts/referencing_1691337268233/work
|
149 |
+
regex==2023.8.8
|
150 |
+
requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1684774241324/work
|
151 |
+
rfc3339-validator @ file:///home/conda/feedstock_root/build_artifacts/rfc3339-validator_1638811747357/work
|
152 |
+
rfc3986-validator @ file:///home/conda/feedstock_root/build_artifacts/rfc3986-validator_1598024191506/work
|
153 |
+
rich==13.7.1
|
154 |
+
rpds-py @ file:///Users/runner/miniforge3/conda-bld/rpds-py_1689705310488/work
|
155 |
+
ruff==0.5.5
|
156 |
+
safetensors==0.4.3
|
157 |
+
scikit-learn @ file:///Users/runner/miniforge3/conda-bld/scikit-learn_1688116469994/work
|
158 |
+
scipy @ file:///Users/runner/miniforge3/conda-bld/scipy-split_1683900453702/work/base/dist/scipy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl#sha256=06d649e33fd400c8608c1083a7bb6f490989227277eae7f65577d925d87848e8
|
159 |
+
semantic-version==2.10.0
|
160 |
+
Send2Trash @ file:///home/conda/feedstock_root/build_artifacts/send2trash_1628511208346/work
|
161 |
+
sentencepiece==0.2.0
|
162 |
+
shellingham==1.5.4
|
163 |
+
six @ file:///home/conda/feedstock_root/build_artifacts/six_1620240208055/work
|
164 |
+
smart-open==6.4.0
|
165 |
+
sniffio @ file:///home/conda/feedstock_root/build_artifacts/sniffio_1662051266223/work
|
166 |
+
soupsieve @ file:///home/conda/feedstock_root/build_artifacts/soupsieve_1658207591808/work
|
167 |
+
spacy==3.6.1
|
168 |
+
spacy-legacy==3.0.12
|
169 |
+
spacy-loggers==1.0.5
|
170 |
+
srsly==2.4.7
|
171 |
+
stack-data @ file:///home/conda/feedstock_root/build_artifacts/stack_data_1669632077133/work
|
172 |
+
starlette==0.37.2
|
173 |
+
sympy==1.12
|
174 |
+
tensorstore==0.1.45
|
175 |
+
terminado @ file:///Users/runner/miniforge3/conda-bld/terminado_1670254106711/work
|
176 |
+
thinc==8.1.12
|
177 |
+
threadpoolctl @ file:///home/conda/feedstock_root/build_artifacts/threadpoolctl_1689261241048/work
|
178 |
+
tinycss2 @ file:///home/conda/feedstock_root/build_artifacts/tinycss2_1666100256010/work
|
179 |
+
tokenizers==0.19.1
|
180 |
+
tomli @ file:///home/conda/feedstock_root/build_artifacts/tomli_1644342247877/work
|
181 |
+
tomlkit==0.12.0
|
182 |
+
toolz==0.12.1
|
183 |
+
torch==2.0.1
|
184 |
+
torchaudio==2.0.2
|
185 |
+
torchvision==0.15.2
|
186 |
+
tornado @ file:///Users/runner/miniforge3/conda-bld/tornado_1684150249057/work
|
187 |
+
tqdm @ file:///home/conda/feedstock_root/build_artifacts/tqdm_1691671248568/work
|
188 |
+
traitlets @ file:///home/conda/feedstock_root/build_artifacts/traitlets_1675110562325/work
|
189 |
+
transformers==4.43.1
|
190 |
+
typer==0.12.3
|
191 |
+
typing-utils @ file:///home/conda/feedstock_root/build_artifacts/typing_utils_1622899189314/work
|
192 |
+
typing_extensions==4.12.2
|
193 |
+
tzdata @ file:///home/conda/feedstock_root/build_artifacts/python-tzdata_1680081134351/work
|
194 |
+
unicodedata2 @ file:///Users/runner/miniforge3/conda-bld/unicodedata2_1667240193675/work
|
195 |
+
uri-template @ file:///home/conda/feedstock_root/build_artifacts/uri-template_1688655812972/work/dist
|
196 |
+
urllib3 @ file:///home/conda/feedstock_root/build_artifacts/urllib3_1689789803562/work
|
197 |
+
uvicorn==0.30.3
|
198 |
+
uvloop==0.19.0
|
199 |
+
wasabi==1.1.2
|
200 |
+
watchfiles==0.22.0
|
201 |
+
wcwidth @ file:///home/conda/feedstock_root/build_artifacts/wcwidth_1673864653149/work
|
202 |
+
webcolors @ file:///home/conda/feedstock_root/build_artifacts/webcolors_1679900785843/work
|
203 |
+
webencodings==0.5.1
|
204 |
+
websocket-client @ file:///home/conda/feedstock_root/build_artifacts/websocket-client_1687789148259/work
|
205 |
+
websockets==11.0.3
|
206 |
+
widgetsnbextension @ file:///home/conda/feedstock_root/build_artifacts/widgetsnbextension_1688504439014/work
|
207 |
+
zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1689374466814/work
|