dntrplytch
committed on
reverted data type, modified TextIteratorStreamer timeout to 120
app.py CHANGED
@@ -26,7 +26,7 @@ tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
     device_map="auto",
-    torch_dtype=torch.
+    torch_dtype=torch.bfloat16,
 )
 model.eval()
 
@@ -57,7 +57,7 @@ def generate(
         gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
     input_ids = input_ids.to(model.device)
 
-    streamer = TextIteratorStreamer(tokenizer, timeout=
+    streamer = TextIteratorStreamer(tokenizer, timeout=120.0, skip_prompt=True, skip_special_tokens=True)
     generate_kwargs = dict(
         {"input_ids": input_ids},
         streamer=streamer,
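The diff only shows where the streamer is created and where the dtype is set; the rest of the app is not part of this commit. For context, here is a minimal sketch of the standard TextIteratorStreamer pattern that code like this typically follows. The model_id placeholder, max_new_tokens value, sampling flags, and the generation thread/yield loop are assumptions for illustration, not lines from this Space's app.py.

from threading import Thread

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = "some/model-id"  # assumption: the Space's actual model_id is not shown in this diff
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype=torch.bfloat16,  # the dtype restored by this commit
)
model.eval()


def generate(message: str):
    input_ids = tokenizer(message, return_tensors="pt").input_ids.to(model.device)

    # timeout=120.0 (set in this commit) lets the consumer wait up to two minutes
    # for the next token before the streamer raises queue.Empty.
    streamer = TextIteratorStreamer(tokenizer, timeout=120.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        {"input_ids": input_ids},
        streamer=streamer,
        max_new_tokens=256,  # assumption: the real value lies outside the diff
        do_sample=True,
    )
    # model.generate runs in a background thread and pushes decoded text into the
    # streamer; the foreground loop yields partial output, e.g. for Gradio streaming.
    thread = Thread(target=model.generate, kwargs=generate_kwargs)
    thread.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)

With this pattern, a timeout that is too short can abort a slow generation mid-stream, which is presumably why this commit raises it to 120 seconds.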