Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -57,7 +57,7 @@ def generate_text(messages):
         model_id, token=huggingface_token, torch_dtype=dtype, device_map=device
     )
     text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer, torch_dtype=dtype, device_map=device)  # pipeline has no .to(device)
-    result = text_generator(messages, max_new_tokens=
+    result = text_generator(messages, max_new_tokens=64, do_sample=True, temperature=0.7)
 
     generated_output = result[0]["generated_text"]
     if isinstance(generated_output, list):
@@ -73,6 +73,8 @@ def generate_text(messages):
 
 
 def call_generate_text(message, history):
+    if len(message) == 0:
+        message.append({"role": "system", "content": "you response around 20 words"})
     # history.append({"role": "user", "content": message})
     print(message)
     print(history)
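For context, a minimal standalone sketch of the generation path these two hunks touch, assuming a recent transformers version that accepts chat-style message lists in the text-generation pipeline. The model id, the HF_TOKEN handling, and the sample conversation are illustrative placeholders, not taken from the commit.

# Minimal sketch of the code path edited above; model_id, token handling,
# and the example conversation are assumptions, not part of the diff.
import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

huggingface_token = os.getenv("HF_TOKEN")  # assumed: token stored as a Space secret
model_id = "Qwen/Qwen2.5-0.5B-Instruct"    # hypothetical small chat model
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.bfloat16 if device == "cuda" else torch.float32

tokenizer = AutoTokenizer.from_pretrained(model_id, token=huggingface_token)
model = AutoModelForCausalLM.from_pretrained(
    model_id, token=huggingface_token, torch_dtype=dtype, device_map=device
)
# The pipeline object has no .to(device); placement comes from device_map.
text_generator = pipeline(
    "text-generation", model=model, tokenizer=tokenizer, torch_dtype=dtype, device_map=device
)

# Mirror of the added guard: an empty message list gets a short system prompt.
messages = []
if len(messages) == 0:
    messages.append({"role": "system", "content": "you response around 20 words"})
messages.append({"role": "user", "content": "Hello, what can you do?"})

# The changed call: bounded, sampled decoding.
result = text_generator(messages, max_new_tokens=64, do_sample=True, temperature=0.7)

generated_output = result[0]["generated_text"]
# With chat-style input, generated_text is the full conversation; the reply is last.
if isinstance(generated_output, list):
    print(generated_output[-1]["content"])
else:
    print(generated_output)

Bounding max_new_tokens keeps each generation call short, and do_sample=True with temperature=0.7 trades deterministic output for some variety, which fits the roughly 20-word replies the added system prompt asks for.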