gba16326553 committed
Commit 321be75 · verified · 1 Parent(s): 718b303

Update app.py

Files changed (1)
  1. app.py +11 -1
app.py CHANGED
@@ -14,6 +14,15 @@ model = AutoModelForCausalLM.from_pretrained("models/google/gemma-2-2b-it", torc
 #model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
 #The model was loaded with use_flash_attention_2=True, which is deprecated and may be removed in a future release. Please use `attn_implementation="flash_attention_2"` instead.
 
+def generate_text(prompt):
+    inputs = tokenizer(prompt, return_tensors="pt")
+    outputs = model.generate(**inputs)
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+iface = gr.Interface(fn=generate_text, inputs="text", outputs="text")
+
+
+"""
 def predict(input, history=[]):
     # tokenize the new input sentence
     new_user_input_ids = tokenizer.encode(
@@ -47,4 +56,5 @@ gr.Interface(
     inputs=["text", "state"],
     outputs=["chatbot", "state"],
     theme="finlaymacklon/boxy_violet",
-).launch()
+).launch()
+"""