Lingo-IITGN committed
Commit 99689c5 · verified · 1 Parent(s): f95955d

Update app.py

Files changed (1): app.py +14 -14
app.py CHANGED
@@ -1,27 +1,27 @@
-import gradio as gr, spaces
+import spaces, gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
 
-# tokenizer = AutoTokenizer.from_pretrained("LingoIITGN/ganga-1b")
-# model = AutoModelForCausalLM.from_pretrained("LingoIITGN/ganga-1b")
-
-# @spaces.GPU
-# def greet(input_text):
-#     input_token = tokenizer.encode(input_text, return_tensors="pt").to("cuda")
-#     output = model.generate(input_token, max_new_tokens=100, num_return_sequences=1, do_sample=True, top_k=50, top_p=0.95, temperature=0.7)
-#     output_text = tokenizer.batch_decode(output)[0]
-#     return output_text
-
-# demo = gr.Interface(fn=greet, inputs=["text"], outputs=["text"],)
+tokenizer = AutoTokenizer.from_pretrained("LingoIITGN/ganga-1b")
+model = AutoModelForCausalLM.from_pretrained("LingoIITGN/ganga-1b")
 
 @spaces.GPU
 def greet(input_text):
-    input_token = tokenizer.encode(input_text, return_tensors="pt").to("cpu")
-
+    input_token = tokenizer.encode(input_text, return_tensors="pt").to("cuda")
     output = model.generate(input_token, max_new_tokens=100, num_return_sequences=1, do_sample=True, top_k=50, top_p=0.95, temperature=0.7)
     output_text = tokenizer.batch_decode(output)[0]
     return output_text
 
+# demo = gr.Interface(fn=greet, inputs=["text"], outputs=["text"],)
+
+# @spaces.GPU
+# def greet(input_text):
+#     input_token = tokenizer.encode(input_text, return_tensors="pt").to("cpu")
+
+#     output = model.generate(input_token, max_new_tokens=100, num_return_sequences=1, do_sample=True, top_k=50, top_p=0.95, temperature=0.7)
+#     output_text = tokenizer.batch_decode(output)[0]
+#     return output_text
+
 
 demo = gr.Interface(fn=greet, inputs=["text"], outputs=["text"],)
 demo.launch()
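
For reference, a minimal sketch of the app this commit produces: it enables the previously commented-out loading of LingoIITGN/ganga-1b and switches inference from .to("cpu") to .to("cuda") under the @spaces.GPU decorator. One detail here is an assumption, not part of the commit: the model.to("cuda") call inside greet, added because the committed code moves only the input tensor to the GPU while transformers' generate expects the model and its inputs on the same device.

import spaces
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load tokenizer and model once at startup.
tokenizer = AutoTokenizer.from_pretrained("LingoIITGN/ganga-1b")
model = AutoModelForCausalLM.from_pretrained("LingoIITGN/ganga-1b")

@spaces.GPU  # on a ZeroGPU Space, attaches a GPU for the duration of each call
def greet(input_text):
    # Assumption for illustration: move the model to the GPU as well,
    # since generate() fails if model and inputs sit on different devices.
    model.to("cuda")
    # Encode the prompt and place it on the GPU, as the commit does.
    input_token = tokenizer.encode(input_text, return_tensors="pt").to("cuda")
    # Sampling parameters taken verbatim from the committed code.
    output = model.generate(
        input_token,
        max_new_tokens=100,
        num_return_sequences=1,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        temperature=0.7,
    )
    return tokenizer.batch_decode(output)[0]

demo = gr.Interface(fn=greet, inputs=["text"], outputs=["text"])
demo.launch()

Keeping the device move inside the decorated function is one reasonable choice on a ZeroGPU Space, since that is where a GPU is guaranteed to be attached; a Space with a dedicated GPU could equally move the model to "cuda" once at import time.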