Locon213 committed on
Commit
8d1ac51
·
verified ·
1 Parent(s): 15b72f6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -5
app.py CHANGED
@@ -1,22 +1,26 @@
1
  import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
 
4
- # Загружаем модель и токенизатор
5
  model_name = "nvidia/Hymba-1.5B-Instruct"
6
- tokenizer = AutoTokenizer.from_pretrained(model_name)
7
- model = AutoModelForCausalLM.from_pretrained(model_name)
8
 
9
- # Функция генерации текста
 
 
 
 
10
  def generate_text(prompt):
11
  inputs = tokenizer(prompt, return_tensors="pt")
12
  outputs = model.generate(**inputs, max_length=100)
13
  return tokenizer.decode(outputs[0], skip_special_tokens=True)
14
 
15
- # Интерфейс Gradio
16
  interface = gr.Interface(
17
  fn=generate_text,
18
  inputs=gr.Textbox(label="Введите запрос"),
19
  outputs=gr.Textbox(label="Ответ")
20
  )
21
 
 
22
  interface.launch()
 
 
1
  import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
 
# Hugging Face Hub identifier of the checkpoint to serve.
model_name = "nvidia/Hymba-1.5B-Instruct"

# Load tokenizer and model weights. trust_remote_code=True is needed
# because the Hymba checkpoint ships its own custom modeling code.
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
# Text generation callback for the Gradio interface.
def generate_text(prompt):
    """Generate a continuation of *prompt* with the Hymba model.

    Args:
        prompt: Free-form user text used to condition the model.

    Returns:
        str: The decoded output (prompt plus continuation) with
        special tokens stripped.
    """
    # Move tensors to the model's device so this also works on GPU.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Bug fix: the original used max_length=100, which caps the TOTAL
    # token count (prompt + generation) — prompts near/over 100 tokens
    # produced little or no new text. max_new_tokens bounds only the
    # generated continuation.
    outputs = model.generate(**inputs, max_new_tokens=100)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Wire the generation function into a minimal Gradio UI:
# one text box for the prompt, one for the model's answer.
interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Введите запрос"),
    outputs=gr.Textbox(label="Ответ"),
)

# Start the web app.
interface.launch()
26
+