gba16326553 committed on
Commit f1dca2a · verified · 1 Parent(s): 738a4b3

Update app.py

Files changed (1): app.py +11 -3
app.py CHANGED
@@ -1,4 +1,4 @@
- from transformers import AutoModelForCausalLM, AutoTokenizer
+ from transformers import AutoModelForCausalLM, AutoTokenizer,AutoModel
  import gradio as gr
  import torch

@@ -6,8 +6,16 @@ title = "🤖AI ChatBot"
  description = "A State-of-the-Art Large-scale Pretrained Response generation model (DialoGPT)"
  examples = [["How are you?"]]

- tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it")
- model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b-it", torch_dtype=torch.float16 )
+
+ # Load model directly
+ from transformers import AutoModel
+ model = AutoModel.from_pretrained("ironlanderl/gemma-2-2b-it-Q5_K_M-GGUF")
+ #modelName = "google/gemma-2-2b-it"
+ modelName = "ironlanderl/gemma-2-2b-it-Q5_K_M-GGUF"
+ tokenizer = AutoTokenizer.from_pretrained(modelName)
+
+ model = AutoModel.from_pretrained(modelName)
+ #model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b-it", torch_dtype=torch.float16 )
  #stvlynn/Gemma-2-2b-Chinese-it
  #tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
  #model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
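
For context, a chat app like this needs a model with a language-modeling head (so it can call generate), which the plain AutoModel class used in the commit does not provide, and transformers loads GGUF repos by dequantizing one specific .gguf file passed via the gguf_file argument. The sketch below is not part of the commit: it assumes a transformers version with Gemma-2 GGUF support, the gguf package installed, and a hypothetical filename inside the ironlanderl/gemma-2-2b-it-Q5_K_M-GGUF repo; check the repo's file list before using it.

# Minimal sketch, not from the commit: load the GGUF checkpoint with a causal-LM head.
from transformers import AutoModelForCausalLM, AutoTokenizer

modelName = "ironlanderl/gemma-2-2b-it-Q5_K_M-GGUF"
ggufFile = "gemma-2-2b-it-q5_k_m.gguf"  # assumed filename; verify against the repo

# transformers dequantizes the GGUF weights on load, so this needs enough RAM for fp32/fp16 weights.
tokenizer = AutoTokenizer.from_pretrained(modelName, gguf_file=ggufFile)
model = AutoModelForCausalLM.from_pretrained(modelName, gguf_file=ggufFile)

# Quick smoke test mirroring the app's example prompt.
inputs = tokenizer("How are you?", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))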