Spaces:
Running
Running
BuildTools
committed on
Commit
·
fe4d0c0
1
Parent(s):
d64a0eb
cpu3
Browse files
app.py
CHANGED
@@ -8,9 +8,6 @@ from accelerate import infer_auto_device_map, init_empty_weights, load_checkpoin
|
|
8 |
import gradio as gr
|
9 |
import time
|
10 |
|
11 |
-
#使用cpu
|
12 |
-
map_location=torch.device('cpu')
|
13 |
-
|
14 |
model_path = "THUDM/chatglm-6b-int4"
|
15 |
# 载入Tokenizer
|
16 |
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
|
@@ -18,7 +15,7 @@ tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
|
|
18 |
config = AutoConfig.from_pretrained(model_path, trust_remote_code=True, pre_seq_len=128)
|
19 |
model = AutoModel.from_pretrained(model_path, config=config, trust_remote_code=True)
|
20 |
# 此处使用你的 ptuning 工作目录
|
21 |
-
prefix_state_dict = torch.load(os.path.join("./xiaowo", "pytorch_model.bin"))
|
22 |
new_prefix_state_dict = {}
|
23 |
for k, v in prefix_state_dict.items():
|
24 |
new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
|
|
|
8 |
import gradio as gr
|
9 |
import time
|
10 |
|
|
|
|
|
|
|
11 |
model_path = "THUDM/chatglm-6b-int4"
|
12 |
# 载入Tokenizer
|
13 |
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
|
|
|
15 |
config = AutoConfig.from_pretrained(model_path, trust_remote_code=True, pre_seq_len=128)
|
16 |
model = AutoModel.from_pretrained(model_path, config=config, trust_remote_code=True)
|
17 |
# 此处使用你的 ptuning 工作目录
|
18 |
+
prefix_state_dict = torch.load(os.path.join("./xiaowo", "pytorch_model.bin"), map_location=torch.device('cpu'))
|
19 |
new_prefix_state_dict = {}
|
20 |
for k, v in prefix_state_dict.items():
|
21 |
new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
|