Upload app.py
app.py CHANGED
@@ -108,13 +108,14 @@ def instruct_generate(
     return output
 
 
 # Configure the specific parameters
-pretrained_path = hf_hub_download(
-    repo_id="Gary3410/pretrain_lit_llama", filename="lit-llama.pth")
+# pretrained_path = hf_hub_download(
+#     repo_id="Gary3410/pretrain_lit_llama", filename="lit-llama.pth")
 tokenizer_path = hf_hub_download(
     repo_id="Gary3410/pretrain_lit_llama", filename="tokenizer.model")
 adapter_path = hf_hub_download(
     repo_id="Gary3410/pretrain_lit_llama", filename="lit-llama-adapter-finetuned_15k.pth")
 # adapter_path = "lit-llama-adapter-finetuned_15k.pth"
+pretrained_path = "lit-llama.pth"
 example_path = "example.json"
 # If 1024 is not enough, change it to 512
 max_seq_len = 1024
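
For context, `hf_hub_download` (from `huggingface_hub`) fetches a single file from a Hub repository into the local cache and returns the path of the cached copy. Commenting it out in favour of a hard-coded `pretrained_path = "lit-llama.pth"` therefore only changes where the path comes from: the base checkpoint is now expected to sit next to app.py instead of being pulled from Gary3410/pretrain_lit_llama. A minimal sketch of the resulting path resolution follows; the Path-based fallback is an illustrative assumption, not code from app.py:

# Sketch only: resolve the base checkpoint path. Assumes huggingface_hub is installed;
# the existence check is hypothetical, app.py itself simply hard-codes the local file.
from pathlib import Path
from huggingface_hub import hf_hub_download

if Path("lit-llama.pth").exists():
    # Behaviour introduced by this commit: use a checkpoint shipped alongside app.py.
    pretrained_path = "lit-llama.pth"
else:
    # Previous behaviour: download the checkpoint from the Hub and use its cached path.
    pretrained_path = hf_hub_download(
        repo_id="Gary3410/pretrain_lit_llama", filename="lit-llama.pth")

print(pretrained_path)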