Update README.md
README.md CHANGED
@@ -85,7 +85,8 @@ Users (both direct and downstream) should be made aware of the risks, biases and
 
 ## How to Get Started with the Model
 In Google Colab:
-
+```
+
 !pip install -q -U transformers peft accelerate optimum
 !pip install datasets==2.15.0
 !pip install auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu117/
@@ -124,7 +125,8 @@ prompt_template=f'''<s>[INST] {prompt} [/INST]'''
 input_ids = tokenizer(prompt, return_tensors='pt').input_ids.cuda()
 output = persisted_model.generate(inputs=input_ids, temperature=0.7, do_sample=True, top_p=0.95, top_k=40, max_new_tokens=512)
 print(tokenizer.decode(output[0]))
-
+
+```
 
 # To perform inference on the test dataset example load the model from the checkpoint
 persisted_model = AutoPeftModelForCausalLM.from_pretrained(
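For context, the block being fenced by this commit is a Colab inference walkthrough that the diff only shows in fragments. Below is a minimal, self-contained sketch of that flow under stated assumptions: the packages from the `!pip install` lines are already installed, and `checkpoint_dir` and `prompt` are placeholders, since the diff truncates the actual `from_pretrained` arguments.

```python
# Minimal sketch of the README's inference flow, assembled from the fragments
# visible in this diff. `checkpoint_dir` and `prompt` are placeholders, not
# values from the original README.
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

checkpoint_dir = "path/to/checkpoint"  # hypothetical; the diff cuts off the real argument

# Reload the fine-tuned adapter (plus its base model) from the saved checkpoint
persisted_model = AutoPeftModelForCausalLM.from_pretrained(
    checkpoint_dir,
    device_map="auto",
)
# Assumes the tokenizer was saved alongside the adapter
tokenizer = AutoTokenizer.from_pretrained(checkpoint_dir)

prompt = "Your instruction here"  # placeholder
# Mistral/Llama-2-style instruction wrapper, as defined in the README
prompt_template = f'''<s>[INST] {prompt} [/INST]'''

# The README tokenizes `prompt`; tokenizing the wrapped template appears to be
# the intent, so that is what this sketch does.
input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()
output = persisted_model.generate(
    inputs=input_ids,
    temperature=0.7,
    do_sample=True,
    top_p=0.95,
    top_k=40,
    max_new_tokens=512,
)
print(tokenizer.decode(output[0]))
```

One note on the install line: the `--extra-index-url` points at the `cu117` AutoGPTQ wheel index, which ties the install to a CUDA 11.7 runtime; on a runtime with a different CUDA version, the matching wheel index would presumably be needed.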