Update README.md
README.md CHANGED
@@ -49,9 +49,14 @@ for few-shots prompt use:
 # Python
 
 ```python
+# For faster text generation: !pip install accelerate
+# import accelerate
+
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
 model = AutoModelForCausalLM.from_pretrained("scribis/Fantastica-7b-Instruct-0.2-Italian_merged")
+# with accelerate: model = AutoModelForCausalLM.from_pretrained("scribis/Fantastica-7b-Instruct-0.2-Italian_merged", device_map='cuda')
+
 tokenizer = AutoTokenizer.from_pretrained("scribis/Fantastica-7b-Instruct-0.2-Italian_merged")
 
 pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=768, temperature=0.75, repetition_penalty=1.2, do_sample=True)
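For context, here is a minimal end-to-end sketch of the snippet as it reads after this change. The prompt string is an illustrative assumption; the README's few-shot prompt format (referenced in the hunk header) is the intended usage.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Load the merged model and its tokenizer from the Hub.
model = AutoModelForCausalLM.from_pretrained("scribis/Fantastica-7b-Instruct-0.2-Italian_merged")
tokenizer = AutoTokenizer.from_pretrained("scribis/Fantastica-7b-Instruct-0.2-Italian_merged")

# Sampling settings as in the README snippet.
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer,
                max_length=768, temperature=0.75, repetition_penalty=1.2, do_sample=True)

# Illustrative placeholder prompt, not the README's few-shot format.
prompt = "Scrivi l'inizio di un racconto fantastico ambientato in Italia."
outputs = pipe(prompt)
print(outputs[0]["generated_text"])
```

As the added comments suggest, installing accelerate and passing device_map='cuda' to from_pretrained places the model weights on the GPU, which speeds up generation.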