Update README.md
README.md (changed)
````diff
@@ -77,7 +77,7 @@ model.eval()
 ```
 -------------------------------------------------------------------------------------------------------
 
-#
+# Creating Inference Point
 ```
 def Trained_Llama3_1_inference(prompt):
     model.eval()
@@ -110,7 +110,7 @@ Trained_Llama3_1_inference("What are qualities of good Sales-person ?")
 #### Training (on Kaggle Notebook)
 This training is done on Kaggle Notebook enabling GPU(Required in quantized training/ inference).
 
-#
+# Install Dependencies
 ```
 %%capture
 !pip install -U transformers[torch] datasets
@@ -120,7 +120,7 @@ Trained_Llama3_1_inference("What are qualities of good Sales-person ?")
 ```
 
 ------------------------------------------------------------------------------------------------------------------------------------------
-#
+# Import Modules
 ```
 from transformers import BitsAndBytesConfig, AutoTokenizer, AutoModelForCausalLM, TrainingArguments
 from trl import SFTTrainer
````