# VietCoMath Model Usage

## Overview
This repository contains code for running the VietCoMath language model for mathematical problem-solving and text generation tasks.

## Installation

### Prerequisites
- Python 3.8+
- PyTorch
- Transformers library

### Required Dependencies
```bash
pip install transformers torch
```
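
After installation, a quick sanity check such as the sketch below (optional, not part of the original instructions) confirms that the libraries import and reports whether a CUDA GPU is visible:

```python
# Optional sanity check: verify the libraries import and whether a GPU is visible.
import torch
import transformers

print("transformers:", transformers.__version__)
print("torch:", torch.__version__)
print("CUDA available:", torch.cuda.is_available())
```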

## Usage

### Basic Text Generation
```python
import transformers
import torch

# Load the model
model_id = "VietnamAIHub/VietCoMath-o1-8B"
pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)

# Prepare messages
messages = [
    {"role": "system", "content": ""},
    {"role": "user", "content": "Who are you?"},
]

# Define terminators
terminators = [
    pipeline.tokenizer.eos_token_id,
    pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")
]

# Generate text
outputs = pipeline(
    messages,
    max_new_tokens=256,
    eos_token_id=terminators,
    do_sample=True,
    temperature=0.6,
    top_p=0.9,
)

# Print generated text
print(outputs[0]["generated_text"][-1])
```
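
The chat pipeline returns the running conversation as a list of message dicts, so the assistant's reply text can be pulled out as sketched below (a minimal example; the exact output structure may differ across transformers versions):

```python
# The last element of the returned conversation is the assistant's message,
# a dict with "role" and "content" keys in recent transformers releases.
assistant_message = outputs[0]["generated_text"][-1]
print(assistant_message["content"])
```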

### Advanced Usage with Response Parsing

#### Helper Functions
```python
import re

def check_patterns(response):
    """
    Check if the response contains all required XML patterns.

    Args:
        response (str): The model's generated response

    Returns:
        str: Parsed response or 'Missing' if patterns are incomplete
    """
    patterns = {
        'answer': r'<answer>(.*?)</answer>',
        'reflection': r'<reflection>(.*?)</reflection>',
        'steps': r'<step>(.*?)</step>',
        'count': r'<count>(.*?)</count>'
    }

    matches = {
        'answer': re.search(patterns['answer'], response, re.DOTALL),
        'reflection': re.search(patterns['reflection'], response, re.DOTALL),
        'steps': re.findall(patterns['steps'], response, re.DOTALL),
        'count': re.findall(patterns['count'], response, re.DOTALL)
    }

    return "Missing" if not all([matches['answer'], matches['reflection'], matches['steps'], matches['count']]) else response


def parse_response(response):
    """
    Parse the model's response and extract key components.

    Args:
        response (str): The model's generated response

    Returns:
        tuple: Parsed answer, reflection, steps, and clarification
    """
    response_check = check_patterns(response)

    if response_check == "Missing":
        clarification_match = re.search(r'<clarification>(.*?)</clarification>', response, re.DOTALL)
        clarification = clarification_match.group(1).strip() if clarification_match else response
        return "", "", [], clarification
    else:
        answer_match = re.search(r'<answer>(.*?)</answer>', response, re.DOTALL)
        reflection_match = re.search(r'<reflection>(.*?)</reflection>', response, re.DOTALL)

        answer = answer_match.group(1).strip() if answer_match else ""
        reflection = reflection_match.group(1).strip() if reflection_match else ""
        steps = re.findall(r'<step>(.*?)</step>', response, re.DOTALL)

        return answer, reflection, steps, ""
```
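
To see what the parser returns, it can be exercised on a hand-written string in the expected tag format (the sample text below is hypothetical, not real model output):

```python
# Hypothetical response used only to demonstrate the parser.
sample = (
    "<step>Music or sports: 55 + 44 - 20 = 79 students</step>"
    "<count>1</count>"
    "<reflection>Inclusion-exclusion avoids double counting the 20 students in both groups.</reflection>"
    "<answer>100 - 79 = 21 students chose neither.</answer>"
)

answer, reflection, steps, clarification = parse_response(sample)
print(answer)  # 100 - 79 = 21 students chose neither.
print(steps)   # ['Music or sports: 55 + 44 - 20 = 79 students']
```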
## Example Problem Solving
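This example queries the model through an OpenAI-compatible endpoint (for example, one served with vLLM) rather than loading it locally, so it additionally requires the `openai` client (`pip install openai`). The `base_url`, `api_key`, and `model` path below come from the original deployment; replace them with the values for your own server, and make sure the helper functions above are defined so that `parse_response` is available.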

```python
from openai import OpenAI

client = OpenAI(
    base_url="http://13.65.249.11:8887/v1",
    api_key="token-abc123",
)

# Example mathematical word problem (in Vietnamese): "100 students were admitted
# to university. Of these, 55 chose music, 44 chose sports, and 20 chose both.
# How many students chose neither music nor sports?"
problem = "Có 100 sinh viên đỗ đại học. Trong số đó, có 55 sinh viên chọn âm nhạc, 44 sinh viên chọn thể thao, và 20 sinh viên chọn cả 2. Hỏi có bao nhiêu sinh viên không chọn âm nhạc, cũng không chọn thể thao?"

completion = client.chat.completions.create(
    model="/LLM_32T/pretrained_weights/preview_model/VietCoMath_8B_instruct_2024_11",
    messages=[
        {"role": "system", "content": ""},
        {"role": "user", "content": problem}
    ],
    temperature=0.6,
    top_p=0.9,
    max_tokens=4096,
)

generated_text = completion.choices[0].message.content
answer, reflection, steps, clarification = parse_response(generated_text)
```
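
The parsed components can then be inspected directly; a minimal sketch using the values returned by `parse_response` above:

```python
# Print the structured pieces extracted from the model's response.
print("Answer:", answer)
for i, step in enumerate(steps, start=1):
    print(f"Step {i}: {step.strip()}")
print("Reflection:", reflection)
if clarification:
    print("Clarification / fallback text:", clarification)
```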

## Notes
- A stable internet connection is needed the first time the model weights are downloaded from the Hugging Face Hub
- The 8B-parameter model requires significant computational resources; a GPU is strongly recommended
- Performance may vary based on input complexity

## Limitations
- The model is trained primarily on Vietnamese mathematical problems
- May require fine-tuning for specific use cases

## License
[Insert Appropriate License Information]

## Citation
[If applicable, add citation information for the model]

## Contribution
Feel free to open issues or submit pull requests to improve the code and documentation.