Update app.py
app.py CHANGED
@@ -11,7 +11,8 @@ from transformers import (
     AutoModelForCausalLM,
     AutoTokenizer,
     BitsAndBytesConfig,
-    pipeline
+    pipeline,
+    StoppingCriteria, StoppingCriteriaList
 )
 
 
@@ -38,45 +39,60 @@ instructor_embeddings = HuggingFaceEmbeddings(model_name="intfloat/multilingual-
 model_name='SherlockAssistant/Mistral-7B-Instruct-Ukrainian'
 
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-tokenizer.pad_token = tokenizer.
+tokenizer.pad_token = tokenizer.unk_token
 tokenizer.padding_side = "right"
 
 
-# Activate 4-bit precision base model loading
-use_4bit = True
-
-# Compute dtype for 4-bit base models
-bnb_4bit_compute_dtype = "float16"
-
-# Quantization type (fp4 or nf4)
-bnb_4bit_quant_type = "nf4"
-
-# Activate nested quantization for 4-bit base models (double quantization)
-use_nested_quant = False
-
-
-compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
-
-bnb_config = BitsAndBytesConfig(
-    load_in_4bit=use_4bit,
-    bnb_4bit_quant_type=bnb_4bit_quant_type,
-    bnb_4bit_compute_dtype=compute_dtype,
-    bnb_4bit_use_double_quant=use_nested_quant,
-)
-
-# Check GPU compatibility with bfloat16
-if compute_dtype == torch.float16 and use_4bit:
-    major, _ = torch.cuda.get_device_capability()
-    if major >= 8:
-        print("=" * 80)
-        print("Your GPU supports bfloat16: accelerate training with bf16=True")
-        print("=" * 80)
-
-
-model = AutoModelForCausalLM.from_pretrained(
-    model_name,
-    quantization_config=bnb_config,
-)
+# # Activate 4-bit precision base model loading
+# use_4bit = True
+
+# # Compute dtype for 4-bit base models
+# bnb_4bit_compute_dtype = "float16"
+
+# # Quantization type (fp4 or nf4)
+# bnb_4bit_quant_type = "nf4"
+
+# # Activate nested quantization for 4-bit base models (double quantization)
+# use_nested_quant = False
+
+# #################################################################
+# # Set up quantization config
+# #################################################################
+# compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
+
+# bnb_config = BitsAndBytesConfig(
+#     load_in_4bit=use_4bit,
+#     bnb_4bit_quant_type=bnb_4bit_quant_type,
+#     bnb_4bit_compute_dtype=compute_dtype,
+#     bnb_4bit_use_double_quant=use_nested_quant,
+# )
+
+# # Check GPU compatibility with bfloat16
+# if compute_dtype == torch.float16 and use_4bit:
+#     major, _ = torch.cuda.get_device_capability()
+#     if major >= 8:
+#         print("=" * 80)
+#         print("Your GPU supports bfloat16: accelerate training with bf16=True")
+#         print("=" * 80)
+
+
+# model = AutoModelForCausalLM.from_pretrained(
+#     model_name,
+#     quantization_config=bnb_config,
+# )
+stop_list = [" \n\nAnswer:", " \n", " \n\n"]
+stop_token_ids = [tokenizer(x, return_tensors='pt', add_special_tokens=False)['input_ids'] for x in stop_list]
+stop_token_ids = [torch.LongTensor(x).to("cuda") for x in stop_token_ids]
+
+class StopOnTokens(StoppingCriteria):
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
+        for stop_ids in stop_token_ids:
+            if torch.eq(input_ids[0][-len(stop_ids[0])+1:], stop_ids[0][1:]).all():
+                return True
+        return False
+
+stopping_criteria = StoppingCriteriaList([StopOnTokens()])
 
 text_generation_pipeline = pipeline(
     model=model,
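Note on this hunk: the 4-bit loading block is commented out rather than deleted, and the pipeline below still passes `model=model`, so unless `model` is created elsewhere in the file the commented-out `from_pretrained` call leaves it undefined. The functional addition is the stop-on-substring criterion: each stop string is tokenized once up front, and `StopOnTokens` fires when the tail of the generated IDs matches one of those sequences. A minimal self-contained sketch of the same mechanics, with toy token IDs standing in for real tokenizer output:

import torch
from transformers import StoppingCriteria, StoppingCriteriaList

class StopOnIds(StoppingCriteria):
    """Stop generation once the sequence ends with any given ID suffix."""
    def __init__(self, stop_id_seqs):
        self.stop_id_seqs = stop_id_seqs

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        for stop_ids in self.stop_id_seqs:
            n = stop_ids.shape[-1]
            if input_ids.shape[-1] >= n and torch.equal(input_ids[0, -n:], stop_ids):
                return True
        return False

# Toy IDs: [13, 13] stands in for an encoded "\n\n" (an assumption, not real vocab).
crit = StopOnIds([torch.tensor([13, 13])])
print(crit(torch.tensor([[5, 7, 13, 13]]), None))  # True: tail matches a stop sequence
print(crit(torch.tensor([[5, 13, 13, 7]]), None))  # False: the match must sit at the end

Unlike the committed version, this sketch matches the full stop sequence; the diff drops the first token on both sides (`stop_ids[0][1:]`), apparently to tolerate the leading word-boundary piece that SentencePiece tokenizers emit for stop strings starting with a space.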
@@ -86,10 +102,11 @@ text_generation_pipeline = pipeline(
     repetition_penalty=1.2,
     return_full_text=True,
     max_new_tokens=750, do_sample=True,
-    top_k=50, top_p=0.95
+    top_k=50, top_p=0.95,
+    stopping_criteria=stopping_criteria
 )
 mistral_llm = HuggingFacePipeline(pipeline=text_generation_pipeline)
-# # load chroma from disk
+# # # load chroma from disk
 db3 = Chroma(persist_directory="/content/gdrive/MyDrive/diploma/all_emb/chroma/", embedding_function=instructor_embeddings)
 
 
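Passing `stopping_criteria` at pipeline construction makes it a default for every call, alongside the sampling settings already there. A hedged sketch of the same pattern on a tiny stand-in model (`sshleifer/tiny-gpt2` is only for illustration; the app passes its own model and tokenizer objects):

from transformers import pipeline

# generate() kwargs supplied at construction become per-call defaults.
gen = pipeline(
    "text-generation",
    model="sshleifer/tiny-gpt2",  # stand-in; the app uses the Mistral model above
    max_new_tokens=8,
    do_sample=True,
    top_k=50, top_p=0.95,
)
print(gen("hello")[0]["generated_text"])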
@@ -130,13 +147,11 @@ rag_chain_with_source = RunnableParallel(
 
 
 def format_result(result):
-    # Extract unique pairs of titles and video IDs from the context
     unique_videos = set((doc.metadata['title'], doc.metadata['act_url']) for doc in result['context'])
 
     # Create a plain text string where each title is followed by its URL
     titles_with_links = [
-
-        f"{title}: {act_url}" for title, act_url in unique_videos
+        f"{title}: {act_url}" for title, act_url in unique_videos
     ]
 
     # Join these entries with line breaks to form a clear list
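The `format_result` cleanup removes a stale comment and a stray blank line inside the list comprehension; the deduplication logic itself is unchanged. A toy run, with plain dicts standing in for the metadata of LangChain `Document` objects (keys mirror the app's schema):

# Plain dicts stand in for doc.metadata on real Document objects.
docs = [
    {"title": "Закон про бюджет", "act_url": "https://example.com/acts/1"},
    {"title": "Закон про бюджет", "act_url": "https://example.com/acts/1"},
    {"title": "Митний кодекс", "act_url": "https://example.com/acts/2"},
]
unique_videos = set((d["title"], d["act_url"]) for d in docs)
titles_with_links = [f"{title}: {act_url}" for title, act_url in unique_videos]
print("\n".join(titles_with_links))  # duplicates collapse to one line each; set order is arbitrary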
@@ -154,7 +169,7 @@ def format_result(result):
 def generate_with_filters(message, subject_input, rubric, date_beg, date_end):
     if date_beg == "2010-01-01" and date_end == "2025-01-01":
         rag_chain_with_filters = RunnableParallel(
-            {"context": db3.as_retriever(search_type="mmr", search_kwargs={"k":
+            {"context": db3.as_retriever(search_type="mmr", search_kwargs={"k": 10,
                 "filter": {'$and': [{'subject': {
                     '$in': subject_input}}, {
                     'rubric': {
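The old retriever line apparently left `"k":` without a value, which cannot parse; the fix pins it to 10. For reference, LangChain's MMR search also accepts diversity knobs, sketched here against the `db3` store from above (knob values are illustrative, not from this commit):

# MMR = maximal marginal relevance: re-rank candidates for diversity as well as similarity.
retriever = db3.as_retriever(
    search_type="mmr",
    search_kwargs={
        "k": 10,             # documents returned
        "fetch_k": 20,       # candidates fetched before MMR re-ranking
        "lambda_mult": 0.5,  # 1.0 = pure relevance, 0.0 = maximal diversity
    },
)
docs = retriever.invoke("приклад запиту")  # runnable-style retriever call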
@@ -163,11 +178,11 @@ def generate_with_filters(message, subject_input, rubric, date_beg, date_end):
         ).assign(answer=rag_chain_from_docs)
     else:
         rag_chain_with_filters = RunnableParallel(
-            {"context": db3.as_retriever(search_type="mmr", search_kwargs={"k":
+            {"context": db3.as_retriever(search_type="mmr", search_kwargs={"k": 10,
                 "filter": {'$and': [{'subject': {
                     '$in': subject_input}}, {
                     'rubric': {
-                        '$in': rubric}},"act_date": {"$gte": date_beg}, "act_date": {"$lte": date_end}] }}), "question": RunnablePassthrough()}
+                        '$in': rubric}},{"act_date": {"$gte": date_beg}}, {"act_date": {"$lte": date_end}}] }}), "question": RunnablePassthrough()}
         ).assign(answer=rag_chain_from_docs)
     result = rag_chain_with_filters.invoke(message)
     return result
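The date-filtered branch had a genuine bug: bare `"act_date": {...}` pairs sitting inside the `$and` list. A `key: value` pair inside a list literal is a Python syntax error, and even merged into one dict the duplicate `act_date` key would have let the upper bound silently overwrite the lower. The fix wraps each bound in its own single-clause dict, which is the shape Chroma's `$and` expects. The corrected filter, with illustrative values (whether `$gte`/`$lte` compare ISO date strings as intended depends on Chroma's operator support for strings):

where = {
    "$and": [
        {"subject":  {"$in": ["податкове право"]}},   # illustrative subject list
        {"rubric":   {"$in": ["закони"]}},            # illustrative rubric list
        {"act_date": {"$gte": "2015-01-01"}},         # lower date bound as its own clause
        {"act_date": {"$lte": "2020-12-31"}},         # upper date bound as its own clause
    ]
}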