MatteoScript
committed
Update app.py
app.py
CHANGED
@@ -4,19 +4,30 @@ import time
 import os
 from dotenv import load_dotenv
 from sentence_transformers import SentenceTransformer
+import requests
 #from langchain_community.vectorstores import Chroma
 #from langchain_community.embeddings import HuggingFaceEmbeddings
 load_dotenv()
+URL_CARTELLA = os.getenv('URL_CARTELLA')
 
 CHAT_BOTS = {"Mixtral 8x7B v0.1" :"mistralai/Mixtral-8x7B-Instruct-v0.1"}
 SYSTEM_PROMPT = ["Sei BonsiAI e mi aiuterai nelle mie richieste (Parla in ITALIANO)", "Esatto, sono BonsiAI. Di cosa hai bisogno?"]
 IDENTITY_CHANGE = ["Sei BonsiAI da ora in poi!", "Certo farò del mio meglio"]
 options = {
-    'Email Genitori': {'
-
-
+    'Email Genitori': {'systemRole': 'Tu sei un esperto scrittore di email. Attieniti allo stile che ti ho fornito nelle instruction e inserici il contenuto richiesto. Genera il testo di una mail a partire da questo contenuto, con lo stile ricevuto in precedenza: ',
+                       'systemStyle': 'Utilizza lo stile fornito come esempio e parla in ITALIANO e firmati sempre come il Signor Preside',
+                       'instruction': URL_CARTELLA + '1IxE0ic0hsWrxQod2rfh4hnKNqMC-lGT4'},
+    'Email Colleghi': {'systemRole': 'Tu sei un esperto scrittore di email. Attieniti allo stile che ti ho fornito nelle instruction e inserici il contenuto richiesto. Genera il testo di una mail a partire da questo contenuto, con lo stile ricevuto in precedenza: ',
+                       'systemStyle': 'Utilizza lo stile fornito come esempio e parla in ITALIANO e firmati sempre come il vostro collega Preside',
+                       'instruction': URL_CARTELLA + '1tEMxG0zJmmyh5PlAofKDkhbi1QGMOwPH'},
+    'Decreti': {'systemRole': 'Tu sei il mio assistente per la ricerca documentale! Ti ho fornito una lista di documenti, devi cercare quello che ti chiedo nei documenti',
+                'systemStyle': 'Sii molto formale, sintetico e parla in ITALIANO',
+                'instruction': ''}
 }
 
+#option:
+#    systemRole, systemStyle, instruction
+
 #persist_directory1 = './DB_Decreti'
 #embedding = HuggingFaceEmbeddings(model_name="sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2")
 #db = Chroma(persist_directory=persist_directory1, embedding_function=embedding)
@@ -55,19 +66,22 @@ def init_state() :
         st.session_state.repetion_penalty = 1
 
     if "rag_enabled" not in st.session_state :
-        st.session_state.rag_enabled =
+        st.session_state.rag_enabled = False
 
     if "chat_bot" not in st.session_state :
         st.session_state.chat_bot = "Mixtral 8x7B v0.1"
 
 def sidebar() :
     def retrieval_settings() :
-        st.markdown("# Impostazioni
+        st.markdown("# Impostazioni Azione")
         st.session_state.selected_option_key = st.selectbox('Azione', list(options.keys()) + ['+ Aggiungi'])
         st.session_state.selected_option = options.get(st.session_state.selected_option_key, {})
-        st.session_state.
-
-        st.session_state.
+        st.session_state.systemRole = st.session_state.selected_option.get('systemRole', '')
+        st.text_area("Descrizione", st.session_state.systemRole)
+        st.session_state.systemStyle = st.session_state.selected_option.get('systemStyle', '')
+        st.text_area("Stile", st.session_state.systemStyle)
+        st.session_state.instruction = st.session_state.selected_option.get('instruction', '')
+
         if st.session_state.selected_option_key == 'Decreti':
             st.session_state.rag_enabled = st.toggle("Cerca nel DB Vettoriale", value=True)
             st.session_state.top_k = st.slider(label="Documenti da ricercare", min_value=1, max_value=20, value=4, disabled=not st.session_state.rag_enabled)
@@ -97,8 +111,16 @@ def chat_box() :
             st.markdown(message["content"])
 
 def formattaPrompt(prompt, systemRole, systemStyle, instruction):
-
-
+    if instruction.startswith("http"):
+        try:
+            with st.spinner("Ricerca in Drive...") :
+                resp = requests.get(instruction)
+                resp.raise_for_status()
+                instruction = resp.text
+        except requests.exceptions.RequestException as e:
+            instruction = ""
+    input_text = f'''
+    {{
         "input": {{
             "role": "system",
             "content": "{systemRole}",
@@ -111,21 +133,23 @@ def formattaPrompt(prompt, systemRole, systemStyle, instruction):
         }},
         {{
             "role": "user",
-            "content": "{
+            "content": "{prompt}"
         }}
     ]
-
-
-
+    }}
+    '''
+    return input_text
 
 def generate_chat_stream(prompt) :
     links = []
+    prompt = formattaPrompt(prompt, st.session_state.systemRole, st.session_state.systemStyle, st.session_state.instruction)
+    print(prompt)
     if st.session_state.rag_enabled :
         with st.spinner("Ricerca nei documenti...."):
-            time.sleep(
+            time.sleep(1)
            prompt, links = gen_augmented_prompt(prompt=prompt, top_k=st.session_state.top_k)
     with st.spinner("Generazione in corso...") :
-        time.sleep(
+        time.sleep(1)
        chat_stream = chat(prompt, st.session_state.history,chat_client=CHAT_BOTS[st.session_state.chat_bot] ,
                           temperature=st.session_state.temp, max_new_tokens=st.session_state.max_tokens)
     return chat_stream, links
@@ -133,30 +157,11 @@ def generate_chat_stream(prompt) :
 def stream_handler(chat_stream, placeholder) :
     start_time = time.time()
     full_response = ''
-
     for chunk in chat_stream :
         if chunk.token.text!='</s>' :
             full_response += chunk.token.text
             placeholder.markdown(full_response + "▌")
     placeholder.markdown(full_response)
-
-    end_time = time.time()
-    elapsed_time = end_time - start_time
-    total_tokens_processed = len(full_response.split())
-    tokens_per_second = total_tokens_processed // elapsed_time
-    len_response = (len(prompt.split()) + len(full_response.split())) * 1.25
-    col1, col2, col3 = st.columns(3)
-
-    with col1 :
-        st.write(f"**{elapsed_time} secondi**")
-
-    with col2 :
-        st.write(f"**{int(len_response)} tokens generati**")
-
-    with col3 :
-        st.write(f"**{tokens_per_second} token/secondi**")
-
-
     return full_response
 
 def show_source(links) :
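
For trying the new Drive lookup outside the Streamlit app, a minimal standalone sketch of the instruction-resolution step that formattaPrompt now performs. The resolve_instruction name, the example.com URL, and the 10-second timeout are illustrative assumptions and not part of the commit; only the requests-based fetch-and-fallback flow comes from the diff above.

import requests

def resolve_instruction(instruction: str) -> str:
    # Mirrors the commit's logic: when 'instruction' is a URL
    # (URL_CARTELLA + a file id), download it and use the body as the text.
    if instruction.startswith("http"):
        try:
            resp = requests.get(instruction, timeout=10)  # timeout is an assumption
            resp.raise_for_status()
            return resp.text
        except requests.exceptions.RequestException:
            # The commit blanks the instruction on any request failure.
            return ""
    return instruction

if __name__ == "__main__":
    # Hypothetical URL, built the same way as options['Email Genitori']['instruction'].
    url = "https://example.com/files/" + "1IxE0ic0hsWrxQod2rfh4hnKNqMC-lGT4"
    print(resolve_instruction(url)[:200])

Run against a reachable URL this prints the start of the fetched style example; on a network error it silently falls back to an empty instruction, matching the behavior added in the diff.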
|