# Gradio app for test-chatting a GPT-4 persona prompt and saving tester
# scores/chat transcripts to a HuggingFace dataset repo.
import os | |
import time | |
import json | |
import openai | |
import gradio as gr | |
from datetime import datetime | |
from openai.error import RateLimitError, APIConnectionError, Timeout, APIError, \ | |
ServiceUnavailableError, InvalidRequestError | |
from huggingface_hub import hf_hub_download, HfApi | |
openai.api_key = os.environ.get('API_KEY') | |
# Categories a tester scores after each chat. NOTE(review): these strings are
# also used as keys in the saved JSON sessions, so they must stay stable.
# The original file contained mojibake ('conversaci贸n') — restored the
# intended 'ó' characters.
score_parameters = [
    'Personalidad', 'Intereses', 'Lenguaje/Estilo',
    'Autenticidad', 'Habilidad de conversación',
    'Marca/Producto', 'Identificación', 'Experiencia de uso',
    'Recomendacion', 'Conversación organica'
]
# Testers selectable in the UI
authors = ['Sofia', 'Eliza', 'Sindy', 'Carlos', 'Andres', 'Adriana', 'Carolina', 'Valeria']
# OpenAI chat models offered (only one today)
models = ["gpt-4"]
# Sampling temperatures selectable in the UI
temperature_values = [0.2, 0.8, 1.0]
def innit_bot():
    """
    Build the initial OpenAI message history, seeded with the system
    prompt read from prompt.txt.

    Returns a one-element list: [{"role": "system", "content": <prompt>}].
    """
    with open('prompt.txt', encoding='utf-8') as prompt_file:
        system_prompt = prompt_file.read()
    return [{"role": "system", "content": system_prompt}]
def make_visible():
    """
    Return update objects that reveal the chat widgets: the chatbot pane,
    the message textbox and the scores row, in that order.
    """
    chatbot_update = gr.Chatbot.update(visible=True)
    message_update = gr.Textbox.update(visible=True)
    scores_row_update = gr.Row.update(visible=True)
    return chatbot_update, message_update, scores_row_update
def make_noninteractive():
    """
    Return update objects that lock the author dropdown and the
    temperature radio once a chat has started.
    """
    dropdown_update = gr.Dropdown.update(interactive=False)
    radio_update = gr.Radio.update(interactive=False)
    return dropdown_update, radio_update
def call_api(model: gr.Dropdown, msg_history: gr.State, temperature: gr.State):
    """
    Send the full message history to the OpenAI chat-completion endpoint
    and return the raw API response.
    """
    request_args = {
        'model': model,
        'messages': msg_history,
        'temperature': temperature,
    }
    return openai.ChatCompletion.create(**request_args)
def handle_call(model: gr.Dropdown, msg_history: gr.State, temperature: gr.State):
    """
    Call the OpenAI API with retry handling and return the response plus
    the seconds the successful call took.

    Raises gr.Error when the request is invalid (out of tokens) or when
    transient failures persist after all retries.
    """
    max_tries = 3
    # One initial attempt plus up to `max_tries` retries.
    for attempt in range(max_tries + 1):
        try:
            start_time = time.time()
            response = call_api(model, msg_history, temperature)
            end_time = time.time()
            return response, end_time - start_time
        except InvalidRequestError as error:
            # Context window exhausted: retrying cannot help, fail fast.
            print(error)
            raise gr.Error('Ya no tienes mas tokens disponibles. Envia lo que tengas hasta el momento e inicia otro chat')
        except (RateLimitError, APIError, Timeout, APIConnectionError, ServiceUnavailableError) as error:
            print(error)
            if attempt == max_tries:
                raise gr.Error(
                    "Despues de muchos intentos, no se pudo completar la comunicacion con OpenAI. "
                    "Envia lo que tengas hasta el momento e inicia un chat nuevo dentro de unos minutos.")
            # Transient failure: wait before the next attempt.
            time.sleep(60)
def get_ai_answer(msg: str, model: gr.Dropdown, msg_history: gr.State, temperature: gr.State):
    """
    Append the user message to the history, query the model, append the
    assistant reply, and return (reply text, updated history, seconds the
    API call took).
    """
    msg_history.append({"role": "user", "content": msg})
    response, latency = handle_call(model, msg_history, temperature)
    reply_text = response["choices"][0]["message"]["content"]
    msg_history.append({'role': 'assistant', 'content': reply_text})
    return reply_text, msg_history, latency
def get_answer(
        msg: str, msg_history: gr.State,
        chatbot_history: gr.Chatbot, waiting_time: gr.State,
        temperature: gr.State, model: gr.Dropdown):
    """
    Handle one user turn: get the model's reply, record its latency,
    append the (message, reply) pair to the chat display, and clear the
    input textbox (first returned value).
    """
    reply, msg_history, latency = get_ai_answer(msg, model, msg_history, temperature)
    waiting_time.append(latency)
    chatbot_history.append((msg, reply))
    return "", msg_history, chatbot_history, waiting_time
def save_scores(
        author: gr.Dropdown, temperature: gr.State,
        history: gr.Chatbot, waiting_time: gr.State,
        model: gr.Dropdown, opinion: gr.Textbox, *score_values):
    """
    Validate the per-parameter scores and persist the whole chat session
    (prompt, scores, transcript with latencies, metadata) into data.json
    on the HuggingFace dataset repo.

    Raises gr.Error if any score radio was left unselected.
    Returns 'Done' on success.
    """
    # Map each score parameter to its selected value, failing fast on blanks
    scores = dict()
    for parameter, score in zip(score_parameters, score_values):
        if score is None:
            raise gr.Error('Asegurese de haber seleccionado al menos 1 opcion en cada categoria')
        scores[parameter] = score

    # Pair every (message, answer) turn with the seconds its answer took
    chat = []
    for user_msg, bot_msg in history:
        chat.append({
            'message': user_msg,
            'answer': bot_msg,
            'waiting': waiting_time.pop(0)
        })

    date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    with open('prompt.txt', encoding='utf-8') as file:
        prompt = file.read()

    # Assemble the session record to be appended
    session = dict(
        prompt=prompt,
        temperature=temperature,
        scores=scores,
        opinion=opinion,
        chat=chat,
        author=author,
        model=model,
        date=date
    )

    # Pull the current dataset file from the Hub
    hf_hub_download(
        repo_id=os.environ.get('DATA'), repo_type='dataset', filename="data.json", token=os.environ.get('HUB_TOKEN'),
        local_dir="./"
    )
    # Explicit encoding: the file is written with ensure_ascii=False below,
    # so it may contain non-ASCII characters (platform default could fail).
    with open('data.json', 'r', encoding='utf-8') as infile:
        past_sessions = json.load(infile)
    past_sessions['sessions'].append(session)
    with open('data.json', 'w', encoding='utf-8') as outfile:
        json.dump(past_sessions, outfile, indent=4, ensure_ascii=False)

    # Push the updated file back to the same dataset repo.
    # repo_type='dataset' was missing: upload_file defaults to a model repo,
    # while the download above targets a dataset repo.
    api = HfApi(token=os.environ.get('HUB_TOKEN'))
    api.upload_file(
        path_or_fileobj="data.json",
        path_in_repo="data.json",
        repo_id=os.environ.get('DATA'),
        repo_type='dataset',
    )
    return 'Done'
# --------------------------- UI layout & wiring ------------------------------
# NOTE(review): indentation below reconstructed from a whitespace-mangled
# copy; widget nesting inside the rows/columns is the most plausible reading.
with gr.Blocks() as app:
    msg_history = gr.State()  # Messages with the format used by OpenAI
    waiting_time = gr.State([])  # Seconds needed to get each answer
    with gr.Tab('Test Chats'):
        with gr.Row():
            model = gr.Textbox(value=models[0], label='Model', interactive=False)
            author = gr.Dropdown(authors, value=authors[0], label='Author', interactive=True)
            temperature = gr.Radio(temperature_values, label="Randomness", value=0.2)
        chat_btn = gr.Button(value='Start chat')
        # ------------------------------------- Chat -------------------------------------------
        # Hidden until 'Start chat' is pressed (see make_visible)
        chatbot = gr.Chatbot(label='Chat', visible=False)
        message = gr.Textbox(label='Message', visible=False)
    # ------------------------------------- Result's tab ---------------------------------------
    with gr.Tab('Save results'):
        with gr.Row(visible=False) as scores_row:
            with gr.Column(scale=75):
                with gr.Row():
                    # One radio per score category; values read positionally
                    # by save_scores via *score_values
                    scores = [
                        gr.Radio(choices=['Aprovado', 'No aprovado'], label=parameter)
                        for parameter in score_parameters
                    ]
            with gr.Column(scale=25):
                opinion_box = gr.Textbox(label='Opinion')
                scores_btn = gr.Button(value='Send scores')
                scores_box = gr.Textbox(label='Status', interactive=False)
    # -------------------------------------- Actions -----------------------------------------
    # Start chat: seed the system prompt, lock author/temperature, reveal chat UI
    chat_btn.click(
        innit_bot, None, [msg_history]
    ).then(
        make_noninteractive, None, [author, temperature]
    ).then(
        make_visible, None, [
            chatbot, message, scores_row]
    )
    # Enter in the message box sends one turn to the model
    message.submit(
        get_answer,
        [message, msg_history, chatbot, waiting_time, temperature, model],
        [message, msg_history, chatbot, waiting_time])
    # 'Send scores' persists the session to the Hub dataset
    scores_btn.click(
        save_scores,
        [author, temperature, chatbot, waiting_time, model, opinion_box] + scores,
        scores_box)
app.launch(debug=True, auth=(os.environ.get('USERNAME'), os.environ.get('PASSWORD')))