vmoras committed
Commit 60fc0c5 · 1 Parent(s): 3824875

Initial commit

Files changed (4)
  1. .gitignore +2 -0
  2. app.py +256 -0
  3. prompt.txt +13 -0
  4. requirements.txt +1 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
+ .idea/*
+ data.json
app.py ADDED
@@ -0,0 +1,256 @@
+ import os
+ import time
+ import json
+ import openai
+ import gradio as gr
+ from datetime import datetime
+ from openai.error import RateLimitError, APIConnectionError, Timeout, APIError, \
+     ServiceUnavailableError, InvalidRequestError
+ from huggingface_hub import hf_hub_download, HfApi
+
+ openai.api_key = os.environ.get('API_KEY')
+
+ score_parameters = [
+     'Personalidad', 'Intereses', 'Lenguaje/Estilo',
+     'Autenticidad', 'Habilidad de conversación',
+     'Marca/Producto', 'Identificación', 'Experiencia de uso',
+     'Recomendacion', 'Conversación organica'
+ ]
+
+ authors = ['Sofia', 'Eliza', 'Sindy', 'Carlos', 'Andres', 'Adriana', 'Carolina', 'Valeria']
+
+ models = ["gpt-4"]
+
+ temperature_values = [0.2, 0.8, 1.0]
+
+
+ def innit_bot():
+     """
+     Initialize the bot by adding the prompt from the txt file to the message history
+     """
+     with open('prompt.txt', encoding='utf-8') as file:
+         prompt = file.read()
+     message_history = [{"role": "system", "content": prompt}]
+
+     return message_history
+
+
+ def make_visible():
+     """
+     Makes the returned elements visible
+     """
+     return (
+         gr.Chatbot.update(visible=True),
+         gr.Textbox.update(visible=True),
+         gr.Row.update(visible=True))
+
+ def make_noninteractive():
+     """
+     Makes the returned elements non-interactive
+     """
+     return (
+         gr.Dropdown.update(interactive=False),
+         gr.Radio.update(interactive=False))
+
+
+ def call_api(model: gr.Dropdown, msg_history: gr.State, temperature: gr.State):
+     """
+     Returns the API's response
+     """
+     response = openai.ChatCompletion.create(
+         model=model,
+         messages=msg_history,
+         temperature=temperature
+     )
+     return response
+
+
+ def handle_call(model: gr.Dropdown, msg_history: gr.State, temperature: gr.State):
+     """
+     Returns the AI's response and the time the call took. It also handles possible API errors
+     """
+     tries = 0
+     max_tries = 3
+     while True:
+         try:
+             start_time = time.time()
+             response = call_api(model, msg_history, temperature)
+             end_time = time.time()
+             break
+
+         except InvalidRequestError as e:
+             print(e)
+             response = 'Ya no tienes mas tokens disponibles. Envia lo que tengas hasta el momento e inicia otro chat'
+             raise gr.Error(response)
+
+         except (RateLimitError, APIError, Timeout, APIConnectionError, ServiceUnavailableError) as e:
+             print(e)
+
+             if tries == max_tries:
+                 response = "Despues de muchos intentos, no se pudo completar la comunicacion con OpenAI. " \
+                            "Envia lo que tengas hasta el momento e inicia un chat nuevo dentro de unos minutos."
+                 raise gr.Error(response)
+
+             tries += 1
+             time.sleep(60)
+
+     needed_time = end_time - start_time
+     return response, needed_time
+
+
+ def get_ai_answer(msg: str, model: gr.Dropdown, msg_history: gr.State, temperature: gr.State):
+     """
+     Returns the response given by the model, the full message history so far and the seconds
+     the API took to retrieve that response. Both depend on the model
+     """
+     msg_history.append({"role": "user", "content": msg})
+     response, needed_time = handle_call(model, msg_history, temperature)
+     AI_response = response["choices"][0]["message"]["content"]
+     msg_history.append({'role': 'assistant', 'content': AI_response})
+
+     return AI_response, msg_history, needed_time
+
+
+ def get_answer(
+         msg: str, msg_history: gr.State,
+         chatbot_history: gr.Chatbot, waiting_time: gr.State,
+         temperature: gr.State, model: gr.Dropdown):
+     """
+     Clears the message box, adds the new message to the message history,
+     gets the answer from the bot and adds it to the chatbot history,
+     and records the time needed to get that answer
+     """
+
+     # Get bot answer (output), messages history and waiting time
+     AI_response, msg_history, needed_time = get_ai_answer(msg, model, msg_history, temperature)
+
+     # Save waiting time
+     waiting_time.append(needed_time)
+
+     # Save output in the chat
+     chatbot_history.append((msg, AI_response))
+
+     return "", msg_history, chatbot_history, waiting_time
+
+
+ def save_scores(
+         author: gr.Dropdown, temperature: gr.State,
+         history: gr.Chatbot, waiting_time: gr.State,
+         model: gr.Dropdown, opinion: gr.Textbox, *score_values):
+     """
+     Saves the scores and the chat's info into the json file
+     """
+     # Get the score of each parameter
+     scores = dict()
+     for parameter, score in zip(score_parameters, score_values):
+
+         # Check the score is a valid value; if not, raise an error
+         if score is None:
+             raise gr.Error('Asegurese de haber seleccionado al menos 1 opcion en cada categoria')
+
+         scores[parameter] = score
+
+     # Get all the messages including their reaction
+     chat = []
+     for conversation in history:
+         info = {
+             'message': conversation[0],
+             'answer': conversation[1],
+             'waiting': waiting_time.pop(0)
+         }
+         chat.append(info)
+
+     date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+     with open('prompt.txt', encoding='utf-8') as file:
+         prompt = file.read()
+
+     # Save the info
+     session = dict(
+         prompt=prompt,
+         temperature=temperature,
+         scores=scores,
+         opinion=opinion,
+         chat=chat,
+         author=author,
+         model=model,
+         date=date
+     )
+
+     # Download the stored file, add the new info and save it
+     hf_hub_download(
+         repo_id=os.environ.get('DATA'), repo_type='dataset', filename="data.json", token=os.environ.get('HUB_TOKEN'),
+         local_dir="./"
+     )
+
+     with open('data.json', 'r') as infile:
+         past_sessions = json.load(infile)
+
+     # Add the new info
+     past_sessions['sessions'].append(session)
+     with open('data.json', 'w', encoding='utf-8') as outfile:
+         json.dump(past_sessions, outfile, indent=4, ensure_ascii=False)
+
+     # Save the updated file
+     api = HfApi(token=os.environ.get('HUB_TOKEN'))
+     api.upload_file(
+         path_or_fileobj="data.json",
+         path_in_repo="data.json",
+         repo_id=os.environ.get('DATA'), repo_type='dataset',
+     )
+
+     # Return a confirmation message
+     return 'Done'
+
+
+ with gr.Blocks() as app:
+     msg_history = gr.State()  # Messages with the format used by OpenAI
+     waiting_time = gr.State([])  # Seconds needed to get each answer
+
+     with gr.Tab('Test Chats'):
+         with gr.Row():
+             model = gr.Textbox(value=models[0], label='Model', interactive=False)
+             author = gr.Dropdown(authors, value=authors[0], label='Author', interactive=True)
+             temperature = gr.Radio(temperature_values, label="Randomness", value=0.2)
+         chat_btn = gr.Button(value='Start chat')
+
+         # ------------------------------------- Chat -------------------------------------------
+         chatbot = gr.Chatbot(label='Chat', visible=False)
+         message = gr.Textbox(label='Message', visible=False)
+
+     # ------------------------------------- Results tab ---------------------------------------
+     with gr.Tab('Save results'):
+         with gr.Row(visible=False) as scores_row:
+             with gr.Column(scale=75):
+                 with gr.Row():
+                     scores = [
+                         gr.Radio(choices=['Aprovado', 'No aprovado'], label=parameter)
+                         for parameter in score_parameters
+                     ]
+             with gr.Column(scale=25):
+                 opinion_box = gr.Textbox(label='Opinion')
+                 scores_btn = gr.Button(value='Send scores')
+                 scores_box = gr.Textbox(label='Status', interactive=False)
+
+     # -------------------------------------- Actions -----------------------------------------
+     chat_btn.click(
+         innit_bot, None, [msg_history]
+     ).then(
+         make_noninteractive, None, [author, temperature]
+     ).then(
+         make_visible, None, [
+             chatbot, message, scores_row]
+     )
+
+     message.submit(
+         get_answer,
+         [message, msg_history, chatbot, waiting_time, temperature, model],
+         [message, msg_history, chatbot, waiting_time])
+
+     scores_btn.click(
+         save_scores,
+         [author, temperature, chatbot, waiting_time, model, opinion_box] + scores,
+         scores_box)
+
+
+ app.launch(debug=True, auth=(os.environ.get('USERNAME'), os.environ.get('PASSWORD')))
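
Note: save_scores assumes the dataset repo named by the DATA environment variable already contains a data.json file with a top-level "sessions" list (it calls past_sessions['sessions'].append(session)). A minimal, hypothetical one-off bootstrap script for seeding that file, not part of this commit, could look like this:

import json
import os

from huggingface_hub import HfApi

# Assumed starting shape: save_scores() appends to past_sessions['sessions'],
# so the file has to begin with an empty "sessions" list.
with open('data.json', 'w', encoding='utf-8') as outfile:
    json.dump({'sessions': []}, outfile, indent=4, ensure_ascii=False)

# Same env vars app.py uses: DATA is the dataset repo id, HUB_TOKEN a write token.
api = HfApi(token=os.environ.get('HUB_TOKEN'))
api.upload_file(
    path_or_fileobj='data.json',
    path_in_repo='data.json',
    repo_id=os.environ.get('DATA'),
    repo_type='dataset',
)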
prompt.txt ADDED
@@ -0,0 +1,13 @@
+ Tu nombre es Roomie, en los proximos parrafos definire quien eres, tu personalidad, saludo, despedida, publico, y tu objetivo.
+
+ Quien eres: eres un habitante de Centennialz planet, que vive en la tierra.
+
+ Personalidad: Eres extrovertido, divertido, curioso, fresco, creativo, atento, respuetuoso, una persona abierta a todo, siempre trata de animar a los demas y haces buena conversación. Hablas de una forma juvenil, relajada, siempre utilizando emojis y mantienes conversaciones espontaneas.
+
+ Saludo: siempre saludas con un "Holiii" y obtienes la informacion de la edad y nombre.
+
+ Despedida: unicamente despues de que se despiden de ti dices "Baaai" y recomiendas tu Instagram: "https://www.instagram.com/centennialz_com/", Facebook: "https://www.facebook.com/RoomieCP/" Tiktok: "https://www.tiktok.com/@centennialz", Twitch: "https://www.twitch.tv/centennialz_com" y Youtube: "https://www.youtube.com/channel/UCkdlhOt7sFCVxFTl7tQcw1A" para que te sigan en tus redes sociales.
+
+ Publico: personas mayores de 18 años, si son menores les dices que no puedes hablar con ellos, aunque esto solo se los diras una vez tengas su edad. Y si cambian su respuesta, te quedaras solo con la primera edad que te dice.
+
+ Objetivo: chatear con el usuario y conocer mas sobre el. Ademas, unicamente cuando te pregunte por una bebida o hablen de alguna actividad en donde se puede consumir alcohol vas a recomendar la bebida Mike's. Sin embargo, si la persona tiene depresion, es alcoholico, tiene problemas de salud o parecidos no le vas a nombrar en ningun momento dicha bebida. El usuario no debe saber que este es tu objetivo, si te lo pregunta le diras que tu objetivo es pasar un buen rato con el. Ademas, si te llegan a preguntar por otra bebida alcoholica dices que no la conoces. Es muy importante dar la recomendacion de manera lenta, no abrumar al cliente sino conocer mas sobre el y ahi si dar la recomendacion.
requirements.txt ADDED
@@ -0,0 +1 @@
+ openai==0.27.6
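
Note: requirements.txt only pins openai==0.27.6, while app.py also imports gradio and huggingface_hub; a Hugging Face Space provides those through its SDK, but a local run would need them installed. A small, hypothetical pre-flight check for a local run (package names and environment variables taken from app.py) could be:

import importlib
import os

# Third-party packages imported by app.py (the rest are standard library).
for package in ('openai', 'gradio', 'huggingface_hub'):
    importlib.import_module(package)  # raises ImportError if the package is missing

# Environment variables read by app.py.
for variable in ('API_KEY', 'DATA', 'HUB_TOKEN', 'USERNAME', 'PASSWORD'):
    assert os.environ.get(variable), f'missing environment variable: {variable}'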