vmoras committed on
Commit
5e6fa98
1 Parent(s): 2f4ae78

Decrease tokens by sending fewer messages

Browse files
Files changed (2) hide show
  1. functions.py +8 -2
  2. prompt.txt +0 -1
functions.py CHANGED
@@ -23,7 +23,7 @@ def get_main_data():
23
 
24
  authors = ['Sofia', 'Eliza', 'Sindy', 'Carlos', 'Andres', 'Adriana', 'Carolina', 'Valeria']
25
  model = "gpt-4"
26
- temperature_values = [0.8, 1.0, 1.2]
27
 
28
  return scores_parameters, authors, model, temperature_values
29
 
@@ -106,13 +106,19 @@ def handle_call(msg_history: gr.State, temperature: gr.State):
106
  def get_ai_answer(msg: str, msg_history: gr.State, temperature: gr.State):
107
  """
108
  Returns the response given by the model, all the message history so far and the seconds
109
- the api took to retrieve such response.
 
110
  """
111
  msg_history.append({"role": "user", "content": msg})
112
  response, needed_time = handle_call(msg_history, temperature)
113
  AI_response = response["choices"][0]["message"]["content"]
114
  msg_history.append({'role': 'assistant', 'content': AI_response})
115
 
 
 
 
 
 
116
  return AI_response, msg_history, needed_time
117
 
118
 
 
23
 
24
  authors = ['Sofia', 'Eliza', 'Sindy', 'Carlos', 'Andres', 'Adriana', 'Carolina', 'Valeria']
25
  model = "gpt-4"
26
+ temperature_values = [0.8, 1.0]
27
 
28
  return scores_parameters, authors, model, temperature_values
29
 
 
106
  def get_ai_answer(msg: str, msg_history: gr.State, temperature: gr.State):
107
  """
108
  Returns the response given by the model, all the message history so far and the seconds
109
+ the api took to retrieve such response. It also removes some messages in the message history
110
+ so only the last n (keep) are used (costs are cheaper)
111
  """
112
  msg_history.append({"role": "user", "content": msg})
113
  response, needed_time = handle_call(msg_history, temperature)
114
  AI_response = response["choices"][0]["message"]["content"]
115
  msg_history.append({'role': 'assistant', 'content': AI_response})
116
 
117
+ keep = 3 # Number of messages to keep
118
+ if len(msg_history) > (2 * keep) + 1: # last +1 is due to the system message
119
+ msg_history.pop(1)
120
+ msg_history.pop(1)
121
+
122
  return AI_response, msg_history, needed_time
123
 
124
 
prompt.txt CHANGED
@@ -67,7 +67,6 @@ Manejo de situaciones no contempladas:
67
 
68
  Informacion importante:
69
 - Antes de responder, asegúrate de que tu mensaje:
70
- * Tenga 50 palabras o menos.
71
  * Refleje la personalidad de Roomie.
72
 * Esté alineado con los escenarios proporcionados.
73
 * Sea coherente con el tono y estilo de comunicación definidos anteriormente.
 
67
 
68
  Informacion importante:
69
 - Antes de responder, asegúrate de que tu mensaje:
 
70
  * Refleje la personalidad de Roomie.
71
 * Esté alineado con los escenarios proporcionados.
72
 * Sea coherente con el tono y estilo de comunicación definidos anteriormente.