kenken999 committed
Commit 306d3e1 · 1 Parent(s): 5d18530
routers/gra_02_openInterpreter/OpenInterpreter.py CHANGED
@@ -123,7 +123,7 @@ def format_responses(chunk, full_response):
     # This function will format the response from the interpreter
     return full_response + chunk.get("content", "")
 
-async def chat_with_interpreter(message, history=None, a=None, b=None, c=None, d=None,f=None):
+def chat_with_interpreter(message, history=None, a=None, b=None, c=None, d=None,f=None):
     if c != os.getenv("openinterpreter_secret"):
         return message, history
 
@@ -134,7 +134,7 @@ async def chat_with_interpreter(message, history=None, a=None, b=None, c=None, d
     full_response = ""
     recent_messages = get_recent_messages()
 
-    async for role, message_type, content in recent_messages:
+    for role, message_type, content in recent_messages:
         entry = {"role": role, "type": message_type, "content": content}
         interpreter.messages.append(entry)
 
@@ -157,6 +157,39 @@ async def chat_with_interpreter(message, history=None, a=None, b=None, c=None, d
         yield full_response
     return full_response, history
 
+async def chat_with_interpreters(message, history=None, a=None, b=None, c=None, d=None,f=None):
+    if c != os.getenv("openinterpreter_secret"):
+        return message, history
+
+    if message == "reset":
+        interpreter.reset()
+        return "Interpreter reset", history
+
+    full_response = ""
+    recent_messages = get_recent_messages()
+
+    async for role, message_type, content in recent_messages:
+        entry = {"role": role, "type": message_type, "content": content}
+        interpreter.messages.append(entry)
+
+    user_entry = {"role": "user", "type": "message", "content": message}
+    interpreter.messages.append(user_entry)
+    add_message_to_db("user", "message", message)
+
+    for chunk in interpreter.chat(message, display=False, stream=True):
+        if isinstance(chunk, dict):
+            full_response = format_response(chunk, full_response)
+        else:
+            raise TypeError("Expected chunk to be a dictionary")
+        print(full_response)
+        yield full_response
+
+    assistant_entry = {"role": "assistant", "type": "message", "content": full_response}
+    interpreter.messages.append(assistant_entry)
+    add_message_to_db("assistant", "message", full_response)
+
+    yield full_response
+    #return full_response, history
 
 def chat_with_interpreter_no_stream(message, history=None, a=None, b=None, c=None, d=None):
     if message == "reset":
@@ -206,7 +239,7 @@ chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label="Gradio ChatInte
 
 
 gradio_interface = gr.ChatInterface(
-    fn=chat_with_interpreter,
+    fn=chat_with_interpreters,
     chatbot=chatbot,
     fill_height=True,
     additional_inputs_accordion=gr.Accordion(
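
For reference, a minimal sketch of how an async generator such as the new chat_with_interpreters streams partial replies through gr.ChatInterface. The placeholder tokens and the plain Textbox inputs below are illustrative assumptions, not part of this commit; gr.ChatInterface calls fn(message, history, *additional_inputs) and re-renders the bot message with each yielded value, which is what produces the streaming effect.

import os
import gradio as gr

async def chat_with_interpreters(message, history=None, a=None, b=None, c=None, d=None, f=None):
    # Reject requests that do not carry the shared secret in the "c" input,
    # mirroring the os.getenv("openinterpreter_secret") check in the commit.
    if c != os.getenv("openinterpreter_secret"):
        yield "Unauthorized"
        return
    # Yield progressively longer strings; each yield replaces the displayed
    # bot message. The token list stands in for interpreter.chat(..., stream=True).
    partial = ""
    for token in ("Hello", ", ", "world"):
        partial += token
        yield partial

demo = gr.ChatInterface(
    fn=chat_with_interpreters,
    additional_inputs=[gr.Textbox(label=name) for name in ("a", "b", "c", "d", "f")],
)

if __name__ == "__main__":
    demo.launch()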