lingyit1108 committed
Commit · b034166 · 1 Parent(s): 5a39f92

added feedback button

Files changed: streamlit_app.py (+25 -1)

streamlit_app.py CHANGED
@@ -1,6 +1,9 @@
 import streamlit as st
+from streamlit_feedback import streamlit_feedback
+
 import os
 import pandas as pd
+import time

 import openai

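The new streamlit_feedback import is a third-party Streamlit component, not part of Streamlit itself. A setup note, assuming the PyPI package name matches the import path:

# Assumption: the component ships in the streamlit-feedback package,
# installed with `pip install streamlit-feedback`.
from streamlit_feedback import streamlit_feedback
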
@@ -102,12 +105,16 @@ with st.sidebar:

 if "init" not in st.session_state.keys():
     st.session_state.init = {"warm_start": "No"}
+    st.session_state.feedback = False

 # Store LLM generated responses
 if "messages" not in st.session_state.keys():
     st.session_state.messages = [{"role": "assistant",
                                   "content": "How may I assist you today?"}]

+if "feedback_key" not in st.session_state:
+    st.session_state.feedback_key = 0
+
 # Display or clear chat messages
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
@@ -122,7 +129,11 @@ def clear_chat_history():
         embedding_model=embedding_model,
         system_content=system_content)
     chat_engine.reset()
+
 st.sidebar.button("Clear Chat History", on_click=clear_chat_history)
+if st.sidebar.button("I want to submit a feedback!"):
+    st.session_state.feedback = True
+    st.session_state.feedback_key += 1  # overwrite feedback component

 @st.cache_resource
 def get_document_object(input_files):
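The feedback_key counter bumped above exists because Streamlit keeps widget state per key: giving the component a fresh key on each button click rebuilds it with empty state instead of redisplaying the previous submission (the inline comment "overwrite feedback component" refers to this). A minimal sketch of the same pattern with a plain text input, names hypothetical:

import streamlit as st

if "widget_key" not in st.session_state:
    st.session_state.widget_key = 0

# A new key mints a new widget identity, so the input below is
# re-created with empty state rather than reused across reruns.
if st.button("Reset the input"):
    st.session_state.widget_key += 1

st.text_input("Type here", key=f"input_{st.session_state.widget_key}")
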
@@ -172,6 +183,10 @@ def generate_llm_response(prompt_input):
     response = chat_engine.stream_chat(prompt_input)
     return response

+def handle_feedback(user_response):
+    st.toast("✔️ Feedback received!")
+    st.session_state.feedback = False
+
 # Warm start
 if st.session_state.init["warm_start"] == "No":
     clear_chat_history()
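streamlit_feedback passes the submitted response to the on_submit callback as its first argument; here handle_feedback only shows a toast and hides the widget. A hedged variant that also keeps the payload around (the feedback_log field is an illustrative assumption, not in this commit):

def handle_feedback(user_response):
    # user_response is the dict submitted by the component, e.g.
    # {"type": "thumbs", "score": "👍", "text": "..."} for thumbs feedback.
    st.toast("✔️ Feedback received!")
    st.session_state.feedback = False
    st.session_state.setdefault("feedback_log", []).append(user_response)
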
@@ -196,5 +211,14 @@ if st.session_state.messages[-1]["role"] != "assistant":
             full_response += token
             placeholder.markdown(full_response)
         placeholder.markdown(full_response)
+
     message = {"role": "assistant", "content": full_response}
-    st.session_state.messages.append(message)
+    st.session_state.messages.append(message)
+
+if st.session_state.feedback:
+    result = streamlit_feedback(
+        feedback_type="thumbs",
+        optional_text_label="[Optional] Please provide an explanation",
+        on_submit=handle_feedback,
+        key=f"feedback_{st.session_state.feedback_key}"
+    )
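For reference, the component can also be used without a callback: streamlit_feedback returns the submitted payload on the rerun that follows submission, so the result assigned above could be inspected directly. A minimal sketch under that assumption:

feedback = streamlit_feedback(
    feedback_type="thumbs",
    optional_text_label="[Optional] Please provide an explanation",
    key="feedback_inline",  # hypothetical fixed key for a standalone demo
)
if feedback:
    # e.g. {"type": "thumbs", "score": "👍", "text": "..."}
    st.write(feedback)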