Minor Fixes
Browse files
app.py
CHANGED
@@ -21,6 +21,7 @@ from insights_bullet_chain import insights_bullet_chain
|
|
21 |
from synopsis_chain import synopsis_chain
|
22 |
from custom_exceptions import InvalidArgumentError, InvalidCommandError
|
23 |
from openai_configuration import openai_parser
|
|
|
24 |
|
25 |
st.set_page_config(layout="wide")
|
26 |
|
@@ -36,14 +37,14 @@ Here's a quick guide to getting started with me:
|
|
36 |
| `/configure --key <api key> --model <model>` | Configure the OpenAI API key and model for our conversation. |
|
37 |
| `/add-papers <list of urls>` | Upload and process documents for our conversation. |
|
38 |
| `/library` | View an index of processed documents to easily navigate your research. |
|
39 |
-
| `/view-
|
40 |
| `/session-expense` | Calculate the cost of our conversation, ensuring transparency in resource usage. |
|
41 |
| `/export` | Download conversation data for your records or further analysis. |
|
42 |
-
| `/auto-insight <
|
43 |
-
| `/condense-summary <
|
44 |
-
| `/insight-bullets <list of
|
45 |
-
| `/paper-synopsis <
|
46 |
-
| `/deep-dive [<list of
|
47 |
|
48 |
|
49 |
<br>
|
@@ -75,7 +76,7 @@ def index_documents_wrapper(inputs=None):
|
|
75 |
|
76 |
def view_document_wrapper(inputs):
|
77 |
response = st.session_state.documents[inputs].page_content
|
78 |
-
st.session_state.messages.append((f"/view-
|
79 |
return (response, "identity")
|
80 |
|
81 |
|
@@ -91,6 +92,7 @@ def calculate_cost_wrapper(inputs=None):
|
|
91 |
|
92 |
|
93 |
def download_conversation_wrapper(inputs=None):
|
|
|
94 |
conversation_data = json.dumps(
|
95 |
{
|
96 |
"document_urls": (
|
@@ -180,9 +182,9 @@ def query_llm_wrapper(inputs):
|
|
180 |
|
181 |
|
182 |
def chain_of_density_wrapper(inputs):
|
183 |
-
if inputs ==
|
184 |
-
raise InvalidArgumentError("Please provide
|
185 |
-
document = st.session_state.documents[
|
186 |
llm = ChatOpenAI(model=st.session_state.model, temperature=0)
|
187 |
with get_openai_callback() as cb:
|
188 |
summary = chain_of_density_chain(llm).invoke({"paper": document})
|
@@ -199,9 +201,9 @@ def chain_of_density_wrapper(inputs):
|
|
199 |
|
200 |
|
201 |
def synopsis_wrapper(inputs):
|
202 |
-
if inputs ==
|
203 |
-
raise InvalidArgumentError("Please provide
|
204 |
-
document = st.session_state.documents[
|
205 |
llm = ChatOpenAI(model=st.session_state.model, temperature=0)
|
206 |
with get_openai_callback() as cb:
|
207 |
summary = synopsis_chain(llm).invoke({"paper": document})
|
@@ -218,8 +220,8 @@ def synopsis_wrapper(inputs):
|
|
218 |
|
219 |
|
220 |
def insights_bullet_wrapper(inputs):
|
221 |
-
if inputs ==
|
222 |
-
raise InvalidArgumentError("Please provide
|
223 |
document = "\n\n".join([st.session_state.documents[c].page_content for c in inputs])
|
224 |
llm = ChatOpenAI(model=st.session_state.model, temperature=0)
|
225 |
with get_openai_callback() as cb:
|
@@ -237,9 +239,9 @@ def insights_bullet_wrapper(inputs):
|
|
237 |
|
238 |
|
239 |
def auto_qa_chain_wrapper(inputs):
|
240 |
-
if inputs ==
|
241 |
-
raise InvalidArgumentError("Please provide
|
242 |
-
document = st.session_state.documents[
|
243 |
llm = ChatOpenAI(model=st.session_state.model, temperature=0)
|
244 |
auto_qa_conversation = []
|
245 |
with get_openai_callback() as cb:
|
@@ -310,15 +312,15 @@ if __name__ == "__main__":
|
|
310 |
("/configure", str, configure_openai_wrapper),
|
311 |
("/add-papers", list, process_documents_wrapper),
|
312 |
("/library", None, index_documents_wrapper),
|
313 |
-
("/view-
|
314 |
("/session-expense", None, calculate_cost_wrapper),
|
315 |
("/export", None, download_conversation_wrapper),
|
316 |
("/help-me", None, lambda x: (welcome_message, "identity")),
|
317 |
-
("/auto-insight",
|
318 |
("/deep-dive", str, query_llm_wrapper),
|
319 |
-
("/condense-summary",
|
320 |
("/insight-bullets", list, insights_bullet_wrapper),
|
321 |
-
("/paper-synopsis",
|
322 |
]
|
323 |
command_center = CommandCenter(
|
324 |
default_input_type=str,
|
@@ -334,7 +336,9 @@ if __name__ == "__main__":
|
|
334 |
}
|
335 |
jsonify_functions = {
|
336 |
"identity": lambda x: x,
|
337 |
-
"dataframe": lambda x:
|
|
|
|
|
338 |
"reponse_with_citations": lambda x: x,
|
339 |
}
|
340 |
boot(command_center, formating_functions)
|
|
|
21 |
from synopsis_chain import synopsis_chain
|
22 |
from custom_exceptions import InvalidArgumentError, InvalidCommandError
|
23 |
from openai_configuration import openai_parser
|
24 |
+
import pickle
|
25 |
|
26 |
st.set_page_config(layout="wide")
|
27 |
|
|
|
37 |
| `/configure --key <api key> --model <model>` | Configure the OpenAI API key and model for our conversation. |
|
38 |
| `/add-papers <list of urls>` | Upload and process documents for our conversation. |
|
39 |
| `/library` | View an index of processed documents to easily navigate your research. |
|
40 |
+
| `/view-snip <snippet id>` | View the content of a specific snippet. |
|
41 |
| `/session-expense` | Calculate the cost of our conversation, ensuring transparency in resource usage. |
|
42 |
| `/export` | Download conversation data for your records or further analysis. |
|
43 |
+
| `/auto-insight <list of snippet ids>` | Automatically generate questions and answers for the paper. |
|
44 |
+
| `/condense-summary <list of snippet ids>` | Generate increasingly concise, entity-dense summaries of the paper. |
|
45 |
+
| `/insight-bullets <list of snippet ids>` | Extract and summarize key insights, methods, results, and conclusions. |
|
46 |
+
| `/paper-synopsis <list of snippet ids>` | Generate a synopsis of the paper. |
|
47 |
+
| `/deep-dive [<list of snippet ids>] <query>` | Query me with a specific context. |
|
48 |
|
49 |
|
50 |
<br>
|
|
|
76 |
|
77 |
def view_document_wrapper(inputs):
    """Fetch the stored snippet whose id is *inputs*, log the exchange, and return it.

    Appends the command/response pair to the chat history so the viewed
    snippet shows up in the conversation, then returns the snippet text
    with the "identity" formatter tag.
    """
    snippet_text = st.session_state.documents[inputs].page_content
    # Record the command and its output in the message history before returning.
    st.session_state.messages.append((f"/view-snip {inputs}", snippet_text, "identity"))
    return (snippet_text, "identity")
|
81 |
|
82 |
|
|
|
92 |
|
93 |
|
94 |
def download_conversation_wrapper(inputs=None):
|
95 |
+
pickle.dump(st.session_state.messages, open("conversation_data.pkl", "wb"))
|
96 |
conversation_data = json.dumps(
|
97 |
{
|
98 |
"document_urls": (
|
|
|
182 |
|
183 |
|
184 |
def chain_of_density_wrapper(inputs):
|
185 |
+
if inputs == []:
|
186 |
+
raise InvalidArgumentError("Please provide snippet ids")
|
187 |
+
document = "\n\n".join([st.session_state.documents[c].page_content for c in inputs])
|
188 |
llm = ChatOpenAI(model=st.session_state.model, temperature=0)
|
189 |
with get_openai_callback() as cb:
|
190 |
summary = chain_of_density_chain(llm).invoke({"paper": document})
|
|
|
201 |
|
202 |
|
203 |
def synopsis_wrapper(inputs):
|
204 |
+
if inputs == []:
|
205 |
+
raise InvalidArgumentError("Please provide snippet ids")
|
206 |
+
document = "\n\n".join([st.session_state.documents[c].page_content for c in inputs])
|
207 |
llm = ChatOpenAI(model=st.session_state.model, temperature=0)
|
208 |
with get_openai_callback() as cb:
|
209 |
summary = synopsis_chain(llm).invoke({"paper": document})
|
|
|
220 |
|
221 |
|
222 |
def insights_bullet_wrapper(inputs):
|
223 |
+
if inputs == []:
|
224 |
+
raise InvalidArgumentError("Please provide snippet ids")
|
225 |
document = "\n\n".join([st.session_state.documents[c].page_content for c in inputs])
|
226 |
llm = ChatOpenAI(model=st.session_state.model, temperature=0)
|
227 |
with get_openai_callback() as cb:
|
|
|
239 |
|
240 |
|
241 |
def auto_qa_chain_wrapper(inputs):
|
242 |
+
if inputs == []:
|
243 |
+
raise InvalidArgumentError("Please provide snippet ids")
|
244 |
+
document = "\n\n".join([st.session_state.documents[c].page_content for c in inputs])
|
245 |
llm = ChatOpenAI(model=st.session_state.model, temperature=0)
|
246 |
auto_qa_conversation = []
|
247 |
with get_openai_callback() as cb:
|
|
|
312 |
("/configure", str, configure_openai_wrapper),
|
313 |
("/add-papers", list, process_documents_wrapper),
|
314 |
("/library", None, index_documents_wrapper),
|
315 |
+
("/view-snip", str, view_document_wrapper),
|
316 |
("/session-expense", None, calculate_cost_wrapper),
|
317 |
("/export", None, download_conversation_wrapper),
|
318 |
("/help-me", None, lambda x: (welcome_message, "identity")),
|
319 |
+
("/auto-insight", list, auto_qa_chain_wrapper),
|
320 |
("/deep-dive", str, query_llm_wrapper),
|
321 |
+
("/condense-summary", list, chain_of_density_wrapper),
|
322 |
("/insight-bullets", list, insights_bullet_wrapper),
|
323 |
+
("/paper-synopsis", list, synopsis_wrapper),
|
324 |
]
|
325 |
command_center = CommandCenter(
|
326 |
default_input_type=str,
|
|
|
336 |
}
|
337 |
jsonify_functions = {
|
338 |
"identity": lambda x: x,
|
339 |
+
"dataframe": lambda x: (
|
340 |
+
x.to_dict(orient="records") if isinstance(x, pd.DataFrame) else x
|
341 |
+
),
|
342 |
"reponse_with_citations": lambda x: x,
|
343 |
}
|
344 |
boot(command_center, formating_functions)
|