Commit: synopsis / insight bullets
Files changed:
- app.py (+47 −3)
- autoqa_chains.py (+6 −3)
- insights_bullet_chain.py (+17 −0, new file)
- synopsis_chain.py (+29 −0, new file)
app.py
CHANGED
@@ -17,6 +17,8 @@ from chat_chains import (
|
|
17 |
)
|
18 |
from autoqa_chains import auto_qa_chain, auto_qa_output_parser
|
19 |
from chain_of_density import chain_of_density_chain
|
|
|
|
|
20 |
from custom_exceptions import InvalidArgumentError, InvalidCommandError
|
21 |
|
22 |
st.set_page_config(layout="wide")
|
@@ -34,9 +36,11 @@ Here's a quick guide to getting started with me:
|
|
34 |
| `/library` | View an index of processed documents to easily navigate your research. |
|
35 |
| `/session-expense` | Calculate the cost of our conversation, ensuring transparency in resource usage. |
|
36 |
| `/export` | Download conversation data for your records or further analysis. |
|
37 |
-
| `/auto-insight <document id>` | Automatically generate questions and answers for
|
38 |
-
| `/deep-dive [<list of document ids>] <query>` | Query
|
39 |
-
| `/condense-summary <document id>` | Generate increasingly concise, entity-dense summaries of
|
|
|
|
|
40 |
|
41 |
|
42 |
<br>
|
@@ -185,6 +189,44 @@ def chain_of_density_wrapper(inputs):
|
|
185 |
return (summary, "identity")
|
186 |
|
187 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
188 |
def auto_qa_chain_wrapper(inputs):
|
189 |
if inputs == "":
|
190 |
raise InvalidArgumentError("Please provide a document id")
|
@@ -256,6 +298,8 @@ if __name__ == "__main__":
|
|
256 |
("/auto-insight", str, auto_qa_chain_wrapper),
|
257 |
("/deep-dive", str, query_llm_wrapper),
|
258 |
("/condense-summary", str, chain_of_density_wrapper),
|
|
|
|
|
259 |
]
|
260 |
command_center = CommandCenter(
|
261 |
default_input_type=str,
|
|
|
17 |
)
|
18 |
from autoqa_chains import auto_qa_chain, auto_qa_output_parser
|
19 |
from chain_of_density import chain_of_density_chain
|
20 |
+
from insights_bullet_chain import insights_bullet_chain
|
21 |
+
from synopsis_chain import synopsis_chain
|
22 |
from custom_exceptions import InvalidArgumentError, InvalidCommandError
|
23 |
|
24 |
st.set_page_config(layout="wide")
|
|
|
36 |
| `/library` | View an index of processed documents to easily navigate your research. |
|
37 |
| `/session-expense` | Calculate the cost of our conversation, ensuring transparency in resource usage. |
|
38 |
| `/export` | Download conversation data for your records or further analysis. |
|
39 |
+
| `/auto-insight <document id>` | Automatically generate questions and answers for the paper. |
|
40 |
+
| `/deep-dive [<list of document ids>] <query>` | Query me with a specific context. |
|
41 |
+
| `/condense-summary <document id>` | Generate increasingly concise, entity-dense summaries of the paper. |
|
42 |
+
| `/insight-bullets <list of document ids>` | Extract and summarize key insights, methods, results, and conclusions. |
|
43 |
+
| `/paper-synopsis <document id>` | Generate a synopsis of the paper. |
|
44 |
|
45 |
|
46 |
<br>
|
|
|
189 |
return (summary, "identity")
|
190 |
|
191 |
|
192 |
+
def synopsis_wrapper(inputs):
    """Generate a synopsis of a processed document via the synopsis chain.

    Parameters
    ----------
    inputs : str
        Document id; looked up in ``st.session_state.documents``.

    Returns
    -------
    tuple
        ``(summary, "identity")`` — the synopsis text and the render style
        expected by the chat message pipeline.

    Raises
    ------
    InvalidArgumentError
        If no document id was supplied.
    """
    # `not inputs` also guards against None, not just the empty string.
    if not inputs:
        raise InvalidArgumentError("Please provide a document id")
    document = st.session_state.documents[inputs].page_content
    llm = ChatOpenAI(model="gpt-4-turbo-preview", temperature=0)
    # Token/cost accounting feeds the /session-expense command.
    with get_openai_callback() as cb:
        summary = synopsis_chain(llm).invoke({"paper": document})
    st.session_state.messages.append(("/paper-synopsis", summary, "identity"))
    st.session_state.costing.append(
        {
            "prompt tokens": cb.prompt_tokens,
            "completion tokens": cb.completion_tokens,
            "cost": cb.total_cost,
        }
    )
    return (summary, "identity")
|
209 |
+
|
210 |
+
|
211 |
+
def insights_bullet_wrapper(inputs):
    """Extract key-insight bullet points from one or more documents.

    Parameters
    ----------
    inputs : list
        Document ids; each is looked up in ``st.session_state.documents``
        and the page contents are concatenated into one excerpt.

    Returns
    -------
    tuple
        ``(insights, "identity")`` — the bullet-point text and render style.

    Raises
    ------
    InvalidArgumentError
        If the id list is empty.
    """
    # BUG FIX: this command is registered with input type `list`, so the
    # previous check `inputs == ""` could never fire — an empty list slipped
    # through and produced an empty excerpt. `not inputs` catches [], "" and
    # None alike.
    if not inputs:
        raise InvalidArgumentError("Please provide a document id")
    document = "\n\n".join(
        st.session_state.documents[doc_id].page_content for doc_id in inputs
    )
    llm = ChatOpenAI(model="gpt-4-turbo-preview", temperature=0)
    # Token/cost accounting feeds the /session-expense command.
    with get_openai_callback() as cb:
        insights = insights_bullet_chain(llm).invoke({"paper": document})
    st.session_state.messages.append(("/insight-bullets", insights, "identity"))
    st.session_state.costing.append(
        {
            "prompt tokens": cb.prompt_tokens,
            "completion tokens": cb.completion_tokens,
            "cost": cb.total_cost,
        }
    )
    return (insights, "identity")
|
228 |
+
|
229 |
+
|
230 |
def auto_qa_chain_wrapper(inputs):
|
231 |
if inputs == "":
|
232 |
raise InvalidArgumentError("Please provide a document id")
|
|
|
298 |
("/auto-insight", str, auto_qa_chain_wrapper),
|
299 |
("/deep-dive", str, query_llm_wrapper),
|
300 |
("/condense-summary", str, chain_of_density_wrapper),
|
301 |
+
("/insight-bullets", list, insights_bullet_wrapper),
|
302 |
+
("/paper-synopsis", str, synopsis_wrapper),
|
303 |
]
|
304 |
command_center = CommandCenter(
|
305 |
default_input_type=str,
|
autoqa_chains.py
CHANGED
@@ -14,9 +14,12 @@ class AutoQA(BaseModel):
|
|
14 |
|
15 |
|
16 |
qa_prompt_template = """
|
17 |
-
|
18 |
-
The
|
19 |
-
The answers
|
|
|
|
|
|
|
20 |
Your response should be recorded in the following json format: {format_instructions}.
|
21 |
|
22 |
here is the research paper: ####{paper}####
|
|
|
14 |
|
15 |
|
16 |
qa_prompt_template = """
|
17 |
+
Generate 10 insightful questions and their corresponding detailed answers about the key aspects of a specific machine learning research paper.
|
18 |
+
The focus should be on the paper's objectives, methodology, key findings, and implications for future research or application.
|
19 |
+
The answers must be based on the content of the research paper, offering clear and comprehensive insights for readers who may not be experts in the field.
|
20 |
+
Ensure that the questions cover a broad range of topics related to the paper, including but not limited to the introduction, literature review, \
|
21 |
+
methodology, results, discussion, and conclusions.
|
22 |
+
The goal is to capture the essence of the paper in a way that is accessible to a broad audience.
|
23 |
Your response should be recorded in the following json format: {format_instructions}.
|
24 |
|
25 |
here is the research paper: ####{paper}####
|
insights_bullet_chain.py
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser

# Prompt for the /insight-bullets command: asks the model for bullet-point
# insights (objective, method, results, conclusions) about a paper excerpt.
insights_bullet_prompt_template = """
Draw the key insights about objective, method, results and conclusions from the given excerpt in the form of bullet points. Also mention the figure or tables referred to along-with the corresponding bullet points
Note: if results and conclusions are not much different, feel free to combine them to avoid duplication of information
excerpt: {paper}
"""

insights_bullet_output_parser = StrOutputParser()
insights_bullet_prompt = PromptTemplate(
    template=insights_bullet_prompt_template,
    input_variables=["paper"],
)


def insights_bullet_chain(model):
    """Return the prompt | model | parser runnable for insight extraction.

    `model` is any LangChain chat/LLM runnable; the result is invoked with
    ``{"paper": <excerpt text>}`` and yields a plain string.
    """
    # A named function instead of a lambda assignment (PEP 8 E731); the
    # call-site contract — insights_bullet_chain(llm).invoke(...) — is
    # unchanged.
    return insights_bullet_prompt | model | insights_bullet_output_parser
|
synopsis_chain.py
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser

# Prompt for the /paper-synopsis command: a structured, section-by-section
# summary request for a machine-learning research paper.
synopsis_prompt_template = """
Research Paper: {paper}

Could you provide a concise summary of this paper, highlighting the following key points:

Objective: Begin by briefly describing the primary goal of the research. What problem is the paper trying to solve, or what hypothesis is it testing? Mention the specific domain of machine learning it pertains to (e.g., supervised learning, unsupervised learning, reinforcement learning, deep learning, etc.).

Background: Provide a concise overview of the context and motivation behind the research. Why is this problem important? What are the key challenges that previous studies have not addressed, which this paper seeks to overcome?

Methods: Summarize the methodology used in the study. What are the key techniques, algorithms, or models proposed or evaluated? Mention any novel approach or significant modification to existing methods. Include information on the dataset(s) used, if applicable.

Results: Highlight the main findings of the paper. What were the outcomes of applying the proposed methods? Include key metrics or statistics that demonstrate the effectiveness, efficiency, or advancements over previous approaches.

Discussion and Implications: Discuss the significance of the results. What do these findings imply for the field of machine learning? How can they be applied in practice, or what future research directions do they suggest?

Limitations: Briefly note any limitations or caveats of the study. Are there any specific conditions under which the findings may not hold? What aspects of the research could be improved upon?

Conclusion: Conclude with a summary of the research paper's contributions to the field of machine learning. Reiterate the importance of the problem addressed and the impact of the findings.
"""

synopsis_output_parser = StrOutputParser()
synopsis_prompt = PromptTemplate(
    template=synopsis_prompt_template,
    input_variables=["paper"],
)


def synopsis_chain(model):
    """Return the prompt | model | parser runnable for paper synopses.

    `model` is any LangChain chat/LLM runnable; the result is invoked with
    ``{"paper": <full paper text>}`` and yields a plain string.
    """
    # A named function instead of `synopsis_chain = lambda model: ...`
    # (PEP 8 E731); the call-site contract — synopsis_chain(llm).invoke(...)
    # — is unchanged.
    return synopsis_prompt | model | synopsis_output_parser
|