Section summary — add a `/summarise-section` command.
Files changed: app.py (+23, −1) · summary_chain.py (+18, new file)
app.py
CHANGED
@@ -21,6 +21,7 @@ from insights_bullet_chain import insights_bullet_chain
|
|
21 |
from synopsis_chain import synopsis_chain
|
22 |
from custom_exceptions import InvalidArgumentError, InvalidCommandError
|
23 |
from openai_configuration import openai_parser
|
|
|
24 |
|
25 |
st.set_page_config(layout="wide")
|
26 |
|
@@ -44,6 +45,7 @@ Here's a quick guide to getting started with me:
|
|
44 |
| `/insight-bullets <list of snippet ids>` | Extract and summarize key insights, methods, results, and conclusions. |
|
45 |
| `/paper-synopsis <list of snippet ids>` | Generate a synopsis of the paper. |
|
46 |
| `/deep-dive [<list of snippet ids>] <query>` | Query me with a specific context. |
|
|
|
47 |
|
48 |
|
49 |
<br>
|
@@ -182,10 +184,29 @@ def rag_llm_wrapper(inputs):
|
|
182 |
def query_llm_wrapper(inputs):
    """Answer a free-form query against the snippets named in *inputs*.

    Splits *inputs* into a list of snippet ids (the context) and the
    question itself, resolves each id to its cached document in
    ``st.session_state.documents``, and delegates to ``query_llm``.

    Returns whatever ``query_llm`` returns for the question and documents.
    """
    context, question = parse_context_and_question(inputs)
    relevant_docs = [st.session_state.documents[c] for c in context]
    # Removed leftover debug statement: print(context, question)
    return query_llm(question, relevant_docs)
|
187 |
|
188 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
189 |
def chain_of_density_wrapper(inputs):
|
190 |
if inputs == []:
|
191 |
raise InvalidArgumentError("Please provide snippet ids")
|
@@ -326,6 +347,7 @@ if __name__ == "__main__":
|
|
326 |
("/condense-summary", list, chain_of_density_wrapper),
|
327 |
("/insight-bullets", list, insights_bullet_wrapper),
|
328 |
("/paper-synopsis", list, synopsis_wrapper),
|
|
|
329 |
]
|
330 |
command_center = CommandCenter(
|
331 |
default_input_type=str,
|
|
|
21 |
from synopsis_chain import synopsis_chain
|
22 |
from custom_exceptions import InvalidArgumentError, InvalidCommandError
|
23 |
from openai_configuration import openai_parser
|
24 |
+
from summary_chain import summary_chain
|
25 |
|
26 |
st.set_page_config(layout="wide")
|
27 |
|
|
|
45 |
| `/insight-bullets <list of snippet ids>` | Extract and summarize key insights, methods, results, and conclusions. |
|
46 |
| `/paper-synopsis <list of snippet ids>` | Generate a synopsis of the paper. |
|
47 |
| `/deep-dive [<list of snippet ids>] <query>` | Query me with a specific context. |
|
48 |
+
| `/summarise-section [<list of snippet ids>] <section name>` | Summarize a specific section of the paper. |
|
49 |
|
50 |
|
51 |
<br>
|
|
|
def query_llm_wrapper(inputs):
    """Run a contextual query: resolve snippet ids to documents, then ask the LLM."""
    snippet_ids, question = parse_context_and_question(inputs)
    docs = [st.session_state.documents[snippet_id] for snippet_id in snippet_ids]
    return query_llm(question, docs)
|
188 |
|
189 |
|
190 |
+
def summarise_wrapper(inputs):
    """Summarise a named section of a paper.

    Parses *inputs* into snippet ids (context) and the section name (query),
    runs the summary chain over the resolved documents, records the chat
    message and token/cost statistics in session state, and returns the
    summary together with the render mode.

    Raises:
        InvalidArgumentError: if no snippet ids were supplied.
    """
    context, query = parse_context_and_question(inputs)
    # Fail fast on missing snippet ids, consistent with the other command
    # wrappers (e.g. chain_of_density_wrapper).
    if not context:
        raise InvalidArgumentError("Please provide snippet ids")
    document = [st.session_state.documents[c] for c in context]
    llm = ChatOpenAI(model=st.session_state.model, temperature=0)
    with get_openai_callback() as cb:
        summary = summary_chain(llm).invoke({"section_name": query, "paper": document})
    stats = cb
    st.session_state.messages.append(
        (f"/summarise-section {query}", summary, "identity")
    )
    st.session_state.costing.append(
        {
            "prompt tokens": stats.prompt_tokens,
            "completion tokens": stats.completion_tokens,
            "cost": stats.total_cost,
        }
    )
    return (summary, "identity")
|
208 |
+
|
209 |
+
|
210 |
def chain_of_density_wrapper(inputs):
|
211 |
if inputs == []:
|
212 |
raise InvalidArgumentError("Please provide snippet ids")
|
|
|
347 |
("/condense-summary", list, chain_of_density_wrapper),
|
348 |
("/insight-bullets", list, insights_bullet_wrapper),
|
349 |
("/paper-synopsis", list, synopsis_wrapper),
|
350 |
+
("/summarise-section", str, summarise_wrapper),
|
351 |
]
|
352 |
command_center = CommandCenter(
|
353 |
default_input_type=str,
|
summary_chain.py
ADDED
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser

# Prompt asking the model for a self-contained summary of one paper section,
# with special attention to explaining any mathematical content.
summary_prompt_template = """
Given the {section_name} section of a machine learning research paper, produce a comprehensive summary that encompasses all vital information, \
and detailed explanations of any mathematical equations present.
The goal is for this summary to function as an autonomous document that conveys the essence and key contributions of the research succinctly.
Ensure that if any mathematical content is present it is not only included but also clearly elucidated, highlighting its relevance to the research's overall objectives and results.
Structure the summary to be easily understandable, offering readers a full grasp of the section's critical insights without the need to consult the original paper.

Here is the excerpt from the research paper: {paper}
"""

summary_output_parser = StrOutputParser()
summary_prompt = PromptTemplate(
    template=summary_prompt_template,
    input_variables=["section_name", "paper"],
)


def summary_chain(model):
    """Build the section-summary chain for *model*: prompt -> model -> string.

    PEP 8 (E731): a ``def`` replaces the original lambda assignment; callers
    still invoke it the same way, e.g. ``summary_chain(llm).invoke(...)``.
    """
    return summary_prompt | model | summary_output_parser
|