Ritvik19 committed on
Commit 709e378 · verified · 1 Parent(s): a6a480f
Files changed (3):
  1. app.py +23 -1
  2. autoqa_chain.py +15 -0
  3. tldr_chain.py +26 -0
app.py CHANGED
@@ -15,7 +15,7 @@ from chat_chains import (
     parse_context_and_question,
     ai_response_format,
 )
-from autoqa_chains import auto_qa_chain
+from autoqa_chain import auto_qa_chain
 from chain_of_density import chain_of_density_chain
 from insights_bullet_chain import insights_bullet_chain
 from insights_mind_map_chain import insights_mind_map_chain
@@ -23,6 +23,7 @@ from synopsis_chain import synopsis_chain
 from custom_exceptions import InvalidArgumentError, InvalidCommandError
 from openai_configuration import openai_parser
 from summary_chain import summary_chain
+from tldr_chain import tldr_chain
 
 st.set_page_config(layout="wide")
 
@@ -48,6 +49,7 @@ Here's a quick guide to getting started with me:
 | `/paper-synopsis <list of snippet ids>` | Generate a synopsis of the paper. |
 | `/deep-dive [<list of snippet ids>] <query>` | Query me with a specific context. |
 | `/summarise-section [<list of snippet ids>] <section name>` | Summarize a specific section of the paper. |
+| `/tldr <list of snippet ids>` | Generate a tldr summary of the paper. |
 
 
 <br>
@@ -249,6 +251,25 @@ def synopsis_wrapper(inputs):
     return (summary, "identity")
 
 
+def tldr_wrapper(inputs):
+    if inputs == []:
+        raise InvalidArgumentError("Please provide snippet ids")
+    document = "\n\n".join([st.session_state.documents[c].page_content for c in inputs])
+    llm = ChatOpenAI(model=st.session_state.model, temperature=0)
+    with get_openai_callback() as cb:
+        summary = tldr_chain(llm).invoke({"paper": document})
+        stats = cb
+    st.session_state.messages.append(("/tldr", summary, "identity"))
+    st.session_state.costing.append(
+        {
+            "prompt tokens": stats.prompt_tokens,
+            "completion tokens": stats.completion_tokens,
+            "cost": stats.total_cost,
+        }
+    )
+    return (summary, "identity")
+
+
 def insights_bullet_wrapper(inputs):
     if inputs == []:
         raise InvalidArgumentError("Please provide snippet ids")
@@ -381,6 +402,7 @@ if __name__ == "__main__":
         ("/insight-mind-map", list, insights_mind_map_wrapper),
         ("/paper-synopsis", list, synopsis_wrapper),
         ("/summarise-section", str, summarise_wrapper),
+        ("/tldr", list, tldr_wrapper),
     ]
     command_center = CommandCenter(
         default_input_type=str,
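
For context on the accounting in `tldr_wrapper` above: `get_openai_callback` collects token usage and cost for every OpenAI call made inside its `with` block, and the wrapper copies those fields into `st.session_state.costing`. A minimal, non-Streamlit sketch of the same pattern (the import paths and model name are assumptions, since app.py's import block is not shown in this diff):

```python
# Sketch only, not part of this commit: the callback accumulates usage for
# calls made inside the with-block; the attributes read here are the same
# ones tldr_wrapper stores in st.session_state.costing.
from langchain_community.callbacks import get_openai_callback  # assumed import path
from langchain_openai import ChatOpenAI  # assumed import path

llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)  # placeholder model name
with get_openai_callback() as cb:
    llm.invoke("Summarise the idea of attention in one sentence.")

print({
    "prompt tokens": cb.prompt_tokens,
    "completion tokens": cb.completion_tokens,
    "cost": cb.total_cost,
})
```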
autoqa_chain.py ADDED
@@ -0,0 +1,15 @@
+from langchain_core.output_parsers import JsonOutputParser
+from langchain_core.prompts import PromptTemplate
+
+qa_prompt_template = """
+Create a mind map of questions (based on the given abstract) that will help understand a machine learning research paper.
+Ensure that the outline is structured in the following JSON array for clarity, such that each section should have two keys: "section_name" and "questions"
+
+Here is the research paper abstract: ####{paper}####
+"""
+
+qa_prompt = PromptTemplate(
+    template=qa_prompt_template,
+    input_variables=["paper"],
+)
+auto_qa_chain = lambda model: qa_prompt | model | JsonOutputParser()
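
`auto_qa_chain` is a factory: given a chat model, it composes the prompt, the model, and a `JsonOutputParser` into a single runnable. A usage sketch (not part of the commit; the model name and abstract are placeholders, and the exact JSON shape depends on the model following the prompt's instructions):

```python
# Sketch only: drive the new chain with a chat model and consume its parsed JSON.
from langchain_openai import ChatOpenAI  # assumed import path

from autoqa_chain import auto_qa_chain

llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)  # placeholder model name
outline = auto_qa_chain(llm).invoke({"paper": "We propose a transformer that ..."})

# The prompt asks for a JSON array of {"section_name", "questions"} objects.
for section in outline:
    print(section["section_name"])
    for question in section["questions"]:
        print(" -", question)
```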
tldr_chain.py ADDED
@@ -0,0 +1,26 @@
+from langchain_core.prompts import PromptTemplate
+from langchain_core.output_parsers import StrOutputParser
+
+tldr_prompt_template = """
+Create a mind map of the given research paper along the given lines:
+1. Background: A brief overview of what's being studied in what context
+2. Justification: Why the researchers conducted the study
+3. Method: How the researchers arrived at the result
+4. Major Findings: The main findings of the study
+5. Key Results: More details about the results of the study
+6. Conclusion: Significance of the findings and what they mean for future research
+
+The above sections may differ from paper to paper, hence you may need to adjust the structure accordingly by dropping / merging one or more sections.
+
+Here is the research paper abstract: ####{paper}####
+
+Ensure that the outline is structured in Markdown format for clarity, facilitating its integration into documents or presentations.
+"""
+
+
+tldr_output_parser = StrOutputParser()
+tldr_prompt = PromptTemplate(
+    template=tldr_prompt_template,
+    input_variables=["paper"],
+)
+tldr_chain = lambda model: tldr_prompt | model | tldr_output_parser
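
`tldr_chain` follows the same factory pattern but ends in a `StrOutputParser`, so invoking it yields the model's reply as a plain Markdown string, which `tldr_wrapper` in app.py then appends to the chat history. A usage sketch (not part of the commit; model name and abstract are placeholders):

```python
# Sketch only: the chain returns a Markdown-formatted summary string.
from langchain_openai import ChatOpenAI  # assumed import path

from tldr_chain import tldr_chain

llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)  # placeholder model name
markdown_summary = tldr_chain(llm).invoke({"paper": "We study scaling laws for ..."})
print(markdown_summary)
```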