from typing import List

from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field


class QA(BaseModel):
    """A single question-answer pair about the paper."""

    question: str = Field(description="question")
    answer: str = Field(description="answer")


class AutoQA(BaseModel):
    """A collection of generated question-answer pairs."""

    questions: List[QA] = Field(description="list of question and answers")
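
# Illustrative only: how the schema above composes (placeholder values, not from any paper).
_example_qa = AutoQA(
    questions=[QA(question="What problem does the paper address?", answer="...")]
)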


qa_prompt_template = """
Generate 10 insightful questions and their corresponding detailed answers about the key aspects of a specific machine learning research paper.
The focus should be on the paper's objectives, methodology, key findings, and implications for future research or application.
The answers must be based on the content of the research paper, offering clear and comprehensive insights for readers.
Ensure that the questions cover a broad range of topics related to the paper, including but not limited to the introduction, literature review, \
methodology, results, discussion, and conclusions.
The goal is to capture the essence of the paper in a way that is accessible to an expert audience.
Your response should be returned in the following JSON format: {format_instructions}.

Here is the research paper: ####{paper}####
"""


# Parses the model's JSON output; its schema also supplies the format instructions below.
auto_qa_output_parser = JsonOutputParser(pydantic_object=AutoQA)

qa_prompt = PromptTemplate(
    template=qa_prompt_template,
    input_variables=["paper"],
    partial_variables={
        "format_instructions": auto_qa_output_parser.get_format_instructions()
    },
)


# Build the question-generation chain for a given chat model.
def auto_qa_chain(model):
    return qa_prompt | model
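
# Usage sketch (not part of the original pipeline). Assumes an OpenAI chat model via
# langchain_openai and a `paper_text` string holding the paper's full text; shown
# commented out because it needs an API key and the paper contents.
#
# from langchain_openai import ChatOpenAI
#
# model = ChatOpenAI(model="gpt-4o-mini", temperature=0)
# qa_pairs = (auto_qa_chain(model) | auto_qa_output_parser).invoke({"paper": paper_text})
# # qa_pairs -> {"questions": [{"question": "...", "answer": "..."}, ...]}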


followup_prompt_template = """
Question: {question}
Answer: {answer}
Based on the above question and answer, and using the research paper as your context, come up with a follow-up question and its answer.
The answer should be reasonably detailed and strictly based on the research paper.
Your response should be returned in the following JSON format: {format_instructions}.

Here is the research paper: ####{paper}####
"""


# The follow-up prompt reuses the AutoQA format instructions, so the follow-up pair is
# returned in the same {"questions": [...]} shape as the initial generation.
followup_prompt = PromptTemplate(
    template=followup_prompt_template,
    input_variables=["paper", "question", "answer"],
    partial_variables={
        "format_instructions": auto_qa_output_parser.get_format_instructions()
    },
)


# Build the follow-up chain for a given chat model.
def followup_qa_chain(model):
    return followup_prompt | model
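
# Usage sketch (not part of the original pipeline). Assumes `model` and `paper_text` from
# the example above, plus one previously generated pair,
# e.g. qa = {"question": "...", "answer": "..."}:
#
# followup = (followup_qa_chain(model) | auto_qa_output_parser).invoke(
#     {"paper": paper_text, "question": qa["question"], "answer": qa["answer"]}
# )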