# chain_reports.py
from typing import Dict
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from models import chat_model  # project-local module exposing the configured chat model
# Prompt that turns the collected question/answer pairs into a short wellness report.
report_prompt_template = PromptTemplate(
    input_variables=["qa_summary"],
    template=(
        "You are a wellness assistant. The user provided the following answers:\n\n"
        "{qa_summary}\n\n"
        "Based on these answers, provide a brief, actionable wellness report. "
        "Include simple suggestions to improve their sleep, exercise, stress management, and diet. "
        "Consider recommending wellness packages if applicable based on the user's needs, for instance:\n"
        "- Fitness & Mobility for exercise-related concerns\n"
        "- No More Insomnia for sleep issues\n"
        "- Focus Flow for productivity issues\n"
        "- Boost Energy for low energy\n"
        "- Chronic Care for long-term chronic conditions\n"
        "- Mental Wellness for stress and anxiety reduction\n\n"
        "Also consider aspects of therapy, maintaining a balanced weight, addressing restless nights, "
        "overcoming lack of motivation, improving gut health, managing anxiety, and preventing burnout. "
        "Be concise and helpful.\n\n"
        "Report:"
    )
)
# Chain that feeds the formatted Q&A summary into the chat model.
report_chain = LLMChain(llm=chat_model, prompt=report_prompt_template)
def generate_short_report_for_session(responses: Dict[str, str]) -> str:
    """Format the user's question/answer pairs and generate a short wellness report."""
    qa_summary = "\n".join(f"{q}: {a}" for q, a in responses.items())
    raw_report = report_chain.run(qa_summary=qa_summary)
    # Strip Markdown emphasis markers; a single "*" pass also removes "**".
    return raw_report.replace("*", "")
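
# A minimal usage sketch. The question/answer pairs below are illustrative
# assumptions; in practice the responses dict comes from the surrounding
# questionnaire flow that collects the user's answers.
if __name__ == "__main__":
    sample_responses = {
        "How many hours do you sleep per night?": "About 5 hours",
        "How often do you exercise each week?": "Once, sometimes not at all",
        "How would you rate your current stress level?": "High",
    }
    print(generate_short_report_for_session(sample_responses))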