from typing import Dict

from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

from models import chat_model


# Prompt that turns the collected question/answer summary into a short wellness report.
report_prompt_template = PromptTemplate(
    input_variables=["qa_summary"],
    template=(
        "You are a wellness assistant. The user provided the following answers:\n\n"
        "{qa_summary}\n\n"
        "Based on these answers, provide a brief, actionable wellness report. "
        "Include simple suggestions to improve their sleep, exercise, stress management, and diet. "
        "Consider recommending wellness packages if applicable based on the user's needs, for instance:\n"
        "- Fitness & Mobility for exercise-related concerns\n"
        "- No More Insomnia for sleep issues\n"
        "- Focus Flow for productivity issues\n"
        "- Boost Energy for low energy\n"
        "- Chronic Care for long-term chronic conditions\n"
        "- Mental Wellness for stress and anxiety reduction\n\n"
        "Also consider aspects of therapy, maintaining a balanced weight, addressing restless nights, "
        "overcoming lack of motivation, improving gut health, managing anxiety, and preventing burnout. "
        "Be concise and helpful.\n\n"
        "Report:"
    )
)

# Chain that renders the prompt with the Q&A summary and calls the chat model.
report_chain = LLMChain(llm=chat_model, prompt=report_prompt_template)
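# Note (version assumption): langchain >= 0.1 deprecates LLMChain in favour of
# LCEL piping, e.g. `(report_prompt_template | chat_model).invoke({...})`, which
# returns a message object rather than a plain string; the legacy chain is kept here.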


def generate_short_report_for_session(responses: Dict[str, str]) -> str:
    """Build a Q&A summary from the session responses and return a plain-text report."""
    qa_summary = "\n".join(f"{q}: {a}" for q, a in responses.items())
    raw_report = report_chain.run(qa_summary=qa_summary)
    # Strip Markdown emphasis; removing "*" already covers "**", so one pass suffices.
    return raw_report.replace("*", "")
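

# Minimal usage sketch. Assumption: `models.chat_model` is a configured chat model
# (e.g. ChatOpenAI); the question/answer pairs below are illustrative only.
if __name__ == "__main__":
    sample_responses = {
        "How many hours do you sleep per night?": "About 5",
        "How often do you exercise?": "Rarely",
        "How would you rate your stress level?": "High",
    }
    print(generate_short_report_for_session(sample_responses))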