# chain_recommendations.py
import json
from typing import Dict

from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

from models import chat_model

# Prompt that asks the model to weigh the severity percentages against
# flexible guidelines rather than hard-coded thresholds.
improved_recommend_prompt_template = PromptTemplate(
    input_variables=["problems"],
    template=(
        "You are a wellness recommendation assistant. Given the following problem severity percentages:\n"
        "{problems}\n\n"
        "Carefully analyze these percentages and consider nuanced differences between the areas. "
        "Your goal is to recommend the most appropriate wellness packages based on a detailed assessment of these numbers, "
        "not just fixed thresholds. Consider the following guidelines:\n\n"
        "- If one area is extremely high (above 70) while others are lower, prioritize a package targeting that area.\n"
        "- If multiple areas are high or near high (e.g., above 60), consider recommending multiple specialized packages or a comprehensive program.\n"
        "- If all areas are moderate (between 30 and 70), recommend a balanced wellness package that addresses overall health.\n"
        "- If all areas are low, a general wellness package might be sufficient.\n"
        "- Consider borderline cases and recommend packages that address both current issues and preventive measures.\n\n"
        "Return the recommended wellness packages in a JSON array format."
    ),
)

# Initialize the improved recommendation chain
recommend_chain = LLMChain(llm=chat_model, prompt=improved_recommend_prompt_template)


def generate_recommendations(problems: Dict[str, float]) -> str:
    """Serialize the severity scores and ask the chain for package recommendations."""
    recommendations = recommend_chain.run(problems=json.dumps(problems))
    return recommendations.strip()
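For reference, a minimal usage sketch is shown below. It assumes `models.chat_model` is already configured with a working chat LLM and that `chain_recommendations` is importable; the sample severity values and the defensive JSON parsing are illustrative, since the model is only asked (not guaranteed) to return a JSON array.

# example_usage.py (illustrative sketch, not part of the original module)
import json

from chain_recommendations import generate_recommendations

# Hypothetical severity percentages per wellness area
problems = {"stress": 78.0, "sleep": 45.0, "nutrition": 22.0}

raw = generate_recommendations(problems)

# The prompt requests a JSON array, but LLM output can drift,
# so parse defensively and fall back to the raw string.
try:
    packages = json.loads(raw)
except json.JSONDecodeError:
    packages = [raw]

print(packages)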