---
license: gpl
---

# Counseling with CAMEL

This page walks through a minimal interactive demo: a CBT planning agent drafts a counseling plan from the client's intake information, and a counselor agent then follows that plan to generate each reply. Both agents call the `DLI-Lab/camel` model served from a vLLM OpenAI-compatible endpoint.

### Setup

```
import argparse
import json
import multiprocessing
import re
import traceback
from abc import ABC, abstractmethod
from pathlib import Path

import requests
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
```
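
The demo expects an OpenAI-compatible vLLM server. Before wiring up the agents, it can help to confirm that the endpoint is reachable; the sketch below is optional and not part of the original script. It assumes a placeholder base URL that already ends in `/v1` (the same value later passed to `openai_api_base`) and queries the server's `/v1/models` route with the `requests` import above.

```
# Optional sanity check (a sketch, not part of the original demo).
# VLLM_SERVER is a placeholder; it should match the URL passed to openai_api_base.
VLLM_SERVER = "http://localhost:8000/v1"

def check_vllm_server(base_url: str) -> bool:
    """Return True if the OpenAI-compatible vLLM endpoint answers /models."""
    try:
        resp = requests.get(f"{base_url}/models", timeout=5)
        resp.raise_for_status()
        served = [m["id"] for m in resp.json().get("data", [])]
        print(f"vLLM server is up; serving: {served}")
        return True
    except requests.RequestException as exc:
        print(f"Could not reach vLLM server at {base_url}: {exc}")
        return False
```
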
### Define Agents

```
class Agent(ABC):
    """Base class that owns the connection to the vLLM OpenAI-compatible server."""

    def __init__(self, vLLM_server, model_id):
        self.llm = OpenAI(
            temperature=0.0,
            openai_api_key="EMPTY",
            openai_api_base=vLLM_server,
            max_tokens=512,
            model=model_id,
        )

    @abstractmethod
    def generate(self, *args, **kwargs):
        """Produce this agent's output from the shared LLM client."""
```
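
`Agent` centralizes the connection details; subclasses only implement `generate`. As a quick, hypothetical illustration of that contract (not part of the original demo), the smallest useful subclass simply forwards a raw prompt to the model, which can be handy for smoke-testing the server before running the full counseling loop:

```
# Hypothetical example subclass, not used by the demo itself: it passes a raw
# prompt straight through to the model and returns the completion text.
class EchoAgent(Agent):
    def generate(self, prompt: str) -> str:
        return self.llm.invoke(prompt)
```
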
```
class CBTAgent(Agent):
    """Picks a CBT technique and drafts a counseling plan from the intake information."""

    def __init__(self, prompt, vLLM_server, model_id):
        super().__init__(vLLM_server, model_id)
        self.prompt_template = PromptTemplate(
            input_variables=[
                "client_information",
                "reason_counseling",
                "history",
            ],
            template=prompt,
        )

    def generate(self, client_information, reason, history):
        # Flatten the structured history into "Role: message" lines for the prompt.
        history_text = "\n".join(
            f"{message['role'].capitalize()}: {message['message']}"
            for message in history
        )
        prompt = self.prompt_template.format(
            client_information=client_information,
            reason_counseling=reason,
            history=history_text,
        )
        response = self.llm.invoke(prompt)

        # The model is expected to name a technique and then a "Counseling planning:"
        # section; fall back to None if either part is missing from the completion.
        try:
            cbt_technique = response.split("Counseling")[0].replace("\n", "")
        except (AttributeError, IndexError):
            cbt_technique = None
        try:
            cbt_plan = response.split("Counseling planning:\n")[1].split("\nCBT")[0]
        except (AttributeError, IndexError):
            cbt_plan = None

        return cbt_technique, cbt_plan
```
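
The string splitting above assumes the model reproduces the section labels verbatim. A slightly more forgiving parser (a sketch, not part of the original code) can use the `re` module imported earlier; it keeps the same assumptions about the labels ("Counseling planning:" followed by an optional "CBT" trailer) but tolerates extra whitespace:

```
# Sketch of a more tolerant parser for the planner output. It mirrors the
# labels assumed by CBTAgent.generate but is not part of the original demo.
def parse_cbt_response(response: str):
    # Everything before the first "Counseling" marker is treated as the technique.
    technique = response.split("Counseling")[0].strip() or None
    # The plan sits between "Counseling planning:" and an optional "\nCBT" trailer.
    plan_match = re.search(r"Counseling planning:\s*(.+?)(?:\nCBT|\Z)", response, re.DOTALL)
    plan = plan_match.group(1).strip() if plan_match else None
    return technique, plan
```
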
```
class CounselorAgent(Agent):
    """Generates the next counselor utterance while following the fixed CBT plan."""

    def __init__(self, prompt, vLLM_server, model_id, cbt_plan):
        super().__init__(vLLM_server, model_id)
        self.cbt_plan = cbt_plan
        self.prompt_template = PromptTemplate(
            input_variables=[
                "client_information",
                "reason_counseling",
                "cbt_plan",
                "history",
            ],
            template=prompt,
        )

    def generate(self, client_information, reason, history):
        # Flatten the structured history into "Role: message" lines for the prompt.
        history_text = "\n".join(
            f"{message['role'].capitalize()}: {message['message']}"
            for message in history
        )
        prompt = self.prompt_template.format(
            client_information=client_information,
            reason_counseling=reason,
            cbt_plan=self.cbt_plan,
            history=history_text,
        )
        response = self.llm.invoke(prompt)

        # The model sometimes echoes the dialogue as a list of dicts; recover the
        # message text, then strip any leading "Counselor:" tag and stray characters.
        if "'message':" in response:
            response = (
                response.split("'message':")[1]
                .split(", {")[0]
                .replace("\"", "")
                .replace("]", "")
                .replace("}", "")
            )
        return (
            response.split("Counselor:")[-1]
            .replace("\n", "")
            .replace("\\", "")
            .replace("\"", "")
            .strip()
        )
```
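
Putting the two agents together: the planner runs once at the start of a session, and the counselor reuses the resulting plan on every later turn. A minimal, hypothetical wiring (placeholder server URL and made-up intake details; the prompt templates themselves are defined in the next section) looks like this:

```
# Hypothetical wiring of the two agents; the URL and intake values are placeholders.
vllm_server = "http://localhost:8000/v1"
model_id = "DLI-Lab/camel"

intake_form = "Name: Laura\nAge: 45\nGender: female\nOccupation: Office Job"
reason = "Feeling overwhelmed by work-related stress."
history = [
    {"role": "Counselor", "message": "Hi Laura, it's nice to meet you. How can I assist you today?"},
    {"role": "Client", "message": "I've been feeling overwhelmed at work lately."},
]

planner = CBTAgent(CBT_PLAN_PROMPT, vllm_server, model_id)
cbt_technique, cbt_plan = planner.generate(intake_form, reason, history)

counselor = CounselorAgent(RESPONSE_PROMPT, vllm_server, model_id, cbt_plan)
print(counselor.generate(intake_form, reason, history))
```
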
### Define prompt templates

```
RESPONSE_PROMPT = """<|start_header_id|>system<|end_header_id|>

You are playing the role of a counselor in a psychological counseling session. Your task is to use the provided client information and counseling planning to generate the next counselor utterance in the dialogue. The goal is to create a natural and engaging response that builds on the previous conversation and aligns with the counseling plan.<|eot_id|><|start_header_id|>user<|end_header_id|>

Client Information:
{client_information}

Reason for seeking counseling:
{reason_counseling}

Counseling planning:
{cbt_plan}

Counseling Dialogue:
{history}<|eot_id|><|start_header_id|>assistant<|end_header_id|>

"""
```

```
CBT_PLAN_PROMPT = """<|start_header_id|>system<|end_header_id|>

You are a counselor specializing in CBT techniques. Your task is to use the provided client information and dialogue to generate an appropriate CBT technique and a detailed counseling plan.<|eot_id|><|start_header_id|>user<|end_header_id|>

Types of CBT Techniques:
Efficiency Evaluation, Pie Chart Technique, Alternative Perspective, Decatastrophizing, Pros and Cons Analysis, Evidence-Based Questioning, Reality Testing, Continuum Technique, Changing Rules to Wishes, Behavior Experiment, Problem-Solving Skills Training, Systematic Exposure

Client Information:
{client_information}

Reason for seeking counseling:
{reason_counseling}

Counseling Dialogue:
{history}

Choose an appropriate CBT technique and create a counseling plan based on that technique.<|eot_id|><|start_header_id|>assistant<|end_header_id|>"""
```
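
To inspect exactly what the model will receive, either template can be rendered locally with dummy values before any server is involved. The snippet below is illustrative only (the filler values are made up) and formats the planning prompt the same way `CBTAgent.generate` does:

```
# Illustrative only: render the planning prompt with made-up values to see the
# exact string that would be sent to the model.
preview_template = PromptTemplate(
    input_variables=["client_information", "reason_counseling", "history"],
    template=CBT_PLAN_PROMPT,
)
preview = preview_template.format(
    client_information="Name: Laura\nAge: 45\nGender: female",
    reason_counseling="Work-related stress.",
    history="Counselor: How can I assist you today?\nClient: I feel overwhelmed at work.",
)
print(preview)
```
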
### Start!

```
def collect_info(name, age, gender, occupation, education, marital_status, family_details, reason):
    # Assemble the intake form, the stated reason, and the opening line of the dialogue.
    CLIENT_INFO = f"""Name: {name}
Age: {age}
Gender: {gender}
Occupation: {occupation}
Education: {education}
Marital Status: {marital_status}
Family Details: {family_details}"""

    REASON_FOR_COUNSELING = reason
    HISTORY_INIT = f"Counselor: Hi {name}, it's nice to meet you. How can I assist you today?\nClient: "

    return CLIENT_INFO, REASON_FOR_COUNSELING, HISTORY_INIT


def start_demo(intake_form, reason, history_init):
    model_id = "DLI-Lab/camel"
    vLLM_server = "YOUR_VLLM_SERVER"  # OpenAI-compatible base URL passed to openai_api_base
    max_turns = 20

    print("Welcome to the Multi-Turn ClientAgent Demo!\n")
    print("[Intake Form]")
    print(intake_form)
    print("Type 'exit' to quit the demo.\n")

    print("====== Counseling Session ======\n")
    first_response = history_init.split("Counselor: ")[-1].split("\nClient")[0]
    print(f"Counselor: {first_response}")

    num_turn = 0
    while num_turn < max_turns:
        if num_turn == 0:
            # First turn: take the client's opening message and build the CBT plan once.
            user_input = input("You (Client): ")
            history_init = history_init + user_input
            history = [
                {"role": "Counselor", "message": history_init.split("Counselor: ")[-1].split("\nClient")[0]},
                {"role": "Client", "message": history_init.split("Client: ")[-1]},
            ]
            CBT_Planner = CBTAgent(CBT_PLAN_PROMPT, vLLM_server, model_id)
            cbt_technique, cbt_plan = CBT_Planner.generate(intake_form, reason, history)

            num_turn += 1
        else:
            # Later turns: generate a counselor reply that follows the fixed CBT plan.
            counselor = CounselorAgent(RESPONSE_PROMPT, vLLM_server, model_id, cbt_plan)
            counselor_response = counselor.generate(intake_form, reason, history)
            print(f"Counselor: {counselor_response}")

            history.append({"role": "Counselor", "message": counselor_response})

            user_input = input("You (Client): ")

            if user_input.lower() == 'exit':
                print("\n====== Exiting the demo. Goodbye! ======\n")
                break

            print(f"You (Client): {user_input}")
            history.append({"role": "Client", "message": user_input})

            num_turn += 1

    print("Demo completed.")
    return cbt_plan, history


## Example
# name = "Laura"
# age = "45"
# gender = "female"
# occupation = "Office Job"
# education = "College Graduate"
# marital_status = "Single"
# family_details = "Lives alone"

name = input("Let's begin the pre-counseling session. What is your name? ")
age = input("How old are you? ")
gender = input("What is your gender? (e.g., Male, Female) ")
occupation = input("What is your occupation? ")
education = input("What is your highest level of education? (e.g., College Graduate) ")
marital_status = input("What is your marital status? (e.g., Single, Married) ")
family_details = input("Can you briefly describe your family situation? (e.g., Lives alone) ")
reason = input("What brings you here for counseling? Please explain briefly. ")


CLIENT_INFO, REASON_FOR_COUNSELING, HISTORY_INIT = collect_info(
    name, age, gender, occupation, education, marital_status, family_details, reason
)
cbt_plan, history = start_demo(CLIENT_INFO, REASON_FOR_COUNSELING, HISTORY_INIT)

print(f"CBT Plan: {cbt_plan}\n\n")

for message in history:
    print(f"{message['role']}: {message['message']}")
```
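
After a session ends, the plan and transcript only exist in memory. If you want to keep them, a small optional addition (not part of the original demo; the output filename is arbitrary) can write them to disk with the `json` and `pathlib` imports from the setup cell:

```
# Optional, not part of the original demo: persist the plan and transcript to JSON.
session_record = {"cbt_plan": cbt_plan, "history": history}
out_path = Path("counseling_session.json")
out_path.write_text(json.dumps(session_record, indent=2, ensure_ascii=False), encoding="utf-8")
print(f"Session saved to {out_path.resolve()}")
```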