second initial commit
- .env_example +2 -0
- README.md +24 -0
- app.py +54 -0
- {core/app → app}/__init__.py +0 -0
- app/app.py +162 -0
- app/app_config.py +84 -0
- app/utils.py +68 -0
- cache/aeb9181a-771b-4843-81ed-333364947eb7.jpg +0 -0
- core/llms/base_llm.py +61 -10
- core/llms/litellm_llm.py +7 -17
- core/llms/utils.py +88 -0
- core/prompts/__init__.py +2 -0
- core/prompts/chat_mark_chat.py +2 -0
- core/prompts/think_mark_think.py +166 -0
- core/types.py +58 -0
- core/utils.py +115 -0
- hehe.jpg +0 -0
- input_config.json +7 -0
- poetry.lock +0 -0
- pyproject.toml +21 -0
- sometext.txt +0 -0
- ten.py +12 -0
- tt.py +4 -0
.env_example
ADDED
@@ -0,0 +1,2 @@
```text
MODEL_NAME=openai/gpt-3.5-turbo
MODEL_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
```
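For reference, a sketch of how these variables are consumed; this mirrors `InputConfig.load` in `app/app_config.py` below (the relative `.env` path here is illustrative, the app resolves `ENV_FILE_PATH` itself):

```python
import os
from dotenv import load_dotenv

load_dotenv('.env')  # illustrative path; the app uses ENV_FILE_PATH from app/app_config.py

model_name = os.getenv('MODEL_NAME', 'not set')
model_api_key = os.getenv('MODEL_API_KEY', 'not set')
```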
README.md
CHANGED
@@ -1,2 +1,26 @@
````diff
 # open-o1
 o1 like
+
+## TODO
+[ ] add fallback LLMs
+[ ] better error handling
+[ ] Add Tools (web, math, code)
+[ ] Make cli
+
+
+## What it does
+- It takes a prompt, thinks, thinks again, critiques itself, then returns an answer.
+
+## Installation
+
+```bash
+git clone https://github.com/tikendraw/open-o1.git
+
+cd open-o1
+
+pip install -r requirements.txt
+
+streamlit run app.py
+```
+
+HAVE FUN.
````
app.py
ADDED
@@ -0,0 +1,54 @@
```python
import os
from core.llms import LLM
from dotenv import load_dotenv
from typing import Annotated, Optional
# from function_schema import Doc
from core.llms.utils import user_message_with_images

load_dotenv('../.global_env')

# def get_weather(
#     city: Annotated[str, "The city to get the weather for"],  # <- string value of Annotated is used as a description
#     unit: Annotated[Optional[str], "The unit to return the temperature in"] = "celsius",
# ) -> str:
#     """Returns the weather for the given city."""
#     return f"Weather for {city} is 20°C"


# def get_distance(city1: Annotated[str, 'city to start journey from'], city2: Annotated[str, 'city where journey ends']) -> float:
#     ''' Returns distance between two cities '''
#     return f"{city1} --- {city2}: 10 KM"

# tools = {"get_weather": get_weather, 'get_distance': get_distance}

# models = [
#     ('gemini/gemini-1.5-flash', 'GEMINI_API_KEY'),
#     ('groq/llava-v1.5-7b-4096-preview', 'GROQ_API_KEY')
# ]

# model, api_key = models[0]
# api_key = os.getenv(api_key)

# llm = LLM(api_key=api_key, model=model)

# messages = [
#     {"role": "system", "content": "You are a helpful assistant."},
#     {"role": "user", "content": "Who won the world series in 2020?"},
#     {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
#     # {"role": "user", "content": "whats weather in new york, what is distance between new york and las vegas"},
#     # user_message_with_images(
#     #     'explain this image',
#     #     file_path_list=['./hehe.jpg'],
#     #     max_size_px=512,
#     # )
# ]

# response = llm.chat(messages)

# print('response: ', response)
from app.app import main

if __name__ == '__main__':
    main()
```
{core/app → app}/__init__.py
RENAMED
File without changes
app/app.py
ADDED
@@ -0,0 +1,162 @@
````python
from dataclasses import dataclass
from litellm.types.utils import ModelResponse
import streamlit as st
from streamlit.runtime.uploaded_file_manager import UploadedFile
from app.utils import generate_answer, dict_to_markdown
from core.types import ThoughtStepsDisplay, ThoughtSteps, BigMessage, Message
from .app_config import InputConfig, ENV_FILE_PATH, CONFIG_FILE_PATH
from core.llms.base_llm import BaseLLM
from core.llms.litellm_llm import LLM
from core.llms.utils import user_message_with_images
from PIL import Image
from core.prompts.think_mark_think import SYSTEM_PROMPT

st.set_page_config(page_title="Open-o1", page_icon="🧠", layout="wide")
st.title('Open-O1')
st.write('Welcome to Open-O1!')

def config_sidebar(config: InputConfig) -> InputConfig:
    st.sidebar.header('Configuration')
    model_name = st.sidebar.text_input('Enter Model Name: e.g. provider/model-name', value=config.model_name, placeholder='openai/gpt-3.5-turbo')
    model_api_key = st.sidebar.text_input('Enter API Key: ', type='password', value=config.model_api_key, placeholder='sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
    max_tokens = st.sidebar.number_input('Enter Max Tokens per Thought: ', value=config.max_tokens, min_value=1)
    max_steps = st.sidebar.number_input('Enter Max Thinking Steps: ', value=config.max_steps, min_value=1, step=1)
    temperature = st.sidebar.number_input('Enter Temperature: ', value=config.temperature, min_value=0.0, step=0.1, max_value=10.0)
    timeout = st.sidebar.number_input('Enter timeout (seconds): ', value=config.timeout, min_value=0.0, step=1.0)
    sleeptime = st.sidebar.number_input('Enter Sleep Time (seconds): (time between requests, to avoid rate limits)', value=config.sleeptime, min_value=0.0, step=1.0)

    config.model_name = model_name
    config.model_api_key = model_api_key
    config.max_tokens = max_tokens
    config.max_steps = max_steps
    config.temperature = temperature
    config.timeout = timeout
    config.sleeptime = sleeptime

    if st.sidebar.button('Save config'):
        config.save(env_file=ENV_FILE_PATH, config_file=CONFIG_FILE_PATH)
        st.sidebar.success('Config saved!')

    return config

def load_llm(config: InputConfig, tools=None) -> BaseLLM:
    return LLM(api_key=config.model_api_key, model=config.model_name, tools=tools)


def image_buffer_to_pillow_image(image_buffer: UploadedFile) -> Image.Image:
    return Image.open(image_buffer)


def process_user_input(user_input: str, image: Image.Image = None) -> dict:
    if image:
        message = [user_message_with_images(user_msg_str=user_input, images=[image])]
    else:
        message = [{"role": "user", "content": user_input}]
    return message


def main():
    config = InputConfig.load(env_file=ENV_FILE_PATH, config_file=CONFIG_FILE_PATH)
    config = config_sidebar(config=config)
    llm = load_llm(config)

    current_tab = 'o1_tab'
    messages_attr_name = f"{current_tab}_messages"
    big_message_attr_name = f"{current_tab}_big_messages"

    clear_chat_bt = st.sidebar.button('Clear Chat')
    if clear_chat_bt:
        # drop the stored history for this tab
        for attr in (messages_attr_name, big_message_attr_name):
            if attr in st.session_state:
                delattr(st.session_state, attr)

    message_attr = set_and_get_state_attr(messages_attr_name, default_value=[])
    big_message_attr = set_and_get_state_attr(big_message_attr_name, default_value=[])

    # replay the earlier messages
    for message in big_message_attr:
        with st.chat_message(message.role):
            for thought in message.thoughts:
                print_thought(thought.to_thought_steps_display(), is_final=False)

            if message.content:
                if message.role == 'user':
                    st.markdown(message.content)
                else:
                    print_thought(message.to_thought_steps_display(), is_final=True)

    if prompt := st.chat_input("What is up bro?"):
        big_message_attr.append(BigMessage(role="user", content=prompt, thoughts=[]))

        with st.chat_message("user"):
            st.markdown(prompt)

        with st.chat_message("assistant"):
            messages = [{
                "role": "system",
                "content": SYSTEM_PROMPT
            }]

            messages += [m.to_message() for m in big_message_attr]

            thoughts = []

            # add the "json" keyword to user messages; it nudges the model toward JSON output
            for message in messages:
                if message["role"] == "user":
                    message["content"] = f"{message['content']}, json format"

            for num, step in enumerate(generate_answer(
                messages=messages,
                max_steps=config.max_steps,
                stream=False,
                max_tokens=config.max_tokens,
                temperature=config.temperature,
                sleeptime=config.sleeptime,
                timeout=config.timeout,
                llm=llm,
                response_format={"type": "json_object"}
            ), 1):
                thoughts.append(step)
                print_thought(step.to_thought_steps_display(), is_final=False)

            last_step = thoughts.pop()
            # store the reply with its intermediate thoughts so it is replayed
            # on the next rerun and included in future context
            big_message_attr.append(BigMessage(
                role="assistant",
                content=last_step,
                thoughts=thoughts
            ))
            # st.markdown(dict_to_markdown(step.model_dump()))


def set_and_get_state_attr(attr_name: str, default_value: any = None) -> any:
    if attr_name not in st.session_state:
        setattr(st.session_state, attr_name, default_value)
    return getattr(st.session_state, attr_name)


def print_thought(thought: ThoughtStepsDisplay, is_final: bool = False):
    if is_final:
        st.markdown(thought.md())
    else:
        st.markdown(f'\n```json\n{thought.model_dump_json()}\n```\n', unsafe_allow_html=True)
````
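The chat history survives Streamlit's rerun-on-every-interaction model via `set_and_get_state_attr`, which lazily creates a `st.session_state` entry. The same pattern in isolation (the `demo_messages` key is hypothetical):

```python
import streamlit as st

def set_and_get_state_attr(attr_name: str, default_value=None):
    # create the key on the first run, then always hand back the live object
    if attr_name not in st.session_state:
        setattr(st.session_state, attr_name, default_value)
    return getattr(st.session_state, attr_name)

history = set_and_get_state_attr('demo_messages', default_value=[])  # hypothetical key
history.append('hello')  # in-place mutations persist across reruns
```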
app/app_config.py
ADDED
@@ -0,0 +1,84 @@
```python
import json
import os
from dataclasses import dataclass
from pathlib import Path
from dotenv import load_dotenv

CUR_DIR = Path(os.path.abspath(__file__)).parent.parent
CACHE_DIR = CUR_DIR / 'cache'
CACHE_DIR.mkdir(exist_ok=True)
ENV_FILE_PATH = CUR_DIR / '.env'
CONFIG_FILE_PATH = CUR_DIR / 'input_config.json'

print(f"{CUR_DIR=}")
print(f"{ENV_FILE_PATH=}")
print(f"{CONFIG_FILE_PATH=}")


@dataclass
class InputConfig:
    model_name: str = 'openai/gpt-3.5-turbo'
    model_api_key: str = 'sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
    max_tokens: int = 1024
    max_steps: int = 10
    temperature: float = 0.2
    timeout: float = 30.0
    sleeptime: float = 0.0

    @classmethod
    def load(cls, env_file=ENV_FILE_PATH, config_file=CONFIG_FILE_PATH):
        # Load env variables
        load_dotenv(env_file)
        env_dict = {
            'model_name': os.getenv('MODEL_NAME', 'not set'),
            'model_api_key': os.getenv('MODEL_API_KEY', 'not set')
        }

        # Load config JSON
        with open(config_file, 'r') as f:
            config_dict = json.load(f)

        # Combine both
        return cls(
            model_name=env_dict.get('model_name', cls.model_name),
            model_api_key=env_dict.get('model_api_key', cls.model_api_key),
            max_tokens=config_dict.get('max_tokens', cls.max_tokens),
            max_steps=config_dict.get('max_steps', cls.max_steps),
            temperature=config_dict.get('temperature', cls.temperature),
            timeout=config_dict.get('timeout', cls.timeout),
            sleeptime=config_dict.get('sleeptime', cls.sleeptime)
        )

    def save(self, env_file=ENV_FILE_PATH, config_file=CONFIG_FILE_PATH):
        # Read existing env content if it exists
        env_vars = {}
        if os.path.exists(env_file):
            with open(env_file, 'r') as f:
                for line in f:
                    if line.strip():  # Ignore empty lines
                        key, value = line.strip().split('=', 1)
                        env_vars[key] = value

        # Update the necessary keys
        env_vars['MODEL_API_KEY'] = self.model_api_key
        env_vars['MODEL_NAME'] = self.model_name

        # Write back to the .env file
        with open(env_file, 'w') as f:
            for key, value in env_vars.items():
                f.write(f'{key}={value}\n')

        # Save other parameters to input_config.json
        config_dict = {
            'max_tokens': self.max_tokens,
            'max_steps': self.max_steps,
            'temperature': self.temperature,
            'timeout': self.timeout,
            'sleeptime': self.sleeptime
        }
        with open(config_file, 'w') as f:
            json.dump(config_dict, f, indent=4)

if not CONFIG_FILE_PATH.exists() or not ENV_FILE_PATH.exists():
    InputConfig().save()
```
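A quick sketch of the intended round trip, assuming the default files exist (they are created on first import if missing; the tweaked value is illustrative):

```python
from app.app_config import InputConfig, ENV_FILE_PATH, CONFIG_FILE_PATH

config = InputConfig.load(env_file=ENV_FILE_PATH, config_file=CONFIG_FILE_PATH)
config.temperature = 0.7  # tweak a field
config.save(env_file=ENV_FILE_PATH, config_file=CONFIG_FILE_PATH)
# MODEL_NAME / MODEL_API_KEY land in .env; the numeric settings go to input_config.json
```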
app/utils.py
ADDED
@@ -0,0 +1,68 @@
```python
import json
import re
from typing import Generator
from textwrap import dedent
from litellm.types.utils import ModelResponse
from pydantic import ValidationError
from core.llms.base_llm import BaseLLM
from core.types import ThoughtSteps
from core.prompts.think_mark_think import REVIEW_PROMPT, SYSTEM_PROMPT, FINAL_ANSWER_PROMPT
import os
import time
from core.utils import parse_with_fallback
from termcolor import colored


def generate_answer(messages: list[dict], max_steps: int = 20, llm: BaseLLM = None, sleeptime: float = 0.0, **kwargs):
    thoughts = []

    for i in range(max_steps):
        raw_response = llm.chat(messages, **kwargs)
        response = raw_response.choices[0].message.content
        thought = response_parser(response)

        print(colored(f"{i+1} - {response}", 'yellow'))

        if thought:
            thoughts.append(thought)
            messages.append({"role": "assistant", "content": thought.model_dump_json()})
            messages.append({"role": "user", "content": REVIEW_PROMPT})

            yield thought
        time.sleep(sleeptime)

    # Get the final answer after all thoughts are processed
    messages += [{"role": "user", "content": FINAL_ANSWER_PROMPT}]
    raw_final_answers = llm.chat(messages=messages, **kwargs)
    final_answer = raw_final_answers.choices[0].message.content

    print(colored(f"final answer - {final_answer}", 'green'))

    final_thought = response_parser(final_answer)
    yield final_thought

def response_parser(response: str) -> ThoughtSteps:
    thought = None  # stays None for unsupported input types
    if isinstance(response, str):
        try:
            thought_kwargs = json.loads(response)
            thought = ThoughtSteps(**thought_kwargs)
        except (json.JSONDecodeError, ValidationError):
            thought = parse_with_fallback(response, ThoughtSteps)

    elif isinstance(response, dict):
        thought = ThoughtSteps(**response)

    return thought


def dict_to_markdown(d: dict) -> str:
    '''use keys as headers and values as content'''
    md = ""
    for key, value in d.items():
        md += f"### {key}\n"
        md += f"{value}\n"
    return md
```
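`generate_answer` is a generator: it yields one `ThoughtSteps` per review round and the final answer last. A hedged sketch of driving it outside Streamlit (model name and key are illustrative):

```python
from app.utils import generate_answer
from core.llms.litellm_llm import LLM
from core.prompts.think_mark_think import SYSTEM_PROMPT

llm = LLM(api_key='sk-...', model='openai/gpt-3.5-turbo')  # illustrative credentials
messages = [
    {"role": "system", "content": SYSTEM_PROMPT},
    {"role": "user", "content": "Why is the sky blue? json format"},
]

steps = list(generate_answer(messages, max_steps=3, llm=llm,
                             response_format={"type": "json_object"}))
final = steps[-1]  # last yield is the final answer (may be None if parsing failed)
print(final.answer)
```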
cache/aeb9181a-771b-4843-81ed-333364947eb7.jpg
ADDED
core/llms/base_llm.py
CHANGED
@@ -1,22 +1,73 @@
```diff
 from abc import ABC, abstractmethod
+from litellm.utils import ModelResponse
+import json
+from function_schema import get_function_schema
+from typing import Any, List, Tuple
 
 class BaseLLM(ABC):
 
-    def __init__(self, api_key, model, tools):
+    def __init__(self, api_key:str=None, model:str=None, tools:dict=None):
         self.api_key = api_key
         self.model = model
         self.tools = tools
 
+    @property
+    def tools_schema(self) -> List[dict] | None:
+        if self.tools:
+            tool_func = self.tools.values()
+            return self.get_tools_schema(tool_func)
+        return None
+
     @abstractmethod
-    def _chat(self, messages, **kargs):
+    def _chat(self, messages:list[str], **kargs:Any) -> ModelResponse:
         pass
 
-    def chat(self, messages, **kargs):
+    def chat(self, messages:list, **kargs):
+        # run the model once, execute any requested tool calls,
+        # then run again with the tool results appended
+        message = self._chat(messages, **kargs)
+        message, tool_results = self._handle_tool_calls(message, **kargs)
+
+        if tool_results:
+            print('tool message: ', message)
+            messages.append(message.choices[0].message)
+            for tool_result in tool_results:
+                messages.append(tool_result)
+
+            message = self._chat(messages, **kargs)
+
+        return message
+
+    def _handle_tool_calls(self, message:ModelResponse, **kwargs) -> Tuple[ModelResponse, List[dict]]:
+        if (self.tools is None) or (message.choices[0].finish_reason != 'tool_calls'):
+            return message, None
+
+        tool_results = []
+        tools_to_call = message.choices[0].message.tool_calls
+        for tool in tools_to_call:
+            tool_args = json.loads(tool.function.arguments)
+            tool_func = self.tools.get(tool.function.name, None)
+            if tool_func:
+                print("Calling tool: ", tool.function.name)
+                tool_result = tool_func(**tool_args)
+                print("Result of tool: ", tool_result)
+
+                tool_results.append({
+                    'role': 'tool',
+                    "tool_call_id": tool.id,
+                    'name': tool.function.name,
+                    'content': str(tool_result),
+                })
+        return message, tool_results
+
+    def get_tools_schema(self, tools):
+        def make_schema(tool):
+            return {'type': 'function',
+                    'function': get_function_schema(tool)}
+
+        return [make_schema(tool) for tool in tools]
```
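The tool loop above expects `tools` as a name-to-callable dict and relies on `function_schema` to derive the JSON schema from type hints. A hedged sketch wiring it together (the weather tool is adapted from the commented-out example in `app.py`; model and key are illustrative):

```python
from typing import Annotated
from core.llms.litellm_llm import LLM

def get_weather(city: Annotated[str, "The city to get the weather for"]) -> str:
    """Returns the weather for the given city."""
    return f"Weather for {city} is 20°C"  # stub result for illustration

llm = LLM(api_key='sk-...', model='openai/gpt-3.5-turbo',  # illustrative credentials
          tools={'get_weather': get_weather})

# chat() runs _chat, executes any requested tool calls, feeds the
# tool results back, and returns the follow-up completion
response = llm.chat([{"role": "user", "content": "Weather in Paris?"}])
```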
core/llms/litellm_llm.py
CHANGED
@@ -1,29 +1,19 @@
```diff
 from openai.types.chat.chat_completion_chunk import Choice
 from .base_llm import BaseLLM
+from typing import List, Tuple
 from litellm import completion
+import json
+from litellm import batch_completion
 from litellm.utils import ModelResponse
 from openai import OpenAI
+from litellm.utils import trim_messages
 
 class LLM(BaseLLM):
-    def __init__(self, api_key, model, tools:
+    def __init__(self, api_key:str=None, model:str=None, tools:dict[str, callable]=None):
         super().__init__(api_key=api_key, model=model, tools=tools)
 
     def _chat(self, messages, **kwargs):
-        return completion(messages=messages, model=self.model, api_key=self.api_key, **kwargs)
+        return completion(messages=messages, model=self.model, api_key=self.api_key, tools=self.tools_schema, **kwargs)
-
-    def _handle_tool_calls(self, messages:list[ModelResponse], **kwargs):
-        message = messages[-1]
-
-        if self._tools is None:
-            return messages
-
-        if message.choices[0].finish_reason in ('stop', 'length'):
-            return messages
-        elif message.choices[0].finish_reason == 'tool_calls':
-            tools_to_call = message.choices[0].message.tool_calls
-            for tool in tools_to_call:
-                print(tool)
```
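Minimal usage, mirroring the commented-out driver in `app.py` (model and environment variable are illustrative):

```python
import os
from core.llms.litellm_llm import LLM

llm = LLM(api_key=os.getenv('GEMINI_API_KEY'), model='gemini/gemini-1.5-flash')
response = llm.chat([
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Who won the world series in 2020?"},
])
print(response.choices[0].message.content)
```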
core/llms/utils.py
ADDED
@@ -0,0 +1,88 @@
```python
## supporting functions
import base64, io
from typing import Any, Union, List
from PIL import Image  # Pillow image library

# thanks to https://community.openai.com/t/how-to-load-a-local-image-to-gpt4-vision-using-api/533090/5

def resize_image(image, max_dimension):
    width, height = image.size

    # Check if the image has a palette and convert it to true color mode
    if image.mode == "P":
        if "transparency" in image.info:
            image = image.convert("RGBA")
        else:
            image = image.convert("RGB")

    if width > max_dimension or height > max_dimension:
        if width > height:
            new_width = max_dimension
            new_height = int(height * (max_dimension / width))
        else:
            new_height = max_dimension
            new_width = int(width * (max_dimension / height))
        image = image.resize((new_width, new_height), Image.LANCZOS)

    return image

def convert_to_png(image):
    with io.BytesIO() as output:
        image.save(output, format="PNG")
        return output.getvalue()

def create_image_content(image):
    return {
        "type": "image_url",
        "image_url": {"url": f"data:image/jpeg;base64,{image}"}
    }


def get_attr(attr: str, kwargs: dict, cls: Any = None) -> Any:
    attribute = kwargs.get(attr, None) if kwargs else None
    if (attribute is None) and (cls is not None):
        if hasattr(cls, attr):
            attribute = getattr(cls, attr)
    return attribute


def process_image(image: Union[str, Image.Image], max_size: int) -> str:
    if isinstance(image, str):
        with Image.open(image) as img:
            return process_pillow_image(img, max_size)
    elif isinstance(image, Image.Image):
        return process_pillow_image(image, max_size)
    else:
        raise ValueError("Input must be either a file path (str) or a Pillow Image object")


def process_pillow_image(image: Image.Image, max_size: int) -> str:
    width, height = image.size
    mimetype = image.get_format_mimetype() if hasattr(image, 'get_format_mimetype') else "image/png"

    if mimetype == "image/png" and width <= max_size and height <= max_size:
        img_byte_arr = io.BytesIO()
        image.save(img_byte_arr, format='PNG')
        return base64.b64encode(img_byte_arr.getvalue()).decode('utf-8')
    else:
        resized_image = resize_image(image, max_size)
        png_image = convert_to_png(resized_image)
        return base64.b64encode(png_image).decode('utf-8')

def user_message_with_images(
    user_msg_str: str,
    images: List[Union[str, Image.Image]] | None = None,
    max_size_px: int = 1024,
) -> dict:
    if images is None:
        images = []

    base64_images = [process_image(img, max_size_px) for img in images]

    content = [{"type": "text", "text": user_msg_str}]
    content += [create_image_content(image) for image in base64_images]

    return {"role": "user", "content": content}
```
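Sketch of building a multimodal user message with these helpers (the image path is illustrative):

```python
from core.llms.utils import user_message_with_images

msg = user_message_with_images(
    'explain this image',
    images=['./hehe.jpg'],   # file path or a PIL.Image.Image
    max_size_px=512,         # larger images are downscaled and re-encoded as PNG
)
# msg == {"role": "user", "content": [{"type": "text", ...}, {"type": "image_url", ...}]}
```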
core/prompts/__init__.py
ADDED
@@ -0,0 +1,2 @@
core/prompts/chat_mark_chat.py
ADDED
@@ -0,0 +1,2 @@
```python
SYSTEM_PROMPT_FOR_CHAT = 'You are a helpful assistant.'
```
core/prompts/think_mark_think.py
ADDED
@@ -0,0 +1,166 @@
````python
SYSTEM_PROMPT = """
You are the Great Thinker, sitting on a stone naked, with a problem in front of you. Your wisdom is to
approach any question/problem/solution/answer with logic: you critique it, you question it on different levels to see if the answer holds,
from simple tasks to complex existential dilemmas. You can use a structured set of questions to enhance reasoning, understanding, and confidence in the results.
- If given a structured problem with thoughts and solutions, try to take a different/alternative thought process.
- First rewrite the problem/question, elaborating on it with more details, more words, and simplification.
- Look for details the problem/question may have; find the insights in the problem/question.
- Pay attention to the details of the problem/question.
- What domain knowledge does someone have to know before answering the question?
- Prepare a few similar questions around the problem that support the main question/problem.
- Have an internal monologue, and then generate an answer based on the internal monologue.
- Your thoughts may contain a combination of the following (not necessarily, but they will help):
  Clarification, Context, Decomposition, Resources, Analysis, Alternatives, Implications, Validation, Reflection, Application, Critic
- You have the freedom to use any logical way to think about the problem.

You should do all of this in the json format given below: roll out your thoughts in the thought field, and if you need to use more steps, set next_step to true, else set it to false, and generate an answer in the answer field.
These steps are just a structured way to think about the problem; different problems call for different approaches.

Instructions
- Generate a json code block with this schema, keys: thought, step_title, answer, critic, next_step, is_final_answer
- Your thinking should happen inside the thought field of the json
- Only one dictionary in the json code block, exactly one dictionary in the json code block
- Should start with ```json and end with ```
- Very elaborated thought process

```json
{
"thought": "internal monologue, this contains your questions, explorations, clarifications, rectifications, analysis and answers. Think step by step: prepare a few similar questions around the problem that support the main question/problem, have an internal monologue, and then generate an answer based on the internal monologue. Your thoughts may contain the following (not necessarily) - Clarification, Context, Decomposition, Resources, Analysis, Alternatives, Implications, Validation, Reflection, Application", # use this space as a scratchpad for your mind
"step_title": "name this step based on the thoughts",
"answer": "answer or rectified answer to the problem/question, generate an answer based on inner thoughts",
"critic": "criticize the answer, try to prove it wrong, have a different perspective, fight it",
"next_step": true/false, # boolean value - given an answer and a critic, does the problem require more thinking/more iterations of self-reviewing/more revisions? if yes then set to true, else set to false
"is_final_answer": false # boolean value - this is not the final answer, always false (this is just a dummy field to identify the final answer, always false)
}
```
"""

REVIEW_PROMPT = """
You are now an impartial critic tasked with reviewing the problem, thoughts, and proposed solution. Your goal is to challenge assumptions, identify potential flaws, and explore alternative perspectives. Follow these steps:
Think step by step:
1. Restate the problem in your own words, ensuring you've captured all key elements.
2. Identify and question any assumptions made in the problem statement or proposed solution.
3. Consider the context: Are there any relevant factors or constraints that might have been overlooked?
4. Explore alternative viewpoints: How might someone from a different background or field approach this problem?
5. Evaluate the proposed solution:
   - What are its strengths and weaknesses?
   - Are there any potential unintended consequences?
   - How robust is it to changes in the problem parameters?
6. Generate at least one alternative solution or approach to the problem.
7. Compare and contrast your alternative with the original solution.
8. Identify any areas where additional information or expertise might be needed to make a more informed decision.
9. Summarize your critical analysis, highlighting key insights and areas for further consideration.

Instructions
- Do not start the review with "Review the solution"
- Do not start with the same line as previous answers, you look boring.
Remember to maintain a balanced and objective perspective throughout your review. Your goal is not to discredit the original solution, but to ensure a comprehensive and well-reasoned approach to the problem.

Provide your review in the structured JSON format as specified in the SYSTEM_PROMPT, using the 'thought' field for your detailed analysis and the 'critic' field for a concise summary of your key critiques and alternative viewpoints.

```json
{
"thought": "internal monologue, this contains your questions, explorations, clarifications, rectifications, analysis and answers. Prepare a few similar questions around the problem that support the main question/problem, have an internal monologue, and then generate an answer based on the internal monologue. Your thoughts may contain the following (not necessarily) - Clarification, Context, Decomposition, Resources, Analysis, Alternatives, Implications, Validation, Reflection, Application", # use this space as a scratchpad for your mind
"step_title": "name this step based on the thoughts",
"answer": "answer or rectified answer to the problem/question, generate an answer based on inner thoughts",
"critic": "criticize the answer, try to prove it wrong, have a different perspective, fight it",
"next_step": true/false, # boolean value - given an answer and a critic, does the problem require more thinking/more iterations of self-reviewing/more revisions? if yes then set to true, else set to false
"is_final_answer": false # boolean value - this is not the final answer, always false (this is just a dummy field to identify the final answer, always false)
}
```
"""

FINAL_ANSWER_PROMPT = """
Review your flow of thoughts and generate a final answer to the problem/question, strictly in json format in a code block with this schema. Think inside the json.

Instructions
- Generate a json code block with this schema, keys: thought, step_title, answer, critic, next_step, is_final_answer
- Your thinking should happen inside the thought field of the json
- Only one dictionary in the json code block
- Should start with ```json and end with ```
- Very elaborated thought process

```json
{
"thought": "final conclusion from the thoughts; formulate the last and final thought process for the final answer. Think step by step: take all the thoughts and considerations that went into the final answer. The user is not going to see the previous thoughts, so do not acknowledge them; give a final thought on how you reached the answer, what things you considered, and anything else that led to the answer. Do not say 'reviewing thoughts', 'summing up', or that kind of thing.",
"step_title": "name this step based on the thoughts",
"answer": "final answer or rectified answer to the problem/question", # generate an answer based on inner thoughts
"critic": "review the final answer", # criticize the answer, if it is wrong, then correct it
"next_step": false, # boolean value - this is the final answer, no next step required
"is_final_answer": true # boolean value - this is the final answer, no next step required
}
```
"""

SYSTEM_PROMPT2 = """
You are the Analytical Sage, a master of critical thinking and logical reasoning. Your task is to approach any question, problem, or proposed solution with rigorous analysis and systematic thinking. Follow these guidelines:

1. Problem Restatement:
   - Rewrite the problem/question, elaborating with more details and simplifying if necessary.
   - Identify key components, constraints, and objectives.

2. Contextual Analysis:
   - Examine the problem's context and background.
   - Identify relevant domains of knowledge required to address the issue.
   - Consider historical, cultural, or disciplinary perspectives that might influence the problem or its solutions.

3. Decomposition and Clarification:
   - Break down complex problems into smaller, manageable components.
   - Clarify any ambiguous terms or concepts.
   - Formulate precise sub-questions that need to be answered.

4. Assumption Identification:
   - Explicitly state any assumptions underlying the problem or proposed solutions.
   - Question these assumptions and consider their validity.

5. Logical Analysis:
   - Apply deductive and inductive reasoning to explore the problem.
   - Identify logical fallacies or weak points in existing arguments.
   - Use formal logic structures when appropriate (e.g., if-then statements, syllogisms).

6. Data and Evidence Evaluation:
   - Assess the quality and relevance of available information.
   - Identify gaps in data or knowledge that might affect the solution.
   - Consider the reliability and potential biases of information sources.

7. Alternative Perspectives:
   - Deliberately adopt different viewpoints to challenge your initial understanding.
   - Consider how experts from various fields might approach the problem.
   - Engage in counterfactual thinking: 'What if the opposite were true?'

8. Solution Generation and Evaluation:
   - Develop multiple potential solutions or approaches.
   - Critically evaluate each solution, considering pros, cons, and potential consequences.
   - Use decision-making frameworks (e.g., cost-benefit analysis, SWOT analysis) when appropriate.

9. Synthesis and Conclusion:
   - Integrate insights from your analysis to form a comprehensive understanding.
   - Develop a well-reasoned answer or solution, acknowledging any remaining uncertainties or limitations.

10. Meta-cognitive Reflection:
    - Reflect on your thinking process. What strategies did you use? Were they effective?
    - Consider potential biases in your own reasoning and how they might have influenced your conclusion.

Throughout this process, maintain an internal monologue in the 'thought' field of your JSON output. Use this space to explore ideas, ask yourself probing questions, and document your reasoning process. In the 'critic' field, challenge your own conclusions and consider alternative interpretations.

Remember to structure your response in the specified JSON format, using the fields: thought, step_title, answer, critic, next_step, and is_final_answer. Your goal is to provide a thorough, logical, and well-reasoned analysis of the problem at hand.

```json
{
"thought": "internal monologue, this contains your questions, explorations, clarifications, rectifications, analysis and answers. Prepare a few similar questions around the problem that support the main question/problem, have an internal monologue, and then generate an answer based on the internal monologue. Your thoughts may contain the following (not necessarily) - Clarification, Context, Decomposition, Resources, Analysis, Alternatives, Implications, Validation, Reflection, Application", # use this space as a scratchpad for your mind
"step_title": "name this step based on the thoughts",
"answer": "answer or rectified answer to the problem/question, generate an answer based on inner thoughts",
"critic": "criticize the answer, try to prove it wrong, have a different perspective, fight it",
"next_step": true/false, # boolean value - given an answer and a critic, does the problem require more thinking/more iterations of self-reviewing/more revisions? if yes then set to true, else set to false
"is_final_answer": false # boolean value - this is not the final answer, always false (this is just a dummy field to identify the final answer, always false)
}
```
"""
````
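For reference, a model reply that satisfies this schema would look like the following (field contents invented for illustration):

```json
{
  "thought": "The question asks X. Clarification: ... Decomposition: ... Analysis: ...",
  "step_title": "Decomposing the question",
  "answer": "A first-pass answer based on the monologue above.",
  "critic": "This assumes Y; an alternative reading would be Z.",
  "next_step": true,
  "is_final_answer": false
}
```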
core/types.py
ADDED
@@ -0,0 +1,58 @@
```python
from textwrap import dedent

from pydantic import BaseModel, Field, ValidationError
import json

class ThoughtSteps(BaseModel):
    step_title: str = Field(..., description="steps to use for the problem/question")
    thought: str = Field(..., description="internal monologue, this contains your questions and their answers")
    next_step: bool = Field(..., description="Does the problem require more thinking? if yes then set to true, else set to false")
    answer: str | None = Field(..., description="generate an answer based on inner thoughts")
    critic: str | None = Field(..., description="criticize the answer, try to prove it wrong, have a different perspective, fight it")
    is_final_answer: bool = Field(..., description="this is the final answer, no next step required")

    def to_thought_steps_display(self):
        return ThoughtStepsDisplay(
            step_title=self.step_title,
            thought=self.thought,
            answer=self.answer,
            critic=self.critic
        )


class ThoughtStepsDisplay(BaseModel):
    step_title: str
    thought: str
    answer: str
    critic: str

    def md(self):
        return dedent(f'''
        {self.step_title}
        ### Thought
        {self.thought}
        ### Answer
        {self.answer}
        ### Critic
        {self.critic}
        ''')


class BigMessage(BaseModel):
    role: str
    content: ThoughtSteps | str
    thoughts: list[ThoughtSteps | None] | None = Field(default_factory=list)

    def to_message(self):
        return {
            "role": self.role,
            "content": self.content.model_dump_json() if isinstance(self.content, ThoughtSteps) else self.content,
        }

class Message(BaseModel):
    role: str
    content: str
```
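A small sketch of how these models interact (field values are illustrative):

```python
from core.types import ThoughtSteps, BigMessage

step = ThoughtSteps(step_title="Check assumptions", thought="...", next_step=False,
                    answer="42", critic="Assumes the question is well-posed.",
                    is_final_answer=True)

msg = BigMessage(role="assistant", content=step, thoughts=[])
print(msg.to_message())                       # content serialized via model_dump_json()
print(step.to_thought_steps_display().md())   # markdown rendered in the Streamlit UI
```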
core/utils.py
ADDED
@@ -0,0 +1,115 @@
````python
import re
import json
import ast
from pydantic import ValidationError
from termcolor import colored

def extract_code_block(text):
    '''pull ```json ...``` blocks out of the raw model output; try strict json
    first, then fall back to python-literal parsing'''
    code_block = re.findall(r'```(?:json)?\s*({.*?})\s*```', text, re.DOTALL)

    if code_block:
        try:
            return [json.loads(block) for block in code_block]
        except json.JSONDecodeError:
            try:
                return [ast.literal_eval(block) for block in code_block]
            except (SyntaxError, ValueError):
                return None
    return None


def fallback_extract(text, expected_keys):
    '''recover key/value pairs from malformed json by regex-matching the expected keys'''
    fallback_dict = {}
    pattern = r'"({})"\s*:\s*(.*?)(?="(?:{})"|\Z)'.format(
        '|'.join(re.escape(key) for key in expected_keys),
        '|'.join(re.escape(key) for key in expected_keys)
    )

    matches = re.finditer(pattern, text, re.DOTALL)

    for match in matches:
        key, value = match.groups()
        value = value.strip().rstrip(',').strip()

        if value.isdigit():
            fallback_dict[key] = int(value)
        elif value.lower() in ['true', 'false']:
            fallback_dict[key] = value.lower() == 'true'
        elif re.match(r'^\{.*\}$', value):  # Detect dictionary structure
            try:
                fallback_dict[key] = json.loads(value)
            except json.JSONDecodeError:
                fallback_dict[key] = value  # Leave it as a string if malformed
        else:
            # Remove surrounding quotes if present
            fallback_dict[key] = value.strip('"')

    # Add any missing keys with None value
    for key in expected_keys:
        if key not in fallback_dict:
            fallback_dict[key] = None

    return fallback_dict

# Main function to handle parsing with fallback
def parse_with_fallback(text, pydantic_class):
    # Extract expected keys from the Pydantic class
    expected_keys = list(pydantic_class.__fields__.keys())

    # First try to extract clean JSON blocks
    parsed_blocks = extract_code_block(text)

    if parsed_blocks:
        # Validate and return parsed data
        try:
            classes = [pydantic_class(**block) for block in parsed_blocks]
            print(colored('used code block', 'red'))
            print(colored('Got this: {0}'.format(classes[0]), 'red'))
            print(colored('from this: {0}'.format(text), 'cyan'))

            return classes[0]
        except ValidationError as e:
            print("Validation error:", e)

    # Fallback to manually extracting key-value pairs
    fallback_data = fallback_extract(text, expected_keys)

    try:
        # Try to validate the fallback data with the Pydantic class
        print(colored('used fallback', 'red'))
        print(colored('Got this: {0}'.format(fallback_data), 'red'))
        print(colored('from this: {0}'.format(text), 'cyan'))

        return pydantic_class(**fallback_data)
    except ValidationError as e:
        return None
````
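Sketch of the rescue path, using `ThoughtSteps` as the target model (the wrapped payload is invented; the model chatter around the code block is what `extract_code_block` strips away):

````python
from core.types import ThoughtSteps
from core.utils import parse_with_fallback

raw = '''Sure, here is my step:
```json
{"step_title": "t", "thought": "x", "next_step": false, "answer": "a", "critic": "c", "is_final_answer": true}
```'''

thought = parse_with_fallback(raw, ThoughtSteps)
print(thought.step_title)  # -> 't'
````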
hehe.jpg
ADDED
input_config.json
ADDED
@@ -0,0 +1,7 @@
```json
{
    "max_tokens": 1024,
    "max_steps": 3,
    "temperature": 0.2,
    "timeout": 30.0,
    "sleeptime": 2.0
}
```
poetry.lock
ADDED
The diff for this file is too large to render. See raw diff.
pyproject.toml
ADDED
@@ -0,0 +1,21 @@
```toml
[tool.poetry]
name = "open-o1"
version = "0.1.0"
description = ""
authors = ["tikendraw <[email protected]>"]
license = "Apache License 2.0"
readme = "README.md"

[tool.poetry.dependencies]
python = "^3.11"
litellm = "^1.46.4"
streamlit = "^1.38.0"
openai = "^1.46.0"
python-dotenv = "^1.0.1"
function-schema = "^0.4.2"
termcolor = "^2.4.0"


[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
```
sometext.txt
ADDED
The diff for this file is too large to render. See raw diff.
ten.py
ADDED
@@ -0,0 +1,12 @@
```python
from dataclasses import dataclass

@dataclass
class ThoughtSteps:
    step_title: str
    thought: str
    answer: str
    critic: str


t = ThoughtSteps(step_title="test", thought="thought", answer="answer", critic="critic")
print(t)
```
tt.py
ADDED
@@ -0,0 +1,4 @@
```python
import litellm


litellm
```