Spaces (Runtime error)
ZackBradshaw committed e67043b
Parent(s): 086993c
Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes.
- .env.example +3 -0
- BabyagiTools.py +367 -0
- README.md +2 -8
- __pycache__/BabyagiTools.cpython-310.pyc +0 -0
- __pycache__/apitool.cpython-310.pyc +0 -0
- __pycache__/executor.cpython-310.pyc +0 -0
- __pycache__/logger.cpython-310.pyc +0 -0
- __pycache__/logging.cpython-310.pyc +0 -0
- __pycache__/registry.cpython-310.pyc +0 -0
- __pycache__/retriever.cpython-310.pyc +0 -0
- __pycache__/serve.cpython-310.pyc +0 -0
- __pycache__/singletool.cpython-310.pyc +0 -0
- __pycache__/tool.cpython-310.pyc +0 -0
- __pycache__/tool_logging.cpython-310.pyc +0 -0
- __pycache__/tool_server.cpython-310.pyc +0 -0
- __pycache__/tools_controller.cpython-310.pyc +0 -0
- __pycache__/translator.cpython-310.pyc +0 -0
- agent/BabyagiTools.py +368 -0
- agent/apitool.py +168 -0
- agent/autogpt/__init__.py +0 -0
- agent/autogpt/agent.py +137 -0
- agent/autogpt/output_parser.py +66 -0
- agent/autogpt/prompt.py +75 -0
- agent/autogpt/prompt_generator.py +189 -0
- agent/autogptmulti/__init__.py +0 -0
- agent/autogptmulti/agent.py +138 -0
- agent/autogptmulti/output_parser.py +66 -0
- agent/autogptmulti/prompt.py +68 -0
- agent/autogptmulti/prompt_generator.py +191 -0
- agent/executor.py +123 -0
- agent/singletool.py +270 -0
- agent/tools_controller.py +155 -0
- agent/translator.py +125 -0
- agent_tools.py +59 -0
- airbnb/__init__.py +8 -0
- airbnb/api.py +354 -0
- airbnb/readme.md +29 -0
- airbnb/test.py +11 -0
- apitool.py +168 -0
- app.py +380 -0
- arxiv/__init__.py +8 -0
- arxiv/api.py +50 -0
- arxiv/readme.md +38 -0
- arxiv/test.py +11 -0
- bing_search/__init__.py +8 -0
- bing_search/api.py +201 -0
- bing_search/readme.md +3 -0
- bing_search/test_bing.py +60 -0
- chemical/__init__.py +8 -0
- chemical/prop/__init__.py +1 -0
.env.example
ADDED
@@ -0,0 +1,3 @@
+OPENAI_BASE_URL=https://api.openai.com
+OPENAI_API_KEY=sk-1234567890
+
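For context, a minimal sketch of how application code might consume these variables; the python-dotenv package and the fallback default are assumptions, not part of this commit:

import os

from dotenv import load_dotenv  # pip install python-dotenv (assumed helper)

# Read a local .env (copied from .env.example) into the process environment.
load_dotenv()

openai_base_url = os.environ.get("OPENAI_BASE_URL", "https://api.openai.com")
openai_api_key = os.environ["OPENAI_API_KEY"]  # fail fast if the key is missing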
BabyagiTools.py
ADDED
@@ -0,0 +1,367 @@
from collections import deque
from typing import Dict, List, Optional, Any
import re

from langchain import LLMChain, OpenAI, PromptTemplate, SerpAPIWrapper
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import BaseLLM
from langchain.vectorstores.base import VectorStore
from pydantic import BaseModel, Field
from langchain.chains.base import Chain

from langchain.vectorstores import FAISS
import faiss
from langchain.docstore import InMemoryDocstore
from langchain.agents import ZeroShotAgent, Tool, AgentExecutor
# AgentExecutorWithTranslation is used below but was missing from this import
# in the commit (the agent/ copy of this file imports both).
from executor import Executor, AgentExecutorWithTranslation


class ContextAwareAgent(ZeroShotAgent):
    def get_full_inputs(self, intermediate_steps, **kwargs: Any) -> Dict[str, Any]:
        """Create the full inputs for the LLMChain from intermediate steps."""
        thoughts = self._construct_scratchpad(intermediate_steps)
        new_inputs = {"agent_scratchpad": thoughts, "stop": self._stop}
        full_inputs = {**kwargs, **new_inputs}
        return full_inputs

    def _construct_scratchpad(self, intermediate_steps):
        """Construct the scratchpad that lets the agent continue its thought process."""
        thoughts = ""
        # Only keep the two most recent (action, observation) pairs to bound prompt size.
        for action, observation in intermediate_steps[-2:]:
            thoughts += action.log
            thoughts += f"\n{self.observation_prefix}{observation}\n{self.llm_prefix}"
            if "is not a valid tool, try another one" in observation:
                thoughts += (
                    "You should select another tool rather than the invalid one.\n"
                )
        return thoughts


class TaskCreationChain(LLMChain):
    """Chain that generates tasks."""

    @classmethod
    def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
        """Get the response parser."""
        task_creation_template = (
            "You are a task creation AI that uses the result of an execution agent"
            " to create new tasks with the following objective: {objective},"
            " The last completed task has the result: {result}."
            " This result was based on this task description: {task_description}."
            " These are incomplete tasks: {incomplete_tasks}."
            " Based on the result, create new tasks to be completed"
            " by the AI system that do not overlap with incomplete tasks."
            " For a simple objective, do not generate complex todo lists."
            " Do not generate repetitive tasks (e.g., tasks that have already been completed)."
            " If there is no further task needed to complete the objective, return NO TASK."
            " Now return the tasks as an array."
        )
        prompt = PromptTemplate(
            template=task_creation_template,
            input_variables=[
                "result",
                "task_description",
                "incomplete_tasks",
                "objective",
            ],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)


class InitialTaskCreationChain(LLMChain):
    """Chain that generates the first task."""

    @classmethod
    def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
        """Get the response parser."""
        task_creation_template = "You are a planner who is an expert at coming up with a todo list for a given objective. For a simple objective, do not generate a complex todo list. Generate the first (only one) task needed to do for this objective: {objective}"
        prompt = PromptTemplate(
            template=task_creation_template,
            input_variables=["objective"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)


class TaskPrioritizationChain(LLMChain):
    """Chain to prioritize tasks."""

    @classmethod
    def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
        """Get the response parser."""
        task_prioritization_template = (
            "You are a task prioritization AI tasked with cleaning the formatting of and reprioritizing"
            " the following tasks: {task_names}."
            " Consider the ultimate objective of your team: {objective}."
            " Do not make up any tasks, just reorganize the existing tasks."
            " Do not remove any tasks. Return the result as a numbered list, like:"
            " #. First task"
            " #. Second task"
            " Start the task list with number {next_task_id}. (e.g., 2. ***, 3. ***, etc.)"
        )
        prompt = PromptTemplate(
            template=task_prioritization_template,
            input_variables=["task_names", "next_task_id", "objective"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)


def get_next_task(
    task_creation_chain: LLMChain,
    result: Dict,
    task_description: str,
    task_list: List[str],
    objective: str,
) -> List[Dict]:
    """Get the next task."""
    incomplete_tasks = ", ".join(task_list)
    response = task_creation_chain.run(
        result=result,
        task_description=task_description,
        incomplete_tasks=incomplete_tasks,
        objective=objective,
    )
    # Parse numbered tasks with a regex rather than a bare split('\n'),
    # so blank lines and stray prose do not become tasks.
    task_pattern = re.compile(r"\d+\. (.+?)\n")
    new_tasks = task_pattern.findall(response)

    return [{"task_name": task_name} for task_name in new_tasks if task_name.strip()]


def prioritize_tasks(
    task_prioritization_chain: LLMChain,
    this_task_id: int,
    task_list: List[Dict],
    objective: str,
) -> List[Dict]:
    """Prioritize tasks."""
    task_names = [t["task_name"] for t in task_list]
    next_task_id = int(this_task_id) + 1
    response = task_prioritization_chain.run(
        task_names=task_names, next_task_id=next_task_id, objective=objective
    )
    new_tasks = response.split("\n")
    prioritized_task_list = []
    for task_string in new_tasks:
        if not task_string.strip():
            continue
        task_parts = task_string.strip().split(".", 1)
        if len(task_parts) == 2:
            task_id = task_parts[0].strip()
            task_name = task_parts[1].strip()
            prioritized_task_list.append({"task_id": task_id, "task_name": task_name})
    return prioritized_task_list


def _get_top_tasks(vectorstore, query: str, k: int) -> List[str]:
    """Get the top k tasks based on the query."""
    results = vectorstore.similarity_search_with_score(query, k=k)
    if not results:
        return []
    sorted_results, _ = zip(*sorted(results, key=lambda x: x[1], reverse=True))
    return [str(item.metadata["task"]) for item in sorted_results]


def execute_task(
    vectorstore, execution_chain: LLMChain, objective: str, task: str, k: int = 5
) -> str:
    """Execute a task."""
    context = _get_top_tasks(vectorstore, query=objective, k=k)
    return execution_chain.run(objective=objective, context=context, task=task)


class BabyAGI(Chain, BaseModel):
    """Controller model for the BabyAGI agent."""

    task_list: deque = Field(default_factory=deque)
    task_creation_chain: TaskCreationChain = Field(...)
    task_prioritization_chain: TaskPrioritizationChain = Field(...)
    initial_task_creation_chain: InitialTaskCreationChain = Field(...)
    execution_chain: AgentExecutor = Field(...)
    task_id_counter: int = Field(1)
    vectorstore: VectorStore = Field(init=False)
    max_iterations: Optional[int] = None

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    def add_task(self, task: Dict):
        self.task_list.append(task)

    def print_task_list(self):
        print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
        for t in self.task_list:
            print(str(t["task_id"]) + ": " + t["task_name"])

    def print_next_task(self, task: Dict):
        print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
        print(str(task["task_id"]) + ": " + task["task_name"])

    def print_task_result(self, result: str):
        print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
        print(result)

    @property
    def input_keys(self) -> List[str]:
        return ["objective"]

    @property
    def output_keys(self) -> List[str]:
        return []

    def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Run the agent."""
        # Not an elegant implementation, but it works for the first task.
        objective = inputs["objective"]
        first_task = inputs.get(
            "first_task", self.initial_task_creation_chain.run(objective=objective)
        )

        self.add_task({"task_id": 1, "task_name": first_task})
        num_iters = 0
        while True:
            if self.task_list:
                self.print_task_list()

                # Step 1: Pull the first task
                task = self.task_list.popleft()
                self.print_next_task(task)

                # Step 2: Execute the task
                result = execute_task(
                    self.vectorstore, self.execution_chain, objective, task["task_name"]
                )
                this_task_id = int(task["task_id"])
                self.print_task_result(result)

                # Step 3: Store the result in the vector store
                result_id = f"result_{task['task_id']}"
                self.vectorstore.add_texts(
                    texts=[result],
                    metadatas=[{"task": task["task_name"]}],
                    ids=[result_id],
                )

                # Step 4: Create new tasks and reprioritize task list
                new_tasks = get_next_task(
                    self.task_creation_chain,
                    result,
                    task["task_name"],
                    [t["task_name"] for t in self.task_list],
                    objective,
                )
                for new_task in new_tasks:
                    self.task_id_counter += 1
                    new_task.update({"task_id": self.task_id_counter})
                    self.add_task(new_task)

                if len(self.task_list) == 0:
                    print(
                        "\033[91m\033[1m"
                        + "\n*****NO TASK, ABORTING*****\n"
                        + "\033[0m\033[0m"
                    )
                    break

                self.task_list = deque(
                    prioritize_tasks(
                        self.task_prioritization_chain,
                        this_task_id,
                        list(self.task_list),
                        objective,
                    )
                )
            num_iters += 1
            if self.max_iterations is not None and num_iters == self.max_iterations:
                print(
                    "\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m"
                )
                break
        return {}

    @classmethod
    def from_llm(
        cls,
        llm: BaseLLM,
        prompt=None,
        verbose: bool = False,
        tools=None,
        stream_output=None,
        **kwargs,
    ) -> "BabyAGI":
        embeddings_model = OpenAIEmbeddings()
        embedding_size = 1536  # dimensionality of OpenAI ada-002 embeddings
        index = faiss.IndexFlatL2(embedding_size)
        vectorstore = FAISS(
            embeddings_model.embed_query, index, InMemoryDocstore({}), {}
        )

        task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose)
        initial_task_creation_chain = InitialTaskCreationChain.from_llm(
            llm, verbose=verbose
        )
        task_prioritization_chain = TaskPrioritizationChain.from_llm(
            llm, verbose=verbose
        )
        llm_chain = LLMChain(llm=llm, prompt=prompt)
        tool_names = [tool.name for tool in tools]
        agent = ContextAwareAgent(llm_chain=llm_chain, allowed_tools=tool_names)

        if stream_output:
            agent_executor = Executor.from_agent_and_tools(
                agent=agent, tools=tools, verbose=True
            )
        else:
            agent_executor = AgentExecutorWithTranslation.from_agent_and_tools(
                agent=agent, tools=tools, verbose=True
            )

        return cls(
            task_creation_chain=task_creation_chain,
            task_prioritization_chain=task_prioritization_chain,
            initial_task_creation_chain=initial_task_creation_chain,
            execution_chain=agent_executor,
            vectorstore=vectorstore,
            **kwargs,
        )


if __name__ == "__main__":
    todo_prompt = PromptTemplate.from_template(
        "You are a planner who is an expert at coming up with a todo list for a given objective. For a simple objective, do not generate a complex todo list. Come up with a todo list for this objective: {objective}"
    )
    todo_chain = LLMChain(llm=OpenAI(temperature=0), prompt=todo_prompt)
    search = SerpAPIWrapper()
    tools = [
        Tool(
            name="Search",
            func=search.run,
            description="useful for when you need to answer questions about current events",
        ),
        Tool(
            name="TODO",
            func=todo_chain.run,
            description="useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!",
        ),
    ]

    prefix = """You are an AI who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}."""
    suffix = """Question: {task}
{agent_scratchpad}"""
    prompt = ZeroShotAgent.create_prompt(
        tools,
        prefix=prefix,
        suffix=suffix,
        input_variables=["objective", "task", "context", "agent_scratchpad"],
    )

    OBJECTIVE = "Write a weather report for SF today"
    llm = OpenAI(temperature=0)
    # Logging of LLMChains
    verbose = False
    # If None, will keep on going forever
    max_iterations: Optional[int] = 10
    # prompt and tools must be passed through; the commit omitted them, which
    # makes from_llm fail on `tools=None` when it builds the agent.
    baby_agi = BabyAGI.from_llm(
        llm=llm,
        prompt=prompt,
        tools=tools,
        verbose=verbose,
        max_iterations=max_iterations,
    )
    baby_agi({"objective": OBJECTIVE})
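A quick, self-contained illustration of the regex-based task parsing in get_next_task above; the sample model response is invented:

import re

# Same pattern as get_next_task: capture the text of each "N. task" line.
task_pattern = re.compile(r"\d+\. (.+?)\n")

response = "1. Check SF weather APIs\n2. Summarize the forecast\nNO TASK beyond these.\n"
print(task_pattern.findall(response))
# ['Check SF weather APIs', 'Summarize the forecast']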
README.md
CHANGED
@@ -1,12 +1,6 @@
 ---
-title:
-
-colorFrom: indigo
-colorTo: purple
+title: tools
+app_file: app.py
 sdk: gradio
 sdk_version: 4.19.2
-app_file: app.py
-pinned: false
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
__pycache__/BabyagiTools.cpython-310.pyc
ADDED
Binary file (12.1 kB).
__pycache__/apitool.cpython-310.pyc
ADDED
Binary file (5.19 kB).
__pycache__/executor.cpython-310.pyc
ADDED
Binary file (3.15 kB).
__pycache__/logger.cpython-310.pyc
ADDED
Binary file (345 Bytes).
__pycache__/logging.cpython-310.pyc
ADDED
Binary file (7.56 kB).
__pycache__/registry.cpython-310.pyc
ADDED
Binary file (1.92 kB).
__pycache__/retriever.cpython-310.pyc
ADDED
Binary file (2.08 kB).
__pycache__/serve.cpython-310.pyc
ADDED
Binary file (3.83 kB).
__pycache__/singletool.cpython-310.pyc
ADDED
Binary file (7.46 kB).
__pycache__/tool.cpython-310.pyc
ADDED
Binary file (3.5 kB).
__pycache__/tool_logging.cpython-310.pyc
ADDED
Binary file (7.59 kB).
__pycache__/tool_server.cpython-310.pyc
ADDED
Binary file (2.33 kB).
__pycache__/tools_controller.cpython-310.pyc
ADDED
Binary file (4.22 kB).
__pycache__/translator.cpython-310.pyc
ADDED
Binary file (6.77 kB).
agent/BabyagiTools.py
ADDED
@@ -0,0 +1,368 @@
Identical to the top-level BabyagiTools.py above (368 lines instead of 367 only because of one extra blank line before class BabyAGI), except that line 16 imports the executors from the installed package and already includes the translation-aware executor that the top-level copy was missing:

from swarms.agent.executor import Executor, AgentExecutorWithTranslation
agent/apitool.py
ADDED
@@ -0,0 +1,168 @@
"""Interface for tools."""
from inspect import signature
from typing import Any, Awaitable, Callable, Optional, Union

from langchain.agents import Tool as LangChainTool
from langchain.tools.base import BaseTool
import requests
import json
import aiohttp
import http.client

http.client._MAXLINE = 655360

from swarms.utils import get_logger

logger = get_logger(__name__)


class Tool(LangChainTool):
    tool_logo_md: str = ""


class RequestTool(BaseTool):
    """Tool that takes in function or coroutine directly."""

    description: str = ""
    func: Callable[[str], str]
    afunc: Callable[[str], Awaitable[str]]
    coroutine: Optional[Callable[[str], Awaitable[str]]] = None
    max_output_len: int = 4000
    tool_logo_md: str = ""

    def _run(self, tool_input: str) -> str:
        """Use the tool."""
        return self.func(tool_input)

    async def _arun(self, tool_input: str) -> str:
        """Use the tool asynchronously."""
        ret = await self.afunc(tool_input)
        return ret

    def convert_prompt(self, params):
        lines = "Your input should be a json (args json schema): {{"
        for p in params:
            logger.debug(p)
            optional = not p["required"]
            description = p.get("description", "")
            if len(description) > 0:
                description = "(" + description + ")"

            lines += '"{name}" : {type}{desc}, '.format(
                name=p["name"],
                type=p["schema"]["type"],
                optional=optional,
                desc=description,
            )

        lines += "}}"
        return lines

    def __init__(self, root_url, func_url, method, request_info, **kwargs):
        """Store the function, description, and tool_name in a class to store the information"""
        url = root_url + func_url

        def func(json_args):
            if isinstance(json_args, str):
                try:
                    json_args = json.loads(json_args)
                except json.JSONDecodeError:
                    return "Your input can not be parsed as json, please use thought."
            if "tool_input" in json_args:
                json_args = json_args["tool_input"]

            # POST/PUT/PATCH send the arguments as a JSON body;
            if method.lower() in ["post", "put", "patch"]:
                response = getattr(requests, method.lower())(url, json=json_args)
            else:
                # other methods use GET with the arguments as query params.
                response = requests.get(url, params=json_args)
            if response.status_code == 200:
                message = response.text
            else:
                message = f"Error code {response.status_code}. You can try (1) Change your input (2) Call another function. (If the same error code is produced more than 4 times, please use Thought: I can not use these APIs, so I will stop. Final Answer: No Answer, please check the APIs.)"

            message = message[: self.max_output_len]  # TODO: not rigorous, to improve
            return message

        def convert_openapi_to_params(request_body):
            if not request_body:
                return []
            params = []
            for content_type, content in request_body["content"].items():
                schema = content["schema"]
                properties = schema.get("properties", {})
                required = schema.get("required", [])
                for key, value in properties.items():
                    param = {
                        "name": key,
                        "schema": value,
                        "required": key in required,
                        "description": value.get("description", ""),
                    }
                    if (
                        content_type == "multipart/form-data"
                        and value.get("format") == "binary"
                    ):
                        param["type"] = "file"
                    elif content_type in [
                        "application/x-www-form-urlencoded",
                        "multipart/form-data",
                    ]:
                        param["type"] = "form"
                    else:
                        param["type"] = "json"
                    params.append(param)
            return params

        async def afunc(json_args):
            if isinstance(json_args, str):
                try:
                    json_args = json.loads(json_args)
                except json.JSONDecodeError:
                    return "Your input can not be parsed as json, please use thought."
            if "tool_input" in json_args:
                json_args = json_args["tool_input"]

            async with aiohttp.ClientSession() as session:
                async with session.get(url, params=json_args) as response:
                    if response.status == 200:
                        message = await response.text()
                    else:
                        # aiohttp exposes the code as `response.status`; the
                        # commit referenced the nonexistent `status_code` here.
                        message = f"Error code {response.status}. You can try (1) Change your input (2) Call another function. (If the same error code is produced more than 4 times, please use Thought: I can not use these APIs, so I will stop. Final Answer: No Answer, please check the APIs.)"

            message = message[: self.max_output_len]  # TODO: not rigorous, to improve
            return message

        tool_name = func_url.replace("/", ".").strip(".")
        str_doc = ""
        if "parameters" in request_info[method]:
            str_doc = self.convert_prompt(request_info[method]["parameters"])

        if "requestBody" in request_info[method]:
            str_doc = (
                str_doc
                + "\n"
                + self.convert_prompt(
                    convert_openapi_to_params(request_info[method]["requestBody"])
                )
            )

        # description = f"- {tool_name}:\n" + \
        #     request_info[method].get('summary', '').replace("{", "{{").replace("}", "}}") \
        description = (
            request_info[method]
            .get("description", "")
            .replace("{", "{{")
            .replace("}", "}}")
            + ". "
            + str_doc
            + f" The Action to trigger this API should be {tool_name} and the input parameters should be a json dict string. Pay attention to the type of parameters."
        )

        logger.info("API Name: {}".format(tool_name))
        logger.info("API Description: {}".format(description))

        super(RequestTool, self).__init__(
            name=tool_name, func=func, afunc=afunc, description=description, **kwargs
        )
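A hedged sketch of how RequestTool might be wired up from an OpenAPI-style description; the host, path, and schema below are invented for illustration and are not part of this commit:

request_info = {
    "get": {
        "description": "Look up current weather for a city",
        "parameters": [
            {
                "name": "city",
                "required": True,
                "schema": {"type": "string"},
                "description": "city name",
            }
        ],
    }
}

tool = RequestTool(
    root_url="https://api.example.com",  # hypothetical endpoint
    func_url="/weather",
    method="get",
    request_info=request_info,
)
print(tool.name)  # "weather" (func_url with slashes mapped to dots)
# tool.run('{"city": "San Francisco"}') would issue
# GET https://api.example.com/weather?city=San+Francisco and return the body.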
agent/autogpt/__init__.py
ADDED
File without changes
agent/autogpt/agent.py
ADDED
@@ -0,0 +1,137 @@
from __future__ import annotations

from typing import List, Optional

from pydantic import ValidationError

from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.experimental.autonomous_agents.autogpt.output_parser import (
    AutoGPTOutputParser,
    BaseAutoGPTOutputParser,
)
from .prompt import AutoGPTPrompt
from langchain.experimental.autonomous_agents.autogpt.prompt_generator import (
    FINISH_NAME,
)
from langchain.schema import (
    AIMessage,
    BaseMessage,
    Document,
    HumanMessage,
    SystemMessage,
)
from langchain.tools.base import BaseTool
from langchain.tools.human.tool import HumanInputRun
from langchain.vectorstores.base import VectorStoreRetriever
import json


class AutoGPT:
    """Agent class for interacting with Auto-GPT."""

    def __init__(
        self,
        ai_name: str,
        memory: VectorStoreRetriever,
        chain: LLMChain,
        output_parser: BaseAutoGPTOutputParser,
        tools: List[BaseTool],
        feedback_tool: Optional[HumanInputRun] = None,
    ):
        self.ai_name = ai_name
        self.memory = memory
        self.full_message_history: List[BaseMessage] = []
        self.next_action_count = 0
        self.chain = chain
        self.output_parser = output_parser
        self.tools = tools
        self.feedback_tool = feedback_tool

    @classmethod
    def from_llm_and_tools(
        cls,
        ai_name: str,
        ai_role: str,
        memory: VectorStoreRetriever,
        tools: List[BaseTool],
        llm: BaseChatModel,
        human_in_the_loop: bool = False,
        output_parser: Optional[BaseAutoGPTOutputParser] = None,
    ) -> AutoGPT:
        prompt = AutoGPTPrompt(
            ai_name=ai_name,
            ai_role=ai_role,
            tools=tools,
            input_variables=["memory", "messages", "goals", "user_input"],
            token_counter=llm.get_num_tokens,
        )
        human_feedback_tool = HumanInputRun() if human_in_the_loop else None
        chain = LLMChain(llm=llm, prompt=prompt)
        return cls(
            ai_name,
            memory,
            chain,
            output_parser or AutoGPTOutputParser(),
            tools,
            feedback_tool=human_feedback_tool,
        )

    def run(self, goals: List[str]) -> str:
        user_input = (
            "Determine which next command to use, "
            "and respond using the format specified above:"
        )
        # Interaction Loop
        loop_count = 0
        while True:
            # Discontinue if continuous limit is reached
            loop_count += 1
            # Send message to AI, get response
            assistant_reply = self.chain.run(
                goals=goals,
                messages=self.full_message_history,
                memory=self.memory,
                user_input=user_input,
            )

            # Print Assistant thoughts
            print(assistant_reply)
            self.full_message_history.append(HumanMessage(content=user_input))
            self.full_message_history.append(AIMessage(content=assistant_reply))

            # Get command name and arguments
            action = self.output_parser.parse(assistant_reply)
            tools = {t.name: t for t in self.tools}
            if action.name == FINISH_NAME:
                return action.args["response"]
            if action.name in tools:
                tool = tools[action.name]
                try:
                    # For tools in swarms.tools the input should be a string,
                    # while default langchain tools take json, so the parsed
                    # args are serialized back to a json string here.
                    json_args = json.dumps(action.args)
                    observation = tool.run(json_args)
                except ValidationError as e:
                    observation = f"Error in args: {str(e)}"
                result = f"Command {tool.name} returned: {observation}"
            elif action.name == "ERROR":
                result = f"Error: {action.args}. "
            else:
                result = (
                    f"Unknown command '{action.name}'. "
                    f"Please refer to the 'COMMANDS' list for available "
                    f"commands and only respond in the specified JSON format."
                )

            memory_to_add = (
                f"Assistant Reply: {assistant_reply} " f"\nResult: {result} "
            )
            if self.feedback_tool is not None:
                feedback = self.feedback_tool.run("Input: ")
                # Compare the raw input: the commit prepended "\n" before this
                # comparison, so "q"/"stop" could never match.
                if feedback in {"q", "stop"}:
                    print("EXITING")
                    return "EXITING"
                memory_to_add += f"\n{feedback}"

            self.memory.add_documents([Document(page_content=memory_to_add)])
            self.full_message_history.append(SystemMessage(content=result))
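A usage sketch (not part of the commit) showing one way to wire this class, following the upstream langchain AutoGPT example; the FAISS in-memory store and the empty tool list are assumptions:

import faiss
from langchain.chat_models import ChatOpenAI
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)  # dimensionality of OpenAI ada-002 embeddings
vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})

agent = AutoGPT.from_llm_and_tools(
    ai_name="Tommie",  # placeholder name
    ai_role="Assistant",
    memory=vectorstore.as_retriever(),
    tools=[],  # plug in the Tool/RequestTool instances from this repo
    llm=ChatOpenAI(temperature=0),
)
agent.run(["write a weather report for SF today"])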
agent/autogpt/output_parser.py
ADDED
@@ -0,0 +1,66 @@
import json
import re
from abc import abstractmethod
from typing import Dict, NamedTuple

from langchain.schema import BaseOutputParser


class AutoGPTAction(NamedTuple):
    """Action returned by AutoGPTOutputParser."""

    name: str
    args: Dict


class BaseAutoGPTOutputParser(BaseOutputParser):
    """Base Output parser for AutoGPT."""

    @abstractmethod
    def parse(self, text: str) -> AutoGPTAction:
        """Return AutoGPTAction"""


def preprocess_json_input(input_str: str) -> str:
    """Preprocesses a string to be parsed as json.

    Replace single backslashes with double backslashes,
    while leaving already escaped ones intact.

    Args:
        input_str: String to be preprocessed

    Returns:
        Preprocessed string
    """
    corrected_str = re.sub(
        r'(?<!\\)\\(?!["\\/bfnrt]|u[0-9a-fA-F]{4})', r"\\\\", input_str
    )
    return corrected_str


class AutoGPTOutputParser(BaseAutoGPTOutputParser):
    """Output parser for AutoGPT."""

    def parse(self, text: str) -> AutoGPTAction:
        try:
            parsed = json.loads(text, strict=False)
        except json.JSONDecodeError:
            preprocessed_text = preprocess_json_input(text)
            try:
                parsed = json.loads(preprocessed_text, strict=False)
            except Exception:
                return AutoGPTAction(
                    name="ERROR",
                    args={"error": f"Could not parse invalid json: {text}"},
                )
        try:
            return AutoGPTAction(
                name=parsed["command"]["name"],
                args=parsed["command"]["args"],
            )
        except (KeyError, TypeError):
            # If the command is null or incomplete, return an erroneous tool
            return AutoGPTAction(
                name="ERROR", args={"error": f"Incomplete command args: {parsed}"}
            )
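A quick check of the parser on a well-formed reply; the sample JSON is invented:

reply = (
    '{"thoughts": {"text": "look it up"},'
    ' "command": {"name": "Search", "args": {"tool_input": "SF weather"}}}'
)
action = AutoGPTOutputParser().parse(reply)
print(action.name, action.args)  # Search {'tool_input': 'SF weather'}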
agent/autogpt/prompt.py
ADDED
@@ -0,0 +1,75 @@
import time
from typing import Any, Callable, List

from pydantic import BaseModel

from .prompt_generator import get_prompt
from langchain.prompts.chat import (
    BaseChatPromptTemplate,
)
from langchain.schema import BaseMessage, HumanMessage, SystemMessage
from langchain.tools.base import BaseTool
from langchain.vectorstores.base import VectorStoreRetriever


class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):
    ai_name: str
    ai_role: str
    tools: List[BaseTool]
    token_counter: Callable[[str], int]
    send_token_limit: int = 4196

    def construct_full_prompt(self, goals: List[str]) -> str:
        prompt_start = """Your decisions must always be made independently
without seeking user assistance. Play to your strengths
as an LLM and pursue simple strategies with no legal complications.
If you have completed all your tasks,
make sure to use the "finish" command."""

        # Construct full prompt
        full_prompt = (
            f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
        )
        for i, goal in enumerate(goals):
            full_prompt += f"{i+1}. {goal}\n"

        full_prompt += f"\n\n{get_prompt(self.tools)}"
        return full_prompt

    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        base_prompt = SystemMessage(content=self.construct_full_prompt(kwargs["goals"]))
        time_prompt = SystemMessage(
            content=f"The current time and date is {time.strftime('%c')}"
        )
        used_tokens = self.token_counter(base_prompt.content) + self.token_counter(
            time_prompt.content
        )
        memory: VectorStoreRetriever = kwargs["memory"]
        previous_messages = kwargs["messages"]
        relevant_docs = memory.get_relevant_documents(str(previous_messages[-10:]))
        relevant_memory = [d.page_content for d in relevant_docs]
        relevant_memory_tokens = sum(
            [self.token_counter(doc) for doc in relevant_memory]
        )
        while used_tokens + relevant_memory_tokens > 2500:
            relevant_memory = relevant_memory[:-1]
            relevant_memory_tokens = sum(
                [self.token_counter(doc) for doc in relevant_memory]
            )
        content_format = (
            f"This reminds you of these events "
            f"from your past:\n{relevant_memory}\n\n"
        )
        memory_message = SystemMessage(content=content_format)
        used_tokens += len(memory_message.content)
        historical_messages: List[BaseMessage] = []
        for message in previous_messages[-10:][::-1]:
            message_tokens = self.token_counter(message.content)
            if used_tokens + message_tokens > self.send_token_limit - 1000:
                break
            historical_messages = [message] + historical_messages
        input_message = HumanMessage(content=kwargs["user_input"])
        messages: List[BaseMessage] = [base_prompt, time_prompt, memory_message]
        messages += historical_messages
        messages.append(input_message)
        return messages
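A hedged sketch of the inputs format_messages expects, reusing the FAISS retriever from the AutoGPT sketch above; the whitespace token counter is a crude stand-in for llm.get_num_tokens:

prompt = AutoGPTPrompt(
    ai_name="Tommie",  # placeholder
    ai_role="Assistant",
    tools=[],
    input_variables=["memory", "messages", "goals", "user_input"],
    token_counter=lambda text: len(text.split()),  # crude stand-in counter
)
messages = prompt.format_messages(
    goals=["write a weather report"],
    messages=[],  # no history yet
    memory=vectorstore.as_retriever(),  # retriever from the earlier sketch
    user_input="Determine which next command to use:",
)
# messages = [system prompt, current time, relevant memory, user input]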
agent/autogpt/prompt_generator.py
ADDED
@@ -0,0 +1,189 @@
import json
from typing import List

from langchain.tools.base import BaseTool

FINISH_NAME = "finish"


class PromptGenerator:
    """A class for generating custom prompt strings.

    Does this based on constraints, commands, resources, and performance evaluations.
    """

    def __init__(self) -> None:
        """Initialize the PromptGenerator object.

        Starts with empty lists of constraints, commands, resources,
        and performance evaluations.
        """
        self.constraints: List[str] = []
        self.commands: List[BaseTool] = []
        self.resources: List[str] = []
        self.performance_evaluation: List[str] = []
        self.response_format = {
            "thoughts": {
                "text": "thought",
                "reasoning": "reasoning",
                "plan": "- short bulleted\n- list that conveys\n- long-term plan",
                "criticism": "constructive self-criticism",
                "speak": "thoughts summary to say to user",
            },
            "command": {"name": "command name", "args": {"arg name": "value"}},
        }

    def add_constraint(self, constraint: str) -> None:
        """
        Add a constraint to the constraints list.

        Args:
            constraint (str): The constraint to be added.
        """
        self.constraints.append(constraint)

    def add_tool(self, tool: BaseTool) -> None:
        self.commands.append(tool)

    def _generate_command_string(self, tool: BaseTool) -> str:
        output = f"{tool.name}: {tool.description}"
        # json_args = json.dumps(tool.args) if "tool_input" not in tool.args else tool.args[
        #     "tool_input"
        # ]
        # output += f", args json schema: {json_args}"
        return output

    def add_resource(self, resource: str) -> None:
        """
        Add a resource to the resources list.

        Args:
            resource (str): The resource to be added.
        """
        self.resources.append(resource)

    def add_performance_evaluation(self, evaluation: str) -> None:
        """
        Add a performance evaluation item to the performance_evaluation list.

        Args:
            evaluation (str): The evaluation item to be added.
        """
        self.performance_evaluation.append(evaluation)

    def _generate_numbered_list(self, items: list, item_type: str = "list") -> str:
        """
        Generate a numbered list from given items based on the item_type.

        Args:
            items (list): A list of items to be numbered.
            item_type (str, optional): The type of items in the list.
                Defaults to 'list'.

        Returns:
            str: The formatted numbered list.
        """
        if item_type == "command":
            command_strings = [
                f"{i + 1}. {self._generate_command_string(item)}"
                for i, item in enumerate(items)
            ]
            finish_description = (
                "use this to signal that you have finished all your objectives"
            )
            finish_args = (
                '"response": "final response to let '
                'people know you have finished your objectives"'
            )
            finish_string = (
                f"{len(items) + 1}. {FINISH_NAME}: "
                f"{finish_description}, args: {finish_args}"
            )
            return "\n".join(command_strings + [finish_string])
        else:
            return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))

    def generate_prompt_string(self) -> str:
        """Generate a prompt string.

        Returns:
            str: The generated prompt string.
        """
        formatted_response_format = json.dumps(self.response_format, indent=4)
        prompt_string = (
            f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n"
            f"Commands:\n"
            f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n"
            f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n"
            f"Performance Evaluation:\n"
            f"{self._generate_numbered_list(self.performance_evaluation)}\n\n"
            f"You should only respond in JSON format as described below "
            f"\nResponse Format: \n{formatted_response_format} "
            f"\nEnsure the response can be parsed by Python json.loads"
        )

        return prompt_string


def get_prompt(tools: List[BaseTool]) -> str:
    """This function generates a prompt string.

    It includes various constraints, commands, resources, and performance evaluations.

    Returns:
        str: The generated prompt string.
    """

    # Initialize the PromptGenerator object
    prompt_generator = PromptGenerator()

    # Add constraints to the PromptGenerator object
    prompt_generator.add_constraint(
        "~4000 word limit for short term memory. "
        "Your short term memory is short, "
        "so immediately save important information to files."
    )
    prompt_generator.add_constraint(
        "If you are unsure how you previously did something "
        "or want to recall past events, "
        "thinking about similar events will help you remember."
    )
    prompt_generator.add_constraint("No user assistance")
    prompt_generator.add_constraint(
        'Exclusively use the commands listed in double quotes e.g. "command name"'
    )

    # Add commands to the PromptGenerator object
    for tool in tools:
        prompt_generator.add_tool(tool)

    # Add resources to the PromptGenerator object
    prompt_generator.add_resource(
        "Internet access for searches and information gathering."
    )
    prompt_generator.add_resource("Long Term memory management.")
    prompt_generator.add_resource(
        "GPT-3.5 powered Agents for delegation of simple tasks."
    )
    prompt_generator.add_resource("File output.")

    # Add performance evaluations to the PromptGenerator object
    prompt_generator.add_performance_evaluation(
        "Continuously review and analyze your actions "
        "to ensure you are performing to the best of your abilities."
    )
    prompt_generator.add_performance_evaluation(
        "Constructively self-criticize your big-picture behavior constantly."
    )
    prompt_generator.add_performance_evaluation(
        "Reflect on past decisions and strategies to refine your approach."
    )
    prompt_generator.add_performance_evaluation(
        "Every command has a cost, so be smart and efficient. "
        "Aim to complete tasks in the least number of steps."
    )

    # Generate the prompt string
    prompt_string = prompt_generator.generate_prompt_string()

    return prompt_string
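For reference, get_prompt can be inspected in isolation. The following is a sketch, assuming the repo is importable as the swarms package (the import path the other modules in this commit use); the echo tool is a made-up placeholder.

from langchain.agents import Tool
from swarms.tools.agent.autogpt.prompt_generator import get_prompt

echo = Tool(
    name="echo",
    func=lambda x: x,
    description="Echo the input back unchanged.",
)
# Prints the constraints, the numbered command list (echo plus the implicit
# "finish" command), resources, performance evaluations, and the JSON
# response format the agent is told to follow.
print(get_prompt([echo]))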
agent/autogptmulti/__init__.py
ADDED
File without changes
agent/autogptmulti/agent.py
ADDED
@@ -0,0 +1,138 @@
from __future__ import annotations

from typing import List, Optional

from pydantic import ValidationError

from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from .output_parser import (
    AutoGPTOutputParser,
    BaseAutoGPTOutputParser,
)
from .prompt import AutoGPTPrompt
from .prompt_generator import (
    FINISH_NAME,
)
from langchain.schema import (
    AIMessage,
    BaseMessage,
    Document,
    HumanMessage,
    SystemMessage,
)
from langchain.tools.base import BaseTool
from langchain.tools.human.tool import HumanInputRun
from langchain.vectorstores.base import VectorStoreRetriever
import json


class AutoGPT:
    """Agent class for interacting with Auto-GPT."""

    def __init__(
        self,
        ai_name: str,
        memory: VectorStoreRetriever,
        chain: LLMChain,
        output_parser: BaseAutoGPTOutputParser,
        tools: List[BaseTool],
        feedback_tool: Optional[HumanInputRun] = None,
    ):
        self.ai_name = ai_name
        self.memory = memory
        self.full_message_history: List[BaseMessage] = []
        self.next_action_count = 0
        self.chain = chain
        self.output_parser = output_parser
        self.tools = tools
        self.feedback_tool = feedback_tool

    @classmethod
    def from_llm_and_tools(
        cls,
        ai_name: str,
        ai_role: str,
        memory: VectorStoreRetriever,
        tools: List[BaseTool],
        llm: BaseChatModel,
        human_in_the_loop: bool = False,
        output_parser: Optional[BaseAutoGPTOutputParser] = None,
    ) -> AutoGPT:
        prompt = AutoGPTPrompt(
            ai_name=ai_name,
            ai_role=ai_role,
            tools=tools,
            input_variables=["memory", "messages", "goals", "user_input"],
            token_counter=llm.get_num_tokens,
        )
        human_feedback_tool = HumanInputRun() if human_in_the_loop else None
        chain = LLMChain(llm=llm, prompt=prompt)
        return cls(
            ai_name,
            memory,
            chain,
            output_parser or AutoGPTOutputParser(),
            tools,
            feedback_tool=human_feedback_tool,
        )

    def __call__(self, goals: List[str]) -> str:
        user_input = (
            "Determine which next command to use, "
            "and respond using the format specified above:"
        )
        # Interaction Loop
        loop_count = 0
        history_rec = []
        while True:
            # Discontinue if continuous limit is reached
            loop_count += 1
            # Send message to AI, get response
            assistant_reply = self.chain.run(
                goals=goals,
                messages=self.full_message_history,
                memory=self.memory,
                user_input=user_input,
            )
            pos = assistant_reply.find("{")
            if pos > 0:
                assistant_reply = assistant_reply[pos:]
            # Print Assistant thoughts
            print(assistant_reply)
            self.full_message_history.append(HumanMessage(content=user_input))
            self.full_message_history.append(AIMessage(content=assistant_reply))

            # Get command name and arguments
            action = self.output_parser.parse(assistant_reply)
            tools = {t.name: t for t in self.tools}

            if action.name == FINISH_NAME:
                return action.args["response"]
            if action.name in tools:
                tool = tools[action.name]
                try:
                    # For tools in swarms.tools the input should be a string,
                    # while default langchain tools take JSON-format input,
                    # so the args are serialized to one string here.
                    tmp_json = action.args.copy()
                    tmp_json["history context"] = str(history_rec[-5:])[-500:]
                    tmp_json["user message"] = goals[0]
                    json_args = str(tmp_json).replace("'", '"')
                    observation = tool.run(json_args)
                except ValidationError as e:
                    observation = f"Error in args: {str(e)}"
                result = f"Command {tool.name} returned: {observation}"
                if (
                    result.find("using the given APIs") == -1
                    and result.lower().find("no answer") == -1
                ):
                    history_rec.append(f"Tool {action.name} returned: {observation}")
            elif action.name == "ERROR":
                result = f"Error: {action.args}. "
            else:
                result = (
                    f"Unknown command '{action.name}'. "
                    f"Please refer to the 'COMMANDS' list for available "
                    f"commands and only respond in the specified JSON format."
                )

            self.full_message_history.append(SystemMessage(content=result))
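Before a tool is invoked, the loop above repacks the parsed command args with recent tool history and the original user message, then serializes everything into one string for swarms.tools subagents. A pure-Python sketch of that repacking, with made-up values:

history_rec = ["Tool search returned: 3 results", "Tool fetch returned: ok"]
goals = ["Find the weather in Berlin"]
args = {"goal": "query current weather for Berlin"}

tmp_json = args.copy()
tmp_json["history context"] = str(history_rec[-5:])[-500:]  # last 5 records, capped at 500 chars
tmp_json["user message"] = goals[0]
json_args = str(tmp_json).replace("'", '"')  # the single string handed to tool.run(...)
print(json_args)

Note that str() plus a quote replacement is fragile compared to json.dumps (values containing quotes break it); the sketch mirrors the original code rather than hardening it.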
agent/autogptmulti/output_parser.py
ADDED
@@ -0,0 +1,66 @@
import json
import re
from abc import abstractmethod
from typing import Dict, NamedTuple

from langchain.schema import BaseOutputParser


class AutoGPTAction(NamedTuple):
    """Action returned by AutoGPTOutputParser."""

    name: str
    args: Dict


class BaseAutoGPTOutputParser(BaseOutputParser):
    """Base Output parser for AutoGPT."""

    @abstractmethod
    def parse(self, text: str) -> AutoGPTAction:
        """Return AutoGPTAction"""


def preprocess_json_input(input_str: str) -> str:
    """Preprocesses a string to be parsed as json.

    Replace single backslashes with double backslashes,
    while leaving already escaped ones intact.

    Args:
        input_str: String to be preprocessed

    Returns:
        Preprocessed string
    """
    corrected_str = re.sub(
        r'(?<!\\)\\(?!["\\/bfnrt]|u[0-9a-fA-F]{4})', r"\\\\", input_str
    )
    return corrected_str


class AutoGPTOutputParser(BaseAutoGPTOutputParser):
    """Output parser for AutoGPT."""

    def parse(self, text: str) -> AutoGPTAction:
        try:
            parsed = json.loads(text, strict=False)
        except json.JSONDecodeError:
            preprocessed_text = preprocess_json_input(text)
            try:
                parsed = json.loads(preprocessed_text, strict=False)
            except Exception:
                return AutoGPTAction(
                    name="ERROR",
                    args={"error": f"Could not parse invalid json: {text}"},
                )
        try:
            return AutoGPTAction(
                name=parsed["command"]["name"],
                args=parsed["command"]["args"],
            )
        except (KeyError, TypeError):
            # If the command is null or incomplete, return an erroneous tool
            return AutoGPTAction(
                name="ERROR", args={"error": f"Incomplete command args: {parsed}"}
            )
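A minimal check of the parser's contract (a sketch; the import path assumes the repo is installed as the swarms package): well-formed JSON with a "command" object yields an AutoGPTAction, while unparseable text degrades to an action named "ERROR" instead of raising.

from swarms.tools.agent.autogptmulti.output_parser import AutoGPTOutputParser

parser = AutoGPTOutputParser()

reply = (
    '{"thoughts": {"text": "t", "reasoning": "r"},'
    ' "command": {"name": "search", "args": {"goal": "find the docs"}}}'
)
action = parser.parse(reply)
print(action.name, action.args)  # search {'goal': 'find the docs'}

bad = parser.parse("not json at all")
print(bad.name)  # ERROR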
agent/autogptmulti/prompt.py
ADDED
@@ -0,0 +1,68 @@
import time
from typing import Any, Callable, List

from pydantic import BaseModel

from .prompt_generator import get_prompt
from langchain.prompts.chat import (
    BaseChatPromptTemplate,
)
from langchain.schema import BaseMessage, HumanMessage, SystemMessage
from langchain.tools.base import BaseTool
from langchain.vectorstores.base import VectorStoreRetriever


class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):
    ai_name: str
    ai_role: str
    tools: List[BaseTool]
    token_counter: Callable[[str], int]
    send_token_limit: int = 4196

    def construct_full_prompt(self, goals: List[str]) -> str:
        prompt_start = """Your decisions must always be made independently
            without seeking user assistance. Play to your strengths
            as an LLM and pursue simple strategies with no legal complications.
            Once you have completed your goal or have found that it cannot be finished with the current commands,
            make sure to use the "finish" command immediately."""

        # Construct full prompt
        full_prompt = (
            f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
        )
        if isinstance(goals, list):
            for i, goal in enumerate(goals):
                full_prompt += f"{i+1}. {goal}\n"
        else:
            full_prompt += f"{goals}\n"
        full_prompt += f"\n\n{get_prompt(self.tools)}"
        return full_prompt

    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        base_prompt = SystemMessage(content=self.construct_full_prompt(kwargs["goals"]))
        time_prompt = SystemMessage(
            content=f"The current time and date is {time.strftime('%c')}"
        )
        used_tokens = self.token_counter(base_prompt.content) + self.token_counter(
            time_prompt.content
        )
        memory: VectorStoreRetriever = kwargs["memory"]
        previous_messages = kwargs["messages"]

        content_format = (
            f"This reminds you of these events "
            f"you have already used, and NEVER conduct repeated or unrelated commands:\n"
        )
        memory_message = SystemMessage(content=content_format)
        used_tokens += len(memory_message.content)
        historical_messages: List[BaseMessage] = []
        for message in previous_messages[-10:][::-1]:
            message_tokens = self.token_counter(message.content)
            if used_tokens + message_tokens > self.send_token_limit - 1000:
                break
            historical_messages = [message] + historical_messages
        input_message = HumanMessage(content=kwargs["user_input"])
        messages: List[BaseMessage] = [base_prompt, time_prompt, memory_message]
        messages += historical_messages
        messages.append(input_message)
        return messages
agent/autogptmulti/prompt_generator.py
ADDED
@@ -0,0 +1,191 @@
import json
from typing import List

from langchain.tools.base import BaseTool

FINISH_NAME = "finish"


class PromptGenerator:
    """A class for generating custom prompt strings.

    Does this based on constraints, commands, resources, and performance evaluations.
    """

    def __init__(self) -> None:
        """Initialize the PromptGenerator object.

        Starts with empty lists of constraints, commands, resources,
        and performance evaluations.
        """
        self.constraints: List[str] = []
        self.commands: List[BaseTool] = []
        self.resources: List[str] = []
        self.performance_evaluation: List[str] = []
        self.response_format = {
            "thoughts": {
                "text": "thought",
                "reasoning": "reasoning",
            },
            "command": {
                "name": "command name",
                "args": {
                    "goal": "the detailed description and necessary information of the subtask that you hope the current command can achieve"
                },
            },
        }

    def add_constraint(self, constraint: str) -> None:
        """
        Add a constraint to the constraints list.

        Args:
            constraint (str): The constraint to be added.
        """
        self.constraints.append(constraint)

    def add_tool(self, tool: BaseTool) -> None:
        self.commands.append(tool)

    def _generate_command_string(self, tool: BaseTool) -> str:
        output = f"{tool.name}: {tool.description}"
        # json_args = json.dumps(tool.args) if "tool_input" not in tool.args else tool.args[
        #     "tool_input"
        # ]
        # output += f", args json schema: {json_args}"
        return output

    def add_resource(self, resource: str) -> None:
        """
        Add a resource to the resources list.

        Args:
            resource (str): The resource to be added.
        """
        self.resources.append(resource)

    def add_performance_evaluation(self, evaluation: str) -> None:
        """
        Add a performance evaluation item to the performance_evaluation list.

        Args:
            evaluation (str): The evaluation item to be added.
        """
        self.performance_evaluation.append(evaluation)

    def _generate_numbered_list(self, items: list, item_type: str = "list") -> str:
        """
        Generate a numbered list from given items based on the item_type.

        Args:
            items (list): A list of items to be numbered.
            item_type (str, optional): The type of items in the list.
                Defaults to 'list'.

        Returns:
            str: The formatted numbered list.
        """
        if item_type == "command":
            command_strings = [
                f"{i + 1}. {self._generate_command_string(item)}"
                for i, item in enumerate(items)
            ]
            finish_description = (
                "use this to signal that you have finished all your objectives"
            )
            finish_args = (
                '"response": "final response to let '
                'people know you have finished your objectives"'
            )
            finish_string = (
                f"{len(items) + 1}. {FINISH_NAME}: "
                f"{finish_description}, args: {finish_args}"
            )
            return "\n".join(command_strings + [finish_string])
        else:
            return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))

    def generate_prompt_string(self) -> str:
        """Generate a prompt string.

        Returns:
            str: The generated prompt string.
        """
        formatted_response_format = json.dumps(self.response_format, indent=4)
        prompt_string = (
            # f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n"
            f"Commands:\n"
            f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n"
            # f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n"
            f"Performance Evaluation:\n"
            f"{self._generate_numbered_list(self.performance_evaluation)}\n\n"
            f"You should only respond in JSON format as described below "
            f"\nResponse Format: \n{formatted_response_format} "
            f"\nEnsure the response can be parsed by Python json.loads"
        )

        return prompt_string


def get_prompt(tools: List[BaseTool]) -> str:
    """This function generates a prompt string.

    It includes various constraints, commands, resources, and performance evaluations.

    Returns:
        str: The generated prompt string.
    """

    # Initialize the PromptGenerator object
    prompt_generator = PromptGenerator()

    # Add constraints to the PromptGenerator object
    prompt_generator.add_constraint(
        "~4000 word limit for short term memory. "
        "Your short term memory is short, "
        "so immediately save important information to files."
    )
    prompt_generator.add_constraint(
        "If you are unsure how you previously did something "
        "or want to recall past events, "
        "thinking about similar events will help you remember."
    )
    prompt_generator.add_constraint("No user assistance")
    prompt_generator.add_constraint(
        'Exclusively use the commands listed in double quotes e.g. "command name"'
    )

    # Add commands to the PromptGenerator object
    for tool in tools:
        prompt_generator.add_tool(tool)

    # Add resources to the PromptGenerator object
    prompt_generator.add_resource(
        "Internet access for searches and information gathering."
    )
    prompt_generator.add_resource("Long Term memory management.")
    prompt_generator.add_resource(
        "GPT-3.5 powered Agents for delegation of simple tasks."
    )
    prompt_generator.add_resource("File output.")

    # Add performance evaluations to the PromptGenerator object
    prompt_generator.add_performance_evaluation(
        "Continuously review and analyze your actions "
        "to ensure you are performing to the best of your abilities."
    )
    prompt_generator.add_performance_evaluation(
        "Constructively self-criticize your big-picture behavior constantly."
    )
    prompt_generator.add_performance_evaluation(
        "Reflect on past decisions and strategies to refine your approach."
    )
    prompt_generator.add_performance_evaluation(
        "Every command has a cost, so be smart and efficient. "
        "Aim to complete tasks in the least number of steps."
    )

    # Generate the prompt string
    prompt_string = prompt_generator.generate_prompt_string()

    return prompt_string
agent/executor.py
ADDED
@@ -0,0 +1,123 @@
import time
import types
from typing import Any, Dict, List, Tuple, Union
from langchain.agents import AgentExecutor
from langchain.input import get_color_mapping
from langchain.schema import AgentAction, AgentFinish
from .translator import Translator


class AgentExecutorWithTranslation(AgentExecutor):
    translator: Translator = Translator()

    def prep_outputs(
        self,
        inputs: Dict[str, str],
        outputs: Dict[str, str],
        return_only_outputs: bool = False,
    ) -> Dict[str, str]:
        try:
            outputs = super().prep_outputs(inputs, outputs, return_only_outputs)
        except ValueError as e:
            return outputs
        else:
            if "input" in outputs:
                outputs = self.translator(outputs)
            return outputs


class Executor(AgentExecutorWithTranslation):
    def _call(self, inputs: Dict[str, str]) -> Dict[str, Any]:
        """Run text through and get agent response."""
        # Construct a mapping of tool name to tool for easy lookup
        name_to_tool_map = {tool.name: tool for tool in self.tools}
        # We construct a mapping from each tool to a color, used for logging.
        color_mapping = get_color_mapping(
            [tool.name for tool in self.tools], excluded_colors=["green"]
        )
        intermediate_steps: List[Tuple[AgentAction, str]] = []
        # Let's start tracking the iterations the agent has gone through
        iterations = 0
        time_elapsed = 0.0
        start_time = time.time()
        # We now enter the agent loop (until it returns something).
        while self._should_continue(iterations, time_elapsed):
            next_step_output = self._take_next_step(
                name_to_tool_map, color_mapping, inputs, intermediate_steps
            )
            if isinstance(next_step_output, AgentFinish):
                yield self._return(next_step_output, intermediate_steps)
                return

            for i, output in enumerate(next_step_output):
                agent_action = output[0]
                tool_logo = None
                for tool in self.tools:
                    if tool.name == agent_action.tool:
                        tool_logo = tool.tool_logo_md
                if isinstance(output[1], types.GeneratorType):
                    logo = f"{tool_logo}" if tool_logo is not None else ""
                    yield (
                        AgentAction("", agent_action.tool_input, agent_action.log),
                        f"Further use other tool {logo} to answer the question.",
                    )
                    for out in output[1]:
                        yield out
                    next_step_output[i] = (agent_action, out)
                else:
                    for tool in self.tools:
                        if tool.name == agent_action.tool:
                            yield (
                                AgentAction(
                                    tool_logo, agent_action.tool_input, agent_action.log
                                ),
                                output[1],
                            )

            intermediate_steps.extend(next_step_output)
            if len(next_step_output) == 1:
                next_step_action = next_step_output[0]
                # See if tool should return directly
                tool_return = self._get_tool_return(next_step_action)
                if tool_return is not None:
                    yield self._return(tool_return, intermediate_steps)
                    return
            iterations += 1
            time_elapsed = time.time() - start_time
        output = self.agent.return_stopped_response(
            self.early_stopping_method, intermediate_steps, **inputs
        )
        yield self._return(output, intermediate_steps)
        return

    def __call__(
        self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False
    ) -> Dict[str, Any]:
        """Run the logic of this chain and add to output if desired.

        Args:
            inputs: Dictionary of inputs, or single input if chain expects
                only one param.
            return_only_outputs: boolean for whether to return only outputs in the
                response. If True, only new keys generated by this chain will be
                returned. If False, both input keys and new keys generated by this
                chain will be returned. Defaults to False.

        """
        inputs = self.prep_inputs(inputs)
        self.callback_manager.on_chain_start(
            {"name": self.__class__.__name__},
            inputs,
            verbose=self.verbose,
        )
        try:
            for output in self._call(inputs):
                if type(output) is dict:
                    output = self.prep_outputs(inputs, output, return_only_outputs)
                yield output
        except (KeyboardInterrupt, Exception) as e:
            self.callback_manager.on_chain_error(e, verbose=self.verbose)
            raise e
        self.callback_manager.on_chain_end(output, verbose=self.verbose)
        # return self.prep_outputs(inputs, output, return_only_outputs)
        return output
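Because _call and __call__ above are generator functions, calling a streaming Executor yields each (AgentAction, observation) pair as it happens, followed by the final output dict. A sketch of consuming that stream, assuming the repo's local tool server is running on port 8079 (as in the URLs used elsewhere in this commit) and OPENAI_API_KEY is set:

from swarms.tools.agent.singletool import STQuestionAnswerer, load_single_tools

tool_name, tool_config = load_single_tools(
    "wolframalpha", "http://127.0.0.1:8079/tools/wolframalpha/"
)
qa = STQuestionAnswerer(stream_output=True)  # stream_output=True selects Executor
agent = qa.load_tools(tool_name, tool_config)

# Intermediate (action, observation) tuples arrive first, then the final dict.
for step in agent("Calc the integral of sin(x) from 0 to 1"):
    print(step)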
agent/singletool.py
ADDED
@@ -0,0 +1,270 @@
from langchain.llms import OpenAI
from langchain import OpenAI, LLMChain, PromptTemplate, SerpAPIWrapper
from langchain.agents import ZeroShotAgent, AgentExecutor, initialize_agent, Tool
import importlib
import json
import os
import requests
import yaml
from swarms.tools.agent.apitool import RequestTool
from swarms.tools.agent.executor import Executor, AgentExecutorWithTranslation
from swarms.tools import get_logger
from swarms.tools.agent.BabyagiTools import BabyAGI

# from swarms.tools
# .models.customllm import CustomLLM


logger = get_logger(__name__)


def import_all_apis(tool_json):
    """Import all APIs exposed by a tool."""
    doc_url = tool_json["api"]["url"]
    response = requests.get(doc_url)

    logger.info("Doc string URL: {}".format(doc_url))
    if doc_url.endswith("yaml") or doc_url.endswith("yml"):
        plugin = yaml.safe_load(response.text)
    else:
        plugin = json.loads(response.text)

    server_url = plugin["servers"][0]["url"]
    if server_url.startswith("/"):
        server_url = "http://127.0.0.1:8079" + server_url
    logger.info("server_url {}".format(server_url))
    all_apis = []
    for key in plugin["paths"]:
        value = plugin["paths"][key]
        for method in value:
            api = RequestTool(
                root_url=server_url, func_url=key, method=method, request_info=value
            )
            all_apis.append(api)
    return all_apis


def load_single_tools(tool_name, tool_url):
    # tool_name, tool_url = "datasette", "https://datasette.io/"
    # tool_name, tool_url = "klarna", "https://www.klarna.com/"
    # tool_name, tool_url = 'chemical-prop', "http://127.0.0.1:8079/tools/chemical-prop/"
    # tool_name, tool_url = 'douban-film', "http://127.0.0.1:8079/tools/douban-film/"
    # tool_name, tool_url = 'weather', "http://127.0.0.1:8079/tools/weather/"
    # tool_name, tool_url = 'wikipedia', "http://127.0.0.1:8079/tools/wikipedia/"
    # tool_name, tool_url = 'wolframalpha', "http://127.0.0.1:8079/tools/wolframalpha/"
    # tool_name, tool_url = 'klarna', "https://www.klarna.com/"

    get_url = tool_url + ".well-known/ai-plugin.json"
    response = requests.get(get_url)

    if response.status_code == 200:
        tool_config_json = response.json()
    else:
        raise RuntimeError("Your URL of the tool is invalid.")

    return tool_name, tool_config_json


class STQuestionAnswerer:
    def __init__(self, openai_api_key="", stream_output=False, llm="ChatGPT"):
        if len(openai_api_key) < 3:  # not a valid key (TODO: more rigorous checking)
            openai_api_key = os.environ.get("OPENAI_API_KEY")

        self.openai_api_key = openai_api_key
        self.llm_model = llm

        self.set_openai_api_key(openai_api_key)
        self.stream_output = stream_output

    def set_openai_api_key(self, key):
        logger.info("Using {}".format(self.llm_model))

        if self.llm_model == "GPT-3.5":
            self.llm = OpenAI(temperature=0.0, openai_api_key=key)  # use text-davinci
        elif self.llm_model == "ChatGPT":
            self.llm = OpenAI(
                model_name="gpt-3.5-turbo", temperature=0.0, openai_api_key=key
            )  # use chatgpt
        else:
            raise RuntimeError("Your model is not available.")

    def load_tools(
        self,
        name,
        meta_info,
        prompt_type="react-with-tool-description",
        return_intermediate_steps=True,
    ):
        self.all_tools_map = {}
        self.all_tools_map[name] = import_all_apis(meta_info)

        logger.info(
            "Tool [{}] has the following apis: {}".format(
                name, self.all_tools_map[name]
            )
        )

        if prompt_type == "zero-shot-react-description":
            subagent = initialize_agent(
                self.all_tools_map[name],
                self.llm,
                agent="zero-shot-react-description",
                verbose=True,
                return_intermediate_steps=return_intermediate_steps,
            )
        elif prompt_type == "react-with-tool-description":
            # customllm = CustomLLM()
            description_for_model = (
                meta_info["description_for_model"]
                .replace("{", "{{")
                .replace("}", "}}")
                .strip()
            )

            prefix = f"""Answer the following questions as best you can. General instructions are: {description_for_model}. Specifically, you have access to the following APIs:"""
            # suffix = """Begin! Remember: (1) Follow the format, i.e.,\nThought:\nAction:\nAction Input:\nObservation:\nFinal Answer:\n (2) Provide as much useful information as possible in your Final Answer. (3) YOU MUST INCLUDE all relevant IMAGES in your Final Answer using format ![img](url), and include relevant links. (4) Do not make up anything, and if your Observation has no link, DO NOT hallucinate one. (5) If you have enough information, please use \nThought: I have got enough information\nFinal Answer: \n\nQuestion: {input}\n{agent_scratchpad}"""
            suffix = """Begin! Remember: (1) Follow the format, i.e.,\nThought:\nAction:\nAction Input:\nObservation:\nFinal Answer:\n. The action you generate must be exactly one of the given API names instead of a sentence or any other redundant text. The action input is one JSON-format dict without any redundant text or bracket descriptions. (2) Provide as much useful information (such as useful values/file paths in your observation) as possible in your Final Answer. Do not describe the process by which you achieve the goal, but only provide the detailed answer or response to the task goal. (3) Do not make up anything. DO NOT generate observation content by yourself. (4) Read the observation carefully, and pay attention to the messages even if an error occurs. (5) Once you have enough information, please immediately use \nThought: I have got enough information\nFinal Answer: \n\nTask: {input}\n{agent_scratchpad}"""

            prompt = ZeroShotAgent.create_prompt(
                self.all_tools_map[name],
                prefix=prefix,
                suffix=suffix,
                input_variables=["input", "agent_scratchpad"],
            )
            llm_chain = LLMChain(llm=self.llm, prompt=prompt)
            # llm_chain = LLMChain(llm=customllm, prompt=prompt)
            logger.info("Full prompt template: {}".format(prompt.template))
            tool_names = [tool.name for tool in self.all_tools_map[name]]
            agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
            if self.stream_output:
                agent_executor = Executor.from_agent_and_tools(
                    agent=agent,
                    tools=self.all_tools_map[name],
                    verbose=True,
                    return_intermediate_steps=return_intermediate_steps,
                )
            else:
                agent_executor = AgentExecutorWithTranslation.from_agent_and_tools(
                    agent=agent,
                    tools=self.all_tools_map[name],
                    verbose=True,
                    return_intermediate_steps=return_intermediate_steps,
                )
            return agent_executor
        elif prompt_type == "babyagi":
            # customllm = CustomLLM()
            tool_str = "; ".join([t.name for t in self.all_tools_map[name]])
            prefix = """You are an AI who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}.\n You have access to the following APIs:"""
            suffix = (
                """YOUR CONSTRAINTS: (1) YOU MUST follow this format:
\nThought:\nAction:\nAction Input: \n or \nThought:\nFinal Answer:\n (2) Do not make up anything, and if your Observation has no link, DO NOT hallucinate one. (3) The Action: MUST be one of the following: """
                + tool_str
                + """\nQuestion: {task}\n Agent scratchpad (history actions): {agent_scratchpad}."""
            )

            prompt = ZeroShotAgent.create_prompt(
                self.all_tools_map[name],
                prefix=prefix,
                suffix=suffix,
                input_variables=["objective", "task", "context", "agent_scratchpad"],
            )

            logger.info("Full prompt template: {}".format(prompt.template))
            # specify the maximum number of iterations you want babyAGI to perform
            max_iterations = 10
            baby_agi = BabyAGI.from_llm(
                llm=self.llm,
                # llm=customllm,
                prompt=prompt,
                verbose=False,
                tools=self.all_tools_map[name],
                stream_output=self.stream_output,
                return_intermediate_steps=return_intermediate_steps,
                max_iterations=max_iterations,
            )

            return baby_agi
        elif prompt_type == "autogpt":
            from langchain.vectorstores import FAISS
            from langchain.docstore import InMemoryDocstore
            from langchain.embeddings import OpenAIEmbeddings
            from langchain.tools.file_management.write import WriteFileTool
            from langchain.tools.file_management.read import ReadFileTool

            # Define your embedding model
            embeddings_model = OpenAIEmbeddings()
            # Initialize the vectorstore as empty
            import faiss

            embedding_size = 1536
            index = faiss.IndexFlatL2(embedding_size)
            vectorstore = FAISS(
                embeddings_model.embed_query, index, InMemoryDocstore({}), {}
            )

            from .autogpt.agent import AutoGPT
            from langchain.chat_models import ChatOpenAI
            from langchain.schema import (
                AIMessage,
                ChatGeneration,
                ChatMessage,
                ChatResult,
                HumanMessage,
                SystemMessage,
            )

            # customllm = CustomLLM()
            # class MyChatOpenAI(ChatOpenAI):
            #     def _create_chat_result(self, response):
            #         generations = []
            #         for res in response["choices"]:
            #             message = self._convert_dict_to_message(res["message"])
            #             gen = ChatGeneration(message=message)
            #             generations.append(gen)
            #         llm_output = {"token_usage": response["usage"], "model_name": self.model_name}
            #         return ChatResult(generations=generations, llm_output=llm_output)

            #     def _generate(self, messages, stop):
            #         message_dicts, params = self._create_message_dicts(messages, stop)
            #         response = customllm(message_dicts)
            #         response = json.loads(response)
            #         # response = self.completion_with_retry(messages=message_dicts, **params)
            #         return self._create_chat_result(response)

            #     def _convert_dict_to_message(self, _dict: dict):
            #         role = _dict["role"]
            #         if role == "user":
            #             return HumanMessage(content=_dict["content"])
            #         elif role == "assistant":
            #             return AIMessage(content=_dict["content"])
            #         elif role == "system":
            #             return SystemMessage(content=_dict["content"])
            #         else:
            #             return ChatMessage(content=_dict["content"], role=role)

            # should integrate WriteFile and ReadFile into tools, will fix later.
            # for tool in [WriteFileTool(), ReadFileTool()]:
            #     self.all_tools_map[name].append(tool)

            agent = AutoGPT.from_llm_and_tools(
                ai_name="Tom",
                ai_role="Assistant",
                tools=self.all_tools_map[name],
                llm=ChatOpenAI(temperature=0),
                # llm=MyChatOpenAI(temperature=0),
                memory=vectorstore.as_retriever(),
            )
            # Set verbose to be true
            agent.chain.verbose = True
            return agent


if __name__ == "__main__":
    # load_single_tools requires a concrete name/URL pair; the wolframalpha
    # entry from the comments above is used here as an example.
    tools_name, tools_config = load_single_tools(
        "wolframalpha", "http://127.0.0.1:8079/tools/wolframalpha/"
    )
    print(tools_name, tools_config)

    qa = STQuestionAnswerer()

    agent = qa.load_tools(tools_name, tools_config)

    agent("Calc integral of sin(x)+2x^2+3x+1 from 0 to 1")
agent/tools_controller.py
ADDED
@@ -0,0 +1,155 @@
from langchain.llms import OpenAI
from langchain import OpenAI, LLMChain
from langchain.agents import ZeroShotAgent, AgentExecutor
import importlib
import json
import os
import requests
import yaml
from swarms.tools.agent.apitool import Tool
from swarms.tools.agent.singletool import STQuestionAnswerer
from swarms.tools.agent.executor import Executor, AgentExecutorWithTranslation
from swarms.tools import get_logger
from swarms.tools.models.customllm import CustomLLM

logger = get_logger(__name__)


def load_valid_tools(tools_mappings):
    tools_to_config = {}
    for key in tools_mappings:
        get_url = tools_mappings[key] + ".well-known/ai-plugin.json"

        response = requests.get(get_url)

        if response.status_code == 200:
            tools_to_config[key] = response.json()
        else:
            logger.warning(
                "Load tool {} error, status code {}".format(key, response.status_code)
            )

    return tools_to_config


class MTQuestionAnswerer:
    """Use multiple tools to answer a question. Basically passes a natural-language question to the matching tool subagents."""

    def __init__(self, openai_api_key, all_tools, stream_output=False, llm="ChatGPT"):
        if len(openai_api_key) < 3:  # not a valid key (TODO: more rigorous checking)
            openai_api_key = os.environ.get("OPENAI_API_KEY")
        self.openai_api_key = openai_api_key
        self.stream_output = stream_output
        self.llm_model = llm
        self.set_openai_api_key(openai_api_key)
        self.load_tools(all_tools)

    def set_openai_api_key(self, key):
        logger.info("Using {}".format(self.llm_model))
        if self.llm_model == "GPT-3.5":
            self.llm = OpenAI(temperature=0.0, openai_api_key=key)  # use text-davinci
        elif self.llm_model == "ChatGPT":
            self.llm = OpenAI(
                model_name="gpt-3.5-turbo", temperature=0.0, openai_api_key=key
            )  # use chatgpt
        else:
            raise RuntimeError("Your model is not available.")

    def load_tools(self, all_tools):
        logger.info("All tools: {}".format(all_tools))
        self.all_tools_map = {}
        self.tools_pool = []
        for name in all_tools:
            meta_info = all_tools[name]

            question_answer = STQuestionAnswerer(
                self.openai_api_key,
                stream_output=self.stream_output,
                llm=self.llm_model,
            )
            subagent = question_answer.load_tools(
                name,
                meta_info,
                prompt_type="react-with-tool-description",
                return_intermediate_steps=False,
            )
            tool_logo_md = f'<img src="{meta_info["logo_url"]}" width="32" height="32" style="display:inline-block">'
            for tool in subagent.tools:
                tool.tool_logo_md = tool_logo_md
            tool = Tool(
                name=meta_info["name_for_model"],
                description=meta_info["description_for_model"]
                .replace("{", "{{")
                .replace("}", "}}"),
                func=subagent,
            )
            tool.tool_logo_md = tool_logo_md
            self.tools_pool.append(tool)

    def build_runner(
        self,
    ):
        from langchain.vectorstores import FAISS
        from langchain.docstore import InMemoryDocstore
        from langchain.embeddings import OpenAIEmbeddings

        embeddings_model = OpenAIEmbeddings()
        import faiss

        embedding_size = 1536
        index = faiss.IndexFlatL2(embedding_size)
        vectorstore = FAISS(
            embeddings_model.embed_query, index, InMemoryDocstore({}), {}
        )

        from .autogptmulti.agent import AutoGPT
        from langchain.chat_models import ChatOpenAI

        agent_executor = AutoGPT.from_llm_and_tools(
            ai_name="Tom",
            ai_role="Assistant",
            tools=self.tools_pool,
            llm=ChatOpenAI(temperature=0),
            memory=vectorstore.as_retriever(),
        )
        '''
        # You can tune the prompt to make the model behave better, and you can also tune the tools' docs.
        prefix = """Answer the following questions as best you can. In this level, you are calling the tools in natural language format, since the tools are actually an intelligent agent like you, but they are expert only in one area. Several things to remember. (1) Remember to follow the format of passing natural language as the Action Input. (2) DO NOT use your imagination, only use concrete information given by the tools. (3) If the observation contains images or urls which have useful information, YOU MUST INCLUDE ALL USEFUL IMAGES and links in your Answer and Final Answers using format ![img](url). BUT DO NOT provide any imaginary links. (4) The information in your Final Answer should include ALL the information returned by the tools. (5) If a user's query is in a language other than English, please translate it to English without tools, and translate it back to the source language in the Final Answer. You have access to the following tools (Only use these tools we provide you):"""
        suffix = """\nBegin! Remember to . \nQuestion: {input}\n{agent_scratchpad}"""


        prompt = ZeroShotAgent.create_prompt(
            self.tools_pool,
            prefix=prefix,
            suffix=suffix,
            input_variables=["input", "agent_scratchpad"]
        )
        llm_chain = LLMChain(llm=self.llm, prompt=prompt)
        logger.info("Full Prompt Template:\n {}".format(prompt.template))
        tool_names = [tool.name for tool in self.tools_pool]
        agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
        if self.stream_output:
            agent_executor = Executor.from_agent_and_tools(agent=agent, tools=self.tools_pool, verbose=True, return_intermediate_steps=True)
        else:
            agent_executor = AgentExecutorWithTranslation.from_agent_and_tools(agent=agent, tools=self.tools_pool, verbose=True, return_intermediate_steps=True)
        '''
        return agent_executor


if __name__ == "__main__":
    tools_mappings = {
        "klarna": "https://www.klarna.com/",
        "chemical-prop": "http://127.0.0.1:8079/tools/chemical-prop/",
        "wolframalpha": "http://127.0.0.1:8079/tools/wolframalpha/",
        "weather": "http://127.0.0.1:8079/tools/weather/",
    }

    tools = load_valid_tools(tools_mappings)

    qa = MTQuestionAnswerer(openai_api_key="", all_tools=tools)

    agent = qa.build_runner()

    agent(
        "How many carbon elements are there in CH3COOH? How many people are there in China?"
    )
agent/translator.py
ADDED
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.llms import OpenAI
|
2 |
+
from langchain.prompts import PromptTemplate
|
3 |
+
from langchain.chains import LLMChain
|
4 |
+
|
5 |
+
import py3langid as langid
|
6 |
+
from iso639 import languages
|
7 |
+
|
8 |
+
from typing import Dict
|
9 |
+
from copy import deepcopy
|
10 |
+
import os
|
11 |
+
|
12 |
+
|
13 |
+
def detect_lang(text: str):
|
14 |
+
lang_code = langid.classify(text)[0]
|
15 |
+
lang_name = languages.get(part1=lang_code[:2]).name
|
16 |
+
return lang_name
|
17 |
+
|
18 |
+
|
19 |
+
class Translator:
|
20 |
+
def __init__(self, openai_api_key: str = None, model_name: str = "gpt-3.5-turbo"):
|
21 |
+
self.openai_api_key = openai_api_key
|
22 |
+
self.model_name = model_name
|
23 |
+
        self.init_flag = False

    def init_model(self):
        llm = self.create_openai_model(self.openai_api_key, self.model_name)
        prompt = self.create_prompt()
        self.chain = LLMChain(llm=llm, prompt=prompt)
        self.init_flag = True

    def __call__(self, inputs: Dict[str, str]) -> Dict[str, str]:
        if not self.init_flag:
            self.init_model()

        question = inputs["input"]
        answer = inputs["output"]

        src_lang = detect_lang(answer)
        tgt_lang = detect_lang(question)

        if src_lang != tgt_lang:
            translated_answer = self.chain.run(text=answer, language=tgt_lang)
            outputs = deepcopy(inputs)
            outputs["output"] = translated_answer
            return outputs
        else:
            return inputs

    def create_openai_model(self, openai_api_key: str, model_name: str) -> OpenAI:
        if openai_api_key is None:
            openai_api_key = os.environ.get("OPENAI_API_KEY")
        llm = OpenAI(
            model_name=model_name, temperature=0.0, openai_api_key=openai_api_key
        )
        return llm

    def create_prompt(self) -> PromptTemplate:
        template = """
        Translate to {language}: {text} =>
        """
        prompt = PromptTemplate(input_variables=["text", "language"], template=template)
        return prompt


if __name__ == "__main__":
    lang = {
        "zh": {
            "question": "帮我介绍下《深海》这部电影",
            "answer": "《深海》是一部中国大陆的动画、奇幻电影,由田晓鹏导演,苏鑫、王亭文、滕奎兴等人主演。剧情简介是在大海的最深处,藏着所有秘密。一位现代少女(参宿)误入梦幻的深海世界,却因此邂逅了一段独特的生命旅程。![img](https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2635450820.webp)",
        },
        "ja": {
            "question": "映画「深海」について教えてください",
            "answer": "「深海」は、中国本土のアニメーションおよびファンタジー映画で、Tian Xiaopeng が監督し、Su Xin、Wang Tingwen、Teng Kuixing などが出演しています。あらすじは、海の最深部にはすべての秘密が隠されているというもの。夢のような深海の世界に迷い込んだ現代少女(さんすけ)は、それをきっかけに独特の人生の旅に出くわす。![img](https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2635450820.webp)",
        },
        "ko": {
            "question": "영화 딥씨에 대해 알려주세요",
            "answer": '"Deep Sea"는 Tian Xiaopeng 감독, Su Xin, Wang Tingwen, Teng Kuixing 등이 출연한 중국 본토의 애니메이션 및 판타지 영화입니다. 시놉시스는 바다 가장 깊은 곳에 모든 비밀이 숨겨져 있다는 것입니다. 현대 소녀(산스케)는 꿈 같은 심해 세계로 방황하지만 그것 때문에 독특한 삶의 여정을 만난다. ![img](https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2635450820.webp)',
        },
        "en": {
            "question": "Tell me about the movie '深海'",
            "answer": '"Deep Sea" is an animation and fantasy film in mainland China, directed by Tian Xiaopeng, starring Su Xin, Wang Tingwen, Teng Kuixing and others. The synopsis is that in the deepest part of the sea, all secrets are hidden. A modern girl (Sansuke) strays into the dreamy deep sea world, but encounters a unique journey of life because of it. ![img](https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2635450820.webp)',
        },
        "de": {
            "question": "Erzähl mir von dem Film '深海'",
            "answer": '"Deep Sea" ist ein Animations- und Fantasyfilm in Festlandchina unter der Regie von Tian Xiaopeng mit Su Xin, Wang Tingwen, Teng Kuixing und anderen in den Hauptrollen. Die Zusammenfassung ist, dass im tiefsten Teil des Meeres alle Geheimnisse verborgen sind. Ein modernes Mädchen (Sansuke) verirrt sich in die verträumte Tiefseewelt, trifft dabei aber auf eine einzigartige Lebensreise. ![img](https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2635450820.webp)',
        },
        "fr": {
            "question": "Parlez-moi du film 'Deep Sea'",
            "answer": "\"Deep Sea\" est un film d'animation et fantastique en Chine continentale, réalisé par Tian Xiaopeng, avec Su Xin, Wang Tingwen, Teng Kuixing et d'autres. Le synopsis est que dans la partie la plus profonde de la mer, tous les secrets sont cachés. Une fille moderne (Sansuke) s'égare dans le monde onirique des profondeurs marines, mais rencontre un voyage de vie unique à cause de cela. ![img](https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2635450820.webp)",
        },
        "ru": {
            "question": "Расскажите о фильме 'Глубокое море'",
            "answer": "«Глубокое море» — это анимационный и фэнтезийный фильм в материковом Китае, снятый Тянь Сяопином, в главных ролях Су Синь, Ван Тинвэнь, Тэн Куйсин и другие. Суть в том, что в самой глубокой части моря скрыты все секреты. Современная девушка (Сансукэ) заблудилась в мечтательном глубоководном мире, но из-за этого столкнулась с уникальным жизненным путешествием. ![img](https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2635450820.webp)",
        },
    }

    translator = Translator()
    for source in lang:
        for target in lang:
            print(source, "=>", target, end=":\t")
            question = lang[target]["question"]
            answer = lang[source]["answer"]
            inputs = {"input": question, "output": answer}

            result = translator(inputs)
            translated_answer = result["output"]

            if (
                detect_lang(question)
                == detect_lang(translated_answer)
                == languages.get(part1=target).name
            ):
                print("Y")
            else:
                print("N")
                print("====================")
                print("Question:\t", detect_lang(question), " - ", question)
                print("Answer:\t", detect_lang(answer), " - ", answer)
                print(
                    "Translated Answer:\t",
                    detect_lang(translated_answer),
                    " - ",
                    translated_answer,
                )
                print("====================")
agent_tools.py
ADDED
@@ -0,0 +1,59 @@
# ---------- Dependencies ----------
import os
import asyncio
import faiss
from typing import Any, Optional, List
from contextlib import contextmanager

from pydantic import BaseModel, Field
from langchain import LLMChain, OpenAI, PromptTemplate
from langchain.chains.base import Chain

from langchain.experimental import BabyAGI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores import FAISS

from langchain.docstore import InMemoryDocstore
from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
from langchain.agents import ZeroShotAgent, Tool, AgentExecutor

from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.tools import BaseTool, DuckDuckGoSearchRun
from langchain.tools.file_management.read import ReadFileTool

from langchain.tools.file_management.write import WriteFileTool
from langchain.tools.human.tool import HumanInputRun
from swarms.tools import Terminal, CodeWriter, CodeEditor, process_csv, WebpageQATool

from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT
from langchain.chat_models import ChatOpenAI
from langchain.tools import tool

# ---------- Constants ----------
ROOT_DIR = "./data/"

# ---------- Tools ----------
openai_api_key = os.environ["OPENAI_API_KEY"]
llm = ChatOpenAI(model_name="gpt-4", temperature=1.0, openai_api_key=openai_api_key)

worker_tools = [
    DuckDuckGoSearchRun(),
    WriteFileTool(root_dir=ROOT_DIR),
    ReadFileTool(root_dir=ROOT_DIR),
    process_csv,
    WebpageQATool(qa_chain=load_qa_with_sources_chain(llm)),
    # Tool(name='terminal', func=Terminal.execute, description='Operates a terminal'),
    # Tool(name='code_writer', func=CodeWriter(), description='Writes code'),
    # Tool(name='code_editor', func=CodeEditor(), description='Edits code'),
]

# ---------- Vector Store ----------
embeddings_model = OpenAIEmbeddings()
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
airbnb/__init__.py
ADDED
@@ -0,0 +1,8 @@
from ..registry import register


@register("airbnb")
def airbnb():
    from .api import build_tool

    return build_tool
airbnb/api.py
ADDED
@@ -0,0 +1,354 @@
import requests
import json
from datetime import date, datetime, timedelta
import os
from ..tool import Tool

from typing import Optional, Dict, List


def build_tool(config) -> Tool:
    tool = Tool(
        "Short-term rental and housing information",
        "Look up rental and housing information",
        name_for_model="Airbnb",
        description_for_model="Plugin for looking up rental and housing information",
        logo_url="https://your-app-url.com/.well-known/logo.png",
        contact_email="[email protected]",
        legal_info_url="[email protected]",
    )

    BASE_URL = "https://airbnb19.p.rapidapi.com/api/v1"
    KEY = config["subscription_key"]
    HEADERS = {"X-RapidAPI-Key": KEY, "X-RapidAPI-Host": "airbnb19.p.rapidapi.com"}

    @tool.get("/search_property")
    def search_property(
        _id: str,
        display_name: Optional[str] = None,
        total_records: Optional[str] = "10",
        currency: Optional[str] = "USD",
        offset: Optional[str] = None,
        category: Optional[str] = None,
        adults: Optional[int] = 1,
        children: Optional[int] = None,
        infants: Optional[int] = None,
        pets: Optional[int] = None,
        checkin: Optional[str] = None,
        checkout: Optional[str] = None,
        priceMin: Optional[int] = None,
        priceMax: Optional[int] = None,
        minBedrooms: Optional[int] = None,
        minBeds: Optional[int] = None,
        minBathrooms: Optional[int] = None,
        property_type: Optional[List[str]] = None,
        host_languages: Optional[List[str]] = None,
        amenities: Optional[List[str]] = None,
        type_of_place: Optional[List[str]] = None,
        top_tier_stays: Optional[List[str]] = None,
        self_check_in: Optional[bool] = None,
        instant_book: Optional[bool] = None,
        super_host: Optional[bool] = None,
        languageId: Optional[str] = None,
    ) -> dict:
        """
        This function takes various parameters to search properties on Airbnb.

        Parameters:
        _id (str): The ID of the destination.
        display_name (Optional[str]): The name of the destination.
        total_records (Optional[str]): The number of records to be retrieved per API call.
        currency (Optional[str]): The currency for the transaction.
        offset (Optional[str]): The offset for the search result.
        category (Optional[str]): The category of the properties.
        adults (Optional[int]): The number of adults.
        children (Optional[int]): The number of children.
        infants (Optional[int]): The number of infants.
        pets (Optional[int]): The number of pets.
        checkin (Optional[str]): The check-in date.
        checkout (Optional[str]): The check-out date.
        priceMin (Optional[int]): The minimum price.
        priceMax (Optional[int]): The maximum price.
        minBedrooms (Optional[int]): The minimum number of bedrooms.
        minBeds (Optional[int]): The minimum number of beds.
        minBathrooms (Optional[int]): The minimum number of bathrooms.
        property_type (Optional[List[str]]): The type of the property.
        host_languages (Optional[List[str]]): The languages that the host can speak.
        amenities (Optional[List[str]]): The amenities provided by the property.
        type_of_place (Optional[List[str]]): The type of the place.
        top_tier_stays (Optional[List[str]]): The list of top-tier stays.
        self_check_in (Optional[bool]): If the property has a self check-in feature.
        instant_book (Optional[bool]): If the property can be booked instantly.
        super_host (Optional[bool]): If the host is a super host.
        languageId (Optional[str]): The ID of the language for the response.

        Returns:
        dict: A dictionary that contains the search results.
        """

        params = {
            "id": _id,
            "display_name": display_name,
            "totalRecords": total_records,
            "currency": currency,
            "offset": offset,
            "category": category,
            "adults": adults,
            "children": children,
            "infants": infants,
            "pets": pets,
            "checkin": checkin,
            "checkout": checkout,
            "priceMin": priceMin,
            "priceMax": priceMax,
            "minBedrooms": minBedrooms,
            "minBeds": minBeds,
            "minBathrooms": minBathrooms,
            "property_type": property_type,
            "host_languages": host_languages,
            "amenities": amenities,
            "type_of_place": type_of_place,
            "top_tier_stays": top_tier_stays,
            "self_check_in": self_check_in,
            "instant_book": instant_book,
            "super_host": super_host,
            "languageId": languageId,
        }
        response = requests.get(
            f"{BASE_URL}/searchPropertyByPlace", headers=HEADERS, params=params
        )
        return response.json()["data"][0]

    @tool.get("/search_property_by_coordinates")
    def search_property_by_coordinates(
        neLat: float,
        neLng: float,
        swLat: float,
        swLng: float,
        currency: Optional[str] = "USD",
        nextPageCursor: Optional[str] = None,
        totalRecords: Optional[str] = None,
        infants: Optional[int] = None,
        adults: Optional[int] = 1,
        children: Optional[int] = None,
        pets: Optional[int] = None,
        checkin: Optional[str] = None,
        checkout: Optional[str] = None,
        priceMin: Optional[int] = None,
        priceMax: Optional[int] = None,
        minBedrooms: Optional[int] = None,
        minBeds: Optional[int] = None,
        minBathrooms: Optional[int] = None,
        property_type: Optional[List[str]] = None,
        host_languages: Optional[List[str]] = None,
        amenities: Optional[List[str]] = None,
        type_of_place: Optional[List[str]] = None,
        top_tier_stays: Optional[List[str]] = None,
        super_host: Optional[bool] = None,
    ) -> dict:
        """
        This function takes GEO coordinates and various other parameters to search properties on Airbnb.

        Parameters:
        neLat (float): Latitude of the northeastern corner of the search area.
        neLng (float): Longitude of the northeastern corner of the search area.
        swLat (float): Latitude of the southwestern corner of the search area.
        swLng (float): Longitude of the southwestern corner of the search area.
        Other parameters are the same as in the search_property function.

        Returns:
        dict: A dictionary that contains the search results.
        """

        params = {
            "neLat": neLat,
            "neLng": neLng,
            "swLat": swLat,
            "swLng": swLng,
            "currency": currency,
            "nextPageCursor": nextPageCursor,
            "totalRecords": totalRecords,
            "infants": infants,
            "adults": adults,
            "children": children,
            "pets": pets,
            "checkin": checkin,
            "checkout": checkout,
            "priceMin": priceMin,
            "priceMax": priceMax,
            "minBedrooms": minBedrooms,
            "minBeds": minBeds,
            "minBathrooms": minBathrooms,
            "property_type": property_type,
            "host_languages": host_languages,
            "amenities": amenities,
            "type_of_place": type_of_place,
            "top_tier_stays": top_tier_stays,
            "super_host": super_host,
        }
        response = requests.get(
            "https://airbnb19.p.rapidapi.com/api/v2/searchPropertyByGEO",
            headers=HEADERS,
            params=params,
        )
        return response.json()["data"]["list"][0]

    @tool.get("/search_destination")
    def search_destination(query: str, country: Optional[str] = None) -> dict:
        """
        This function performs a destination search given a query and optionally a country, and returns the destination's 'ID' information.

        Parameters:
        query (str): The search query.
        country (Optional[str]): The country for the search.

        Returns:
        dict: A dictionary that contains the search results, including ID information for a destination.
        """

        params = {"query": query, "country": country}
        response = requests.get(
            f"{BASE_URL}/searchDestination", headers=HEADERS, params=params
        )
        return response.json()

    @tool.get("/property_by_coordinates")
    def property_by_coordinates(
        long: float,
        lat: float,
        d: Optional[float] = None,
        includeSold: Optional[bool] = None,
    ):
        """
        Search property by coordinates.

        Args:
        long (float): Longitude of the property. This is a required parameter.
        lat (float): Latitude of the property. This is a required parameter.
        d (float, optional): Diameter in miles. The max and min values are 0.5 and 0.05 respectively. The default value is 0.1.
        includeSold (bool, optional): Include sold properties in the results. True or 1 to include (default), False or 0 to exclude.

        Returns:
        A response object from the API with an array of property IDs.
        """
        params = {
            "long": long,
            "lat": lat,
            "d": d,
            "includeSold": includeSold,
        }

        # Remove parameters that are None
        params = {k: v for k, v in params.items() if v is not None}
        url = BASE_URL + "/propertyByCoordinates"
        # Send GET request to the API endpoint
        response = requests.get(url, headers=HEADERS, params=params)

        return response.json()

    @tool.get("/get_property_details")
    def get_property_details(
        propertyId: int,
        currency: Optional[str] = "USD",
        checkIn: Optional[str] = None,
        checkOut: Optional[str] = None,
        adults: Optional[int] = 1,
        children: Optional[int] = None,
        infants: Optional[int] = None,
        pets: Optional[int] = None,
        languageId: Optional[str] = None,
    ) -> dict:
        """
        This function retrieves the details of a property given its ID.

        Parameters:
        propertyId (int): The ID of the property.
        currency (Optional[str]): The currency for the transaction.
        checkIn (Optional[str]): The check-in date.
        checkOut (Optional[str]): The check-out date.
        adults (Optional[int]): The number of adults.
        children (Optional[int]): The number of children.
        infants (Optional[int]): The number of infants.
        pets (Optional[int]): The number of pets.
        languageId (Optional[str]): The ID of the language for the response.

        Returns:
        dict: A dictionary that contains the details of the property.
        """

        params = {
            "propertyId": propertyId,
            "currency": currency,
            "checkIn": checkIn,
            "checkOut": checkOut,
            "adults": adults,
            "children": children,
            "infants": infants,
            "pets": pets,
            "languageId": languageId,
        }
        response = requests.get(
            "https://airbnb19.p.rapidapi.com/api/v2/getPropertyDetails",
            headers=HEADERS,
            params=params,
        )
        return response.json()

    @tool.get("/check_availability")
    def check_availability(propertyId: int) -> dict:
        """
        This function checks the availability of a property given its ID.

        Parameters:
        propertyId (int): The ID of the property.

        Returns:
        dict: A dictionary that contains the availability of the property.
        """
        params = {
            "propertyId": propertyId,
        }
        response = requests.get(
            f"{BASE_URL}/checkAvailability", headers=HEADERS, params=params
        )
        return response.json()

    @tool.get("/get_property_reviews")
    def get_property_reviews(propertyId: int) -> dict:
        """
        This function retrieves the reviews of a property given its ID.

        Parameters:
        propertyId (int): The ID of the property.

        Returns:
        dict: A dictionary that contains the reviews of the property.
        """
        params = {
            "propertyId": propertyId,
        }
        response = requests.get(
            f"{BASE_URL}/getPropertyReviews", headers=HEADERS, params=params
        )
        return response.json()

    @tool.get("/get_property_checkout_price")
    def get_property_checkout_price(propertyId: int, checkIn: str) -> dict:
        """
        This function retrieves the checkout cost of a property given its ID and check-in date.

        Parameters:
        propertyId (int): The ID of the property.
        checkIn (str): The check-in date.

        Returns:
        dict: A dictionary that contains the checkout price of the property.
        """
        params = {"propertyId": propertyId, "checkIn": checkIn}
        response = requests.get(
            f"{BASE_URL}/getPropertyCheckoutPrice", headers=HEADERS, params=params
        )
        return response.json()

    return tool
airbnb/readme.md
ADDED
@@ -0,0 +1,29 @@
# Airbnb Service

Contributor: [Kunlun Zhu](https://github.com/Kunlun-Zhu)

You can get your RapidAPI key here: https://rapidapi.com/hub

You need to subscribe to the 'Airbnb API' in your account to use this tool.

# Short-term Rental and Housing Information Tool

This tool, named `Short-term Rental and Housing Information`, is designed to interact with the Airbnb API to search for properties, get property details, check availability, get property reviews, and retrieve the checkout price. The tool operates by making HTTP requests to the Airbnb API and formatting the responses into an easily usable form.

## Main Functionality

1. **Search for Properties**: This functionality allows you to search for properties based on a variety of parameters like the number of adults, children, and infants, property type, amenities, check-in and check-out dates, and many more. This is done using the `search_property` function.

2. **Search Property by Coordinates**: This function allows you to search for properties in a specific geographic area defined by the northeast and southwest coordinates of the area. This is done using the `search_property_by_coordinates` function.

3. **Search for Destination**: The `search_destination` function performs a destination search given a query and optionally a country. It returns the destination's 'ID' information.

4. **Get Property Details**: The `get_property_details` function is used to retrieve detailed information about a specific property. This includes the number of rooms, amenities, location, and other relevant information.

5. **Check Property Availability**: This function, `check_availability`, allows you to check if a property is available for booking.

6. **Get Property Reviews**: You can use the `get_property_reviews` function to retrieve reviews of a property.

7. **Get Property Checkout Price**: The `get_property_checkout_price` function is used to get the checkout cost of a property given its ID and check-in date.

This tool provides a simple and effective way to interact with the Airbnb API, making it easier for developers to incorporate Airbnb data into their applications. A minimal sketch of the typical two-step flow follows below.
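The sketch below chains `search_destination` into `search_property`, the flow the readme describes. It is a sketch only: it assumes the local tool server from airbnb/test.py is running on port 8079 and serving the routes registered in airbnb/api.py under the tool's URL prefix, and the JSON field access is an assumed response layout, not a verified contract.

# Hedged sketch: resolve a destination ID, then search properties for it.
import requests

BASE = "http://127.0.0.1:8079/tools/airbnb"  # assumed route prefix (see airbnb/test.py)

# Step 1: resolve a free-text place name to a destination ID.
dest = requests.get(f"{BASE}/search_destination",
                    params={"query": "Santa Monica", "country": "US"}).json()
dest_id = dest["data"][0]["id"]  # assumed response layout

# Step 2: search properties for that destination ID.
listings = requests.get(f"{BASE}/search_property",
                        params={"_id": dest_id, "adults": 2, "currency": "USD"}).json()
print(listings)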
airbnb/test.py
ADDED
@@ -0,0 +1,11 @@
from swarms.tools.agent.singletool import load_single_tools, STQuestionAnswerer

tool_name, tool_url = "Airbnb", "http://127.0.0.1:8079/tools/airbnb/"
tools_name, tools_config = load_single_tools(tool_name, tool_url)
print(tools_name, tools_config)

qa = STQuestionAnswerer()

agent = qa.load_tools(tools_name, tools_config)

agent("List some houses to rent in Santa Monica, CA.")
apitool.py
ADDED
@@ -0,0 +1,168 @@
"""Interface for tools."""
from inspect import signature
from typing import Any, Awaitable, Callable, Optional, Union

from langchain.agents import Tool as LangChainTool
from langchain.tools.base import BaseTool
import requests
import json
import aiohttp
import http.client

http.client._MAXLINE = 655360

from tool_logging import get_logger

logger = get_logger(__name__)


class Tool(LangChainTool):
    tool_logo_md: str = ""


class RequestTool(BaseTool):
    """Tool that takes in a function or coroutine directly."""

    description: str = ""
    func: Callable[[str], str]
    afunc: Callable[[str], str]
    coroutine: Optional[Callable[[str], Awaitable[str]]] = None
    max_output_len: int = 4000
    tool_logo_md: str = ""

    def _run(self, tool_input: str) -> str:
        """Use the tool."""
        return self.func(tool_input)

    async def _arun(self, tool_input: str) -> str:
        """Use the tool asynchronously."""
        ret = await self.afunc(tool_input)
        return ret

    def convert_prompt(self, params):
        lines = "Your input should be a json (args json schema): {{"
        for p in params:
            logger.debug(p)
            optional = not p["required"]
            description = p.get("description", "")
            if len(description) > 0:
                description = "(" + description + ")"

            lines += '"{name}" : {type}{desc}, '.format(
                name=p["name"],
                type=p["schema"]["type"],
                optional=optional,
                desc=description,
            )

        lines += "}}"
        return lines

    def __init__(self, root_url, func_url, method, request_info, **kwargs):
        """Store the function, description, and tool name for one API endpoint."""
        url = root_url + func_url

        def func(json_args):
            if isinstance(json_args, str):
                try:
                    json_args = json.loads(json_args)
                except Exception:
                    return "Your input can not be parsed as json, please use thought."
            if "tool_input" in json_args:
                json_args = json_args["tool_input"]

            # if it's post, put or patch, send the arguments as a json body
            if method.lower() in ["post", "put", "patch"]:
                response = getattr(requests, method.lower())(url, json=json_args)
            else:
                # for other methods, we use get, and use json_args as query params
                response = requests.get(url, params=json_args)
            if response.status_code == 200:
                message = response.text
            else:
                message = f"Error code {response.status_code}. You can try (1) Change your input (2) Call another function. (If the same error code is produced more than 4 times, please use Thought: I can not use these APIs, so I will stop. Final Answer: No Answer, please check the APIs.)"

            message = message[: self.max_output_len]  # TODO: not rigorous, to improve
            return message

        def convert_openapi_to_params(request_body):
            if not request_body:
                return []
            params = []
            for content_type, content in request_body["content"].items():
                schema = content["schema"]
                properties = schema.get("properties", {})
                required = schema.get("required", [])
                for key, value in properties.items():
                    param = {
                        "name": key,
                        "schema": value,
                        "required": key in required,
                        "description": value.get("description", ""),
                    }
                    if (
                        content_type == "multipart/form-data"
                        and value.get("format") == "binary"
                    ):
                        param["type"] = "file"
                    elif content_type in [
                        "application/x-www-form-urlencoded",
                        "multipart/form-data",
                    ]:
                        param["type"] = "form"
                    else:
                        param["type"] = "json"
                    params.append(param)
            return params

        async def afunc(json_args):
            if isinstance(json_args, str):
                try:
                    json_args = json.loads(json_args)
                except Exception:
                    return "Your input can not be parsed as json, please use thought."
            if "tool_input" in json_args:
                json_args = json_args["tool_input"]

            async with aiohttp.ClientSession() as session:
                async with session.get(url, params=json_args) as response:
                    if response.status == 200:
                        message = await response.text()
                    else:
                        message = f"Error code {response.status}. You can try (1) Change your input (2) Call another function. (If the same error code is produced more than 4 times, please use Thought: I can not use these APIs, so I will stop. Final Answer: No Answer, please check the APIs.)"

            message = message[: self.max_output_len]  # TODO: not rigorous, to improve
            return message

        tool_name = func_url.replace("/", ".").strip(".")
        str_doc = ""
        if "parameters" in request_info[method]:
            str_doc = self.convert_prompt(request_info[method]["parameters"])

        if "requestBody" in request_info[method]:
            str_doc = (
                str_doc
                + "\n"
                + self.convert_prompt(
                    convert_openapi_to_params(request_info[method]["requestBody"])
                )
            )

        # description = f"- {tool_name}:\n" + \
        #     request_info[method].get('summary', '').replace("{", "{{").replace("}", "}}")
        description = (
            request_info[method]
            .get("description", "")
            .replace("{", "{{")
            .replace("}", "}}")
            + ". "
            + str_doc
            + f" The Action to trigger this API should be {tool_name} and the input parameters should be a json dict string. Pay attention to the type of parameters."
        )

        logger.info("API Name: {}".format(tool_name))
        logger.info("API Description: {}".format(description))

        super(RequestTool, self).__init__(
            name=tool_name, func=func, afunc=afunc, description=description, **kwargs
        )
app.py
ADDED
@@ -0,0 +1,380 @@
from transformers import AutoTokenizer
import boto3
from botocore.exceptions import NoCredentialsError
import tokenize
import requests
import os
import time
from functools import partial
from pathlib import Path
from threading import Lock
import warnings
import json
from vllm import LLM

os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False'
os.environ['BITSANDBYTES_NOWELCOME'] = '1'
warnings.filterwarnings('ignore', category=UserWarning, message='TypedStorage is deprecated')
warnings.filterwarnings('ignore', category=UserWarning, message='Using the update method is deprecated')
warnings.filterwarnings('ignore', category=UserWarning, message='Field "model_name" has conflict')

import matplotlib
import gradio as gr
from tools_controller import MTQuestionAnswerer, load_valid_tools
from singletool import STQuestionAnswerer
from langchain.schema import AgentFinish
from tool_server import run_tool_server
from threading import Thread
from multiprocessing import Process
from langchain.llms import VLLM
import yaml

matplotlib.use('Agg')  # This fixes LaTeX rendering on some systems

tool_server_flag = False

def start_tool_server():
    # server = Thread(target=run_tool_server)
    server = Process(target=run_tool_server)
    server.start()
    global tool_server_flag
    tool_server_flag = True


DEFAULTMODEL = "ChatGPT"  # "GPT-3.5"

# Read the model/ directory and get the list of models
available_models = ["ChatGPT", "GPT-3.5"]

tools_mappings = {
    "klarna": "https://www.klarna.com/",
    "weather": "http://127.0.0.1:8079/tools/weather/",
    # "database": "http://127.0.0.1:8079/tools/database/",
    # "db_diag": "http://127.0.0.1:8079/tools/db_diag/",
    "chemical-prop": "http://127.0.0.1:8079/tools/chemical-prop/",
    "douban-film": "http://127.0.0.1:8079/tools/douban-film/",
    "wikipedia": "http://127.0.0.1:8079/tools/wikipedia/",
    # "wikidata": "http://127.0.0.1:8079/tools/kg/wikidata/",
    "wolframalpha": "http://127.0.0.1:8079/tools/wolframalpha/",
    "bing_search": "http://127.0.0.1:8079/tools/bing_search/",
    "office-ppt": "http://127.0.0.1:8079/tools/office-ppt/",
    "stock": "http://127.0.0.1:8079/tools/stock/",
    "bing_map": "http://127.0.0.1:8079/tools/map.bing_map/",
    # "baidu_map": "http://127.0.0.1:8079/tools/map/baidu_map/",
    "zillow": "http://127.0.0.1:8079/tools/zillow/",
    "airbnb": "http://127.0.0.1:8079/tools/airbnb/",
    "job_search": "http://127.0.0.1:8079/tools/job_search/",
    # "baidu-translation": "http://127.0.0.1:8079/tools/translation/baidu-translation/",
    # "nllb-translation": "http://127.0.0.1:8079/tools/translation/nllb-translation/",
    "tutorial": "http://127.0.0.1:8079/tools/tutorial/",
    "file_operation": "http://127.0.0.1:8079/tools/file_operation/",
    "meta_analysis": "http://127.0.0.1:8079/tools/meta_analysis/",
    "code_interpreter": "http://127.0.0.1:8079/tools/code_interpreter/",
    "arxiv": "http://127.0.0.1:8079/tools/arxiv/",
    "google_places": "http://127.0.0.1:8079/tools/google_places/",
    "google_serper": "http://127.0.0.1:8079/tools/google_serper/",
    "google_scholar": "http://127.0.0.1:8079/tools/google_scholar/",
    "python": "http://127.0.0.1:8079/tools/python/",
    "sceneXplain": "http://127.0.0.1:8079/tools/sceneXplain/",
    "shell": "http://127.0.0.1:8079/tools/shell/",
    "image_generation": "http://127.0.0.1:8079/tools/image_generation/",
    "hugging_tools": "http://127.0.0.1:8079/tools/hugging_tools/",
    "gradio_tools": "http://127.0.0.1:8079/tools/gradio_tools/",
    "travel": "http://127.0.0.1:8079/tools/travel",
    "walmart": "http://127.0.0.1:8079/tools/walmart",
}

# data = json.load(open('sourcery-engine/tools/openai.json'))  # Load the JSON file
# items = data['items']  # Get the list of items

# for plugin in items:  # Iterate over items, not data
#     url = plugin['manifest']['api']['url']
#     tool_name = plugin['namespace']
#     tools_mappings[tool_name] = url[:-len('/.well-known/openai.yaml')]

# print(tools_mappings)

valid_tools_info = []
all_tools_list = []

gr.close_all()

MAX_TURNS = 30
MAX_BOXES = MAX_TURNS * 2

return_msg = []
chat_history = ""

MAX_SLEEP_TIME = 40
valid_tools_info = {}

def load_tools():
    global valid_tools_info
    global all_tools_list
    try:
        valid_tools_info = load_valid_tools(tools_mappings)
        print(f"valid_tools_info: {valid_tools_info}")  # Debugging line
    except BaseException as e:
        print(repr(e))
    all_tools_list = sorted(list(valid_tools_info.keys()))
    print(f"all_tools_list: {all_tools_list}")  # Debugging line
    return gr.update(choices=all_tools_list)

def set_environ(OPENAI_API_KEY: str = "",
                WOLFRAMALPH_APP_ID: str = "",
                WEATHER_API_KEYS: str = "",
                BING_SUBSCRIPT_KEY: str = "",
                ALPHA_VANTAGE_KEY: str = "",
                BING_MAP_KEY: str = "",
                BAIDU_TRANSLATE_KEY: str = "",
                RAPIDAPI_KEY: str = "",
                SERPER_API_KEY: str = "",
                GPLACES_API_KEY: str = "",
                SCENEX_API_KEY: str = "",
                STEAMSHIP_API_KEY: str = "",
                HUGGINGFACE_API_KEY: str = "",
                AMADEUS_ID: str = "",
                AMADEUS_KEY: str = "",
                AWS_ACCESS_KEY_ID: str = "",
                AWS_SECRET_ACCESS_KEY: str = "",
                AWS_DEFAULT_REGION: str = "",
                ):
    os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
    os.environ["WOLFRAMALPH_APP_ID"] = WOLFRAMALPH_APP_ID
    os.environ["WEATHER_API_KEYS"] = WEATHER_API_KEYS
    os.environ["BING_SUBSCRIPT_KEY"] = BING_SUBSCRIPT_KEY
    os.environ["ALPHA_VANTAGE_KEY"] = ALPHA_VANTAGE_KEY
    os.environ["BING_MAP_KEY"] = BING_MAP_KEY
    os.environ["BAIDU_TRANSLATE_KEY"] = BAIDU_TRANSLATE_KEY
    os.environ["RAPIDAPI_KEY"] = RAPIDAPI_KEY
    os.environ["SERPER_API_KEY"] = SERPER_API_KEY
    os.environ["GPLACES_API_KEY"] = GPLACES_API_KEY
    os.environ["SCENEX_API_KEY"] = SCENEX_API_KEY
    os.environ["STEAMSHIP_API_KEY"] = STEAMSHIP_API_KEY
    os.environ["HUGGINGFACE_API_KEY"] = HUGGINGFACE_API_KEY
    os.environ["AMADEUS_ID"] = AMADEUS_ID
    os.environ["AMADEUS_KEY"] = AMADEUS_KEY
    os.environ["AWS_ACCESS_KEY_ID"] = AWS_ACCESS_KEY_ID
    os.environ["AWS_SECRET_ACCESS_KEY"] = AWS_SECRET_ACCESS_KEY
    os.environ["AWS_DEFAULT_REGION"] = AWS_DEFAULT_REGION

    if not tool_server_flag:
        start_tool_server()
        time.sleep(MAX_SLEEP_TIME)

    # Check if AWS keys are set and if so, configure AWS
    if AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY and AWS_DEFAULT_REGION:
        try:
            s3 = boto3.client('s3')
            s3.list_buckets()
            aws_status = "AWS setup successful"
        except NoCredentialsError:
            aws_status = "AWS setup failed: Invalid credentials"
    else:
        aws_status = "Keys set successfully"

    # The UI has a single output (the button), so fold the AWS status into its label.
    return gr.update(value=f"OK! ({aws_status})")

def show_avatar_imgs(tools_chosen):
    if len(tools_chosen) == 0:
        tools_chosen = list(valid_tools_info.keys())
    img_template = '<a href="{}" style="float: left"> <img style="margin:5px" src="{}.png" width="24" height="24" alt="avatar" /> {} </a>'
    imgs = [valid_tools_info[tool]['avatar'] for tool in tools_chosen if valid_tools_info[tool]['avatar'] is not None]
    imgs = ' '.join([img_template.format(img, img, tool) for img, tool in zip(imgs, tools_chosen)])
    return [gr.update(value='<span class="">' + imgs + '</span>', visible=True), gr.update(visible=True)]

def answer_by_tools(question, tools_chosen, model_chosen=DEFAULTMODEL):
    global return_msg
    return_msg += [(question, None), (None, '...')]
    yield [gr.update(visible=True, value=return_msg), gr.update(), gr.update()]
    OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY', '')
    OPENAI_BASE_URL = os.environ.get('OPENAI_BASE_URL', 'https://api.openai.com')

    if len(tools_chosen) == 0:  # if no tools are chosen, use them all (TODO: what if the pool is too large?)
        tools_chosen = list(valid_tools_info.keys())

    if len(tools_chosen) == 1:
        answerer = STQuestionAnswerer(OPENAI_API_KEY.strip(), OPENAI_BASE_URL, stream_output=True, llm=model_chosen)
        agent_executor = answerer.load_tools(tools_chosen[0], valid_tools_info[tools_chosen[0]],
                                             prompt_type="react-with-tool-description", return_intermediate_steps=True)
    else:
        answerer = MTQuestionAnswerer(OPENAI_API_KEY.strip(), OPENAI_BASE_URL,
                                      load_valid_tools({k: tools_mappings[k] for k in tools_chosen}),
                                      stream_output=True, llm=model_chosen)

        agent_executor = answerer.build_runner()

    global chat_history
    chat_history += "Question: " + question + "\n"
    question = chat_history
    for inter in agent_executor(question):
        if isinstance(inter, AgentFinish): continue
        result_str = []
        return_msg.pop()
        if isinstance(inter, dict):
            result_str.append("<font color=red>Answer:</font> {}".format(inter['output']))
            chat_history += "Answer:" + inter['output'] + "\n"
            result_str.append("...")
        else:
            try:
                not_observation = inter[0].log
            except Exception:
                print(inter[0])
                not_observation = inter[0]
            if not not_observation.startswith('Thought:'):
                not_observation = "Thought: " + not_observation
            chat_history += not_observation
            not_observation = not_observation.replace('Thought:', '<font color=green>Thought: </font>')
            not_observation = not_observation.replace('Action:', '<font color=purple>Action: </font>')
            not_observation = not_observation.replace('Action Input:', '<font color=purple>Action Input: </font>')
            result_str.append("{}".format(not_observation))
            result_str.append("<font color=blue>Action output:</font>\n{}".format(inter[1]))
            chat_history += "\nAction output:" + inter[1] + "\n"
            result_str.append("...")
        return_msg += [(None, result) for result in result_str]
        yield [gr.update(visible=True, value=return_msg), gr.update(), gr.update()]
    return_msg.pop()
    if return_msg[-1][1].startswith("<font color=red>Answer:</font> "):
        return_msg[-1] = (return_msg[-1][0], return_msg[-1][1].replace("<font color=red>Answer:</font> ",
                                                                       "<font color=green>Final Answer:</font> "))
    yield [gr.update(visible=True, value=return_msg), gr.update(visible=True), gr.update(visible=False)]


def retrieve(tools_search):
    if tools_search == "":
        return gr.update(choices=all_tools_list)
    else:
        url = "http://127.0.0.1:8079/retrieve"
        param = {
            "query": tools_search
        }
        response = requests.post(url, json=param)
        result = response.json()
        retrieved_tools = result["tools"]
        return gr.update(choices=retrieved_tools)

def clear_retrieve():
    return [gr.update(value=""), gr.update(choices=all_tools_list)]


def clear_history():
    global return_msg
    global chat_history
    return_msg = []
    chat_history = ""
    yield gr.update(visible=True, value=return_msg)


def fetch_tokenizer(model_name):
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        return f"Tokenizer for {model_name} loaded successfully."
    except Exception as e:
        return f"Error loading tokenizer: {str(e)}"

# Add this function to handle the button click
import sky

# with gr.Blocks(css=css, analytics_enabled=False, title=title, theme=ui.theme) as demo:
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=14):
            gr.Markdown("")

    with gr.Tab("Key setting"):
        OPENAI_API_KEY = gr.Textbox(label="OpenAI API KEY:", placeholder="sk-...", type="text")
        WOLFRAMALPH_APP_ID = gr.Textbox(label="Wolframalpha app id:", placeholder="Key to use wolframalpha", type="text")
        WEATHER_API_KEYS = gr.Textbox(label="Weather api key:", placeholder="Key to use weather api", type="text")
        BING_SUBSCRIPT_KEY = gr.Textbox(label="Bing subscript key:", placeholder="Key to use bing search", type="text")
        ALPHA_VANTAGE_KEY = gr.Textbox(label="Stock api key:", placeholder="Key to use stock api", type="text")
        BING_MAP_KEY = gr.Textbox(label="Bing map key:", placeholder="Key to use bing map", type="text")
        BAIDU_TRANSLATE_KEY = gr.Textbox(label="Baidu translation key:", placeholder="Key to use baidu translation", type="text")
        RAPIDAPI_KEY = gr.Textbox(label="Rapidapi key:", placeholder="Key to use zillow, airbnb and job search", type="text")
        SERPER_API_KEY = gr.Textbox(label="Serper key:", placeholder="Key to use google serper and google scholar", type="text")
        GPLACES_API_KEY = gr.Textbox(label="Google places key:", placeholder="Key to use google places", type="text")
        SCENEX_API_KEY = gr.Textbox(label="Scenex api key:", placeholder="Key to use sceneXplain", type="text")
        STEAMSHIP_API_KEY = gr.Textbox(label="Steamship api key:", placeholder="Key to use image generation", type="text")
        HUGGINGFACE_API_KEY = gr.Textbox(label="Huggingface api key:", placeholder="Key to use models in huggingface hub", type="text")
        AMADEUS_KEY = gr.Textbox(label="Amadeus key:", placeholder="Key to use Amadeus", type="text")
        AMADEUS_ID = gr.Textbox(label="Amadeus ID:", placeholder="Amadeus ID",
                                type="text")
        AWS_ACCESS_KEY_ID = gr.Textbox(label="AWS Access Key ID:", placeholder="AWS Access Key ID", type="text")
        AWS_SECRET_ACCESS_KEY = gr.Textbox(label="AWS Secret Access Key:", placeholder="AWS Secret Access Key", type="text")
        AWS_DEFAULT_REGION = gr.Textbox(label="AWS Default Region:", placeholder="AWS Default Region", type="text")
        key_set_btn = gr.Button(value="Set keys!")


    with gr.Tab("Chat with Tool"):
        with gr.Row():
            with gr.Column(scale=4):
                with gr.Row():
                    with gr.Column(scale=0.85):
                        txt = gr.Textbox(show_label=False, placeholder="Question here. Use Shift+Enter to add new line.",
                                         lines=1)
                    with gr.Column(scale=0.15, min_width=0):
                        buttonChat = gr.Button("Chat")

                chatbot = gr.Chatbot(show_label=False, visible=True)
                buttonClear = gr.Button("Clear History")
                buttonStop = gr.Button("Stop", visible=False)

            with gr.Column(scale=4):
                with gr.Row():
                    tools_search = gr.Textbox(
                        lines=1,
                        label="Tools Search",
                        placeholder="Please input some text to search tools.",
                    )
                    buttonSearch = gr.Button("Reset search condition")
                tools_chosen = gr.CheckboxGroup(
                    choices=all_tools_list,
                    # value=["chemical-prop"],
                    label="Tools provided",
                    info="Choose the tools to solve your question.",
                )

    # TODO fix webgl galaxy background
    # def serve_iframe():
    #     return "<iframe src='http://localhost:8000/shader.html' width='100%' height='400'></iframe>"

    # iface = gr.Interface(fn=serve_iframe, inputs=[], outputs=gr.outputs.HTML())

    key_set_btn.click(fn=set_environ, inputs=[
        OPENAI_API_KEY,
        WOLFRAMALPH_APP_ID,
        WEATHER_API_KEYS,
        BING_SUBSCRIPT_KEY,
        ALPHA_VANTAGE_KEY,
        BING_MAP_KEY,
        BAIDU_TRANSLATE_KEY,
        RAPIDAPI_KEY,
        SERPER_API_KEY,
        GPLACES_API_KEY,
        SCENEX_API_KEY,
        STEAMSHIP_API_KEY,
        HUGGINGFACE_API_KEY,
        AMADEUS_ID,
        AMADEUS_KEY,
        AWS_ACCESS_KEY_ID,
        AWS_SECRET_ACCESS_KEY,
        AWS_DEFAULT_REGION,
    ], outputs=key_set_btn)
    key_set_btn.click(fn=load_tools, outputs=tools_chosen)

    tools_search.change(retrieve, tools_search, tools_chosen)
    buttonSearch.click(clear_retrieve, [], [tools_search, tools_chosen])

    txt.submit(lambda: [gr.update(value=''), gr.update(visible=False), gr.update(visible=True)], [],
               [txt, buttonClear, buttonStop])
    inference_event = txt.submit(answer_by_tools, [txt, tools_chosen], [chatbot, buttonClear, buttonStop])
    buttonChat.click(answer_by_tools, [txt, tools_chosen], [chatbot, buttonClear, buttonStop])
    buttonStop.click(lambda: [gr.update(visible=True), gr.update(visible=False)], [], [buttonClear, buttonStop],
                     cancels=[inference_event])
    buttonClear.click(clear_history, [], chatbot)

# demo.queue().launch(share=False, inbrowser=True, server_name="127.0.0.1", server_port=7001)
demo.queue().launch(share=True)
arxiv/__init__.py
ADDED
@@ -0,0 +1,8 @@
from ..registry import register


@register("arxiv")
def arxiv():
    from .api import build_tool

    return build_tool
arxiv/api.py
ADDED
@@ -0,0 +1,50 @@
from ..tool import Tool
from typing import Any
import arxiv


def build_tool(config) -> Tool:
    tool = Tool(
        "Arxiv",
        "Look up information from scientific articles on arxiv.org",
        name_for_model="Arxiv",
        description_for_model=(
            "Search information from Arxiv.org "
            "Useful for when you need to answer questions about Physics, Mathematics, "
            "Computer Science, Quantitative Biology, Quantitative Finance, Statistics, "
            "Electrical Engineering, and Economics "
            "from scientific articles on arxiv.org. "
            "Input should be a search query."
        ),
        logo_url="https://your-app-url.com/.well-known/logo.png",
        contact_email="[email protected]",
        legal_info_url="[email protected]",
    )

    # Exceptions raised by the arxiv client that should be surfaced to the caller.
    arxiv_exceptions = (
        arxiv.ArxivError,
        arxiv.UnexpectedEmptyPageError,
        arxiv.HTTPError,
    )  # :meta private:
    top_k_results: int = 3
    ARXIV_MAX_QUERY_LENGTH = 300
    doc_content_chars_max: int = 4000

    @tool.get("/get_arxiv_article_information")
    def get_arxiv_article_information(query: str):
        """Run Arxiv search and get the article meta information."""
        try:
            results = arxiv.Search(  # type: ignore
                query[:ARXIV_MAX_QUERY_LENGTH], max_results=top_k_results
            ).results()
        except arxiv_exceptions as ex:
            return f"Arxiv exception: {ex}"
        docs = [
            f"Published: {result.updated.date()}\nTitle: {result.title}\n"
            f"Authors: {', '.join(a.name for a in result.authors)}\n"
            f"Summary: {result.summary}"
            for result in results
        ]
        if docs:
            return "\n\n".join(docs)[:doc_content_chars_max]
        else:
            return "No good Arxiv Result was found"

    return tool
arxiv/readme.md
ADDED
@@ -0,0 +1,38 @@
# Arxiv Queries

Contributor: [Sihan Zhao](https://github.com/Sarah816)

## Tool Description
This Python-based tool offers a streamlined way to look up scientific articles on Arxiv.org. Named "Arxiv", this tool is particularly helpful when you need to answer questions about Physics, Mathematics, Computer Science, Quantitative Biology, Quantitative Finance, Statistics, Electrical Engineering, and Economics based on scientific articles from Arxiv.org.

### Tool Specifications

- **Name**: Arxiv
- **Purpose**: Look up information from scientific articles on arxiv.org
- **Logo**: ![Arxiv Logo](https://your-app-url.com/.well-known/logo.png)
- **Contact Email**: [email protected]
- **Legal Information**: [Legal Information]([email protected])

### Core Functionality

1. `get_arxiv_article_information`

This method takes a search query and returns meta-information about the Arxiv articles that match this query. The method uses an API to search articles on Arxiv.org and returns details like the date of publication, title of the article, names of the authors, and the summary of the article.

The method follows these steps:

- It takes a query as a string input.
- The query is passed to the Arxiv Search API.
- The method fetches the top three results.
- For each result, it collects information about the publication date, title, authors, and summary.
- It returns this information as a string.

If the search operation encounters an error, the method returns a message describing the Arxiv exception. If no suitable articles are found on Arxiv.org that match the query, it returns a message stating that no good Arxiv result was found. A sketch of this lookup is shown after the constants below.

### Constants

- **ARXIV_MAX_QUERY_LENGTH**: Maximum length of a query that can be passed to the Arxiv Search API. It's set to 300.
- **doc_content_chars_max**: Maximum number of characters of the Arxiv results to be returned. It's set to 4000.
- **top_k_results**: The maximum number of Arxiv Search results to be returned. It's set to 3.

Please note that the parameters can be optional and have their own default values. You should consult the method's documentation to understand the default behavior and the specific role of each parameter.
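The sketch below performs the same lookup directly with the `arxiv` package, mirroring the steps documented above (query truncation, top-3 results, formatted metadata). It is a sketch only, not part of the committed files; the query string is a made-up example, and the constants repeat the values listed in this readme.

# Hedged sketch of get_arxiv_article_information's steps using the arxiv package.
import arxiv

ARXIV_MAX_QUERY_LENGTH = 300
top_k_results = 3
doc_content_chars_max = 4000

query = "tool learning with foundation models"  # example query
results = arxiv.Search(query[:ARXIV_MAX_QUERY_LENGTH], max_results=top_k_results).results()
docs = [
    f"Published: {r.updated.date()}\nTitle: {r.title}\n"
    f"Authors: {', '.join(a.name for a in r.authors)}\nSummary: {r.summary}"
    for r in results
]
# Truncate the joined metadata; fall back to the tool's no-result message.
print("\n\n".join(docs)[:doc_content_chars_max] or "No good Arxiv Result was found")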
arxiv/test.py
ADDED
@@ -0,0 +1,11 @@
from swarms.tools.agent.singletool import load_single_tools, STQuestionAnswerer

tool_name, tool_url = "arxiv", "http://127.0.0.1:8079/tools/arxiv/"
tools_name, tools_config = load_single_tools(tool_name, tool_url)
print(tools_name, tools_config)

qa = STQuestionAnswerer()

agent = qa.load_tools(tools_name, tools_config)

agent("List some papers written by Timo Schick")
bing_search/__init__.py
ADDED
@@ -0,0 +1,8 @@
from ..registry import register


@register("bing_search")
def bing_search():
    from .api import build_tool

    return build_tool
bing_search/api.py
ADDED
@@ -0,0 +1,201 @@
1 |
+
import requests
|
2 |
+
from bs4 import BeautifulSoup
|
3 |
+
from ..tool import Tool
|
4 |
+
from enum import Enum
|
5 |
+
from typing import Tuple
|
6 |
+
|
7 |
+
|
8 |
+
# search result list chunk size
|
9 |
+
SEARCH_RESULT_LIST_CHUNK_SIZE = 3
|
10 |
+
# result target page text chunk content length
|
11 |
+
RESULT_TARGET_PAGE_PER_TEXT_COUNT = 500
|
12 |
+
|
13 |
+
|
14 |
+
class BingAPI:
|
15 |
+
"""
|
16 |
+
A class for performing searches on the Bing search engine.
|
17 |
+
|
18 |
+
Attributes
|
19 |
+
----------
|
20 |
+
bing_api : BingAPI
|
21 |
+
The Bing API to use for performing searches.
|
22 |
+
|
23 |
+
Methods
|
24 |
+
-------
|
25 |
+
__init__(self, subscription_key: str) -> None:
|
26 |
+
Initialize the BingSearch instance with the given subscription key.
|
27 |
+
search_top3(self, key_words: str) -> List[str]:
|
28 |
+
Perform a search on the Bing search engine with the given keywords and return the top 3 search results.
|
29 |
+
load_page_index(self, idx: int) -> str:
|
30 |
+
Load the detailed page of the search result at the given index.
|
31 |
+
"""
|
32 |
+
|
33 |
+
def __init__(self, subscription_key: str) -> None:
|
34 |
+
"""
|
35 |
+
Initialize the BingSearch instance with the given subscription key.
|
36 |
+
|
37 |
+
Parameters
|
38 |
+
----------
|
39 |
+
subscription_key : str
|
40 |
+
The subscription key to use for the Bing API.
|
41 |
+
"""
|
42 |
+
self._headers = {"Ocp-Apim-Subscription-Key": subscription_key}
|
43 |
+
self._endpoint = "https://api.bing.microsoft.com/v7.0/search"
|
44 |
+
self._mkt = "en-US"
|
45 |
+
|
46 |
+
def search(self, key_words: str, max_retry: int = 3):
|
47 |
+
for _ in range(max_retry):
|
48 |
+
try:
|
49 |
+
result = requests.get(
|
50 |
+
self._endpoint,
|
51 |
+
headers=self._headers,
|
52 |
+
params={"q": key_words, "mkt": self._mkt},
|
53 |
+
timeout=10,
|
54 |
+
)
|
55 |
+
except Exception:
|
56 |
+
# failed, retry
|
57 |
+
continue
|
58 |
+
|
59 |
+
if result.status_code == 200:
|
60 |
+
result = result.json()
|
61 |
+
# search result returned here
|
62 |
+
return result
|
63 |
+
else:
|
64 |
+
# failed, retry
|
65 |
+
continue
|
66 |
+
raise RuntimeError("Failed to access Bing Search API.")
|
67 |
+
|
68 |
+
def load_page(self, url: str, max_retry: int = 3) -> Tuple[bool, str]:
|
69 |
+
for _ in range(max_retry):
|
70 |
+
try:
|
71 |
+
res = requests.get(url, timeout=15)
|
72 |
+
if res.status_code == 200:
|
73 |
+
res.raise_for_status()
|
74 |
+
else:
|
75 |
+
raise RuntimeError(
|
76 |
+
"Failed to load page, code {}".format(res.status_code)
|
77 |
+
)
|
78 |
+
except Exception:
|
79 |
+
# failed, retry
|
80 |
+
res = None
|
81 |
+
continue
|
82 |
+
res.encoding = res.apparent_encoding
|
83 |
+
content = res.text
|
84 |
+
break
|
85 |
+
if res is None:
|
86 |
+
return (
|
87 |
+
False,
|
88 |
+
"Timeout for loading this page, Please try to load another one or search again.",
|
89 |
+
)
|
90 |
+
try:
|
91 |
+
soup = BeautifulSoup(content, "html.parser")
|
92 |
+
paragraphs = soup.find_all("p")
|
93 |
+
page_detail = ""
|
94 |
+
for p in paragraphs:
|
95 |
+
text = p.get_text().strip()
|
96 |
+
page_detail += text
|
97 |
+
return True, page_detail
|
98 |
+
except Exception:
|
99 |
+
return (
|
100 |
+
False,
|
101 |
+
"Timeout for loading this page, Please try to load another one or search again.",
|
102 |
+
)
|
103 |
+
|
104 |
+
|
105 |
+
class CONTENT_TYPE(Enum):
|
106 |
+
SEARCH_RESULT = 0
|
107 |
+
RESULT_TARGET_PAGE = 1
|
108 |
+
|
109 |
+
|
110 |
+
class ContentItem:
|
111 |
+
def __init__(self, type: CONTENT_TYPE, data):
|
112 |
+
self.type = type
|
113 |
+
self.data = data
|
114 |
+
|
115 |
+
|
116 |
+
class DigestData:
|
117 |
+
title: str
|
118 |
+
desc: str
|
119 |
+
chunkIndex: int
|
120 |
+
|
121 |
+
|
122 |
+
class Digest:
|
123 |
+
datas: list
|
124 |
+
checked: bool
|
125 |
+
|
126 |
+
|
127 |
+
class SessionData:
|
128 |
+
topic = None
|
129 |
+
content = []
|
130 |
+
digests = []
|
131 |
+
curResultChunk = 0
|
132 |
+
curTargetPageResultChunk = 0
|
133 |
+
|
134 |
+
|
135 |
+
data = SessionData()
|
136 |
+
|
137 |
+
|
138 |
+
def build_tool(config) -> Tool:
|
139 |
+
tool = Tool(
|
140 |
+
"Bing_search",
|
141 |
+
"Bing_search",
|
142 |
+
name_for_model="Bing_search",
|
143 |
+
name_for_human="Bing_search",
|
144 |
+
description_for_model="""Perform Search on Bing Search engine.
|
145 |
+
Use search_top3(key: str) to get top 3 search results after input the key to search.
|
146 |
+
Use load_page_index(idx: int) to load the detailed page of the search result.""",
|
147 |
+
description_for_human="Bing search API for browsing the internet and search for results.",
|
148 |
+
logo_url="https://your-app-url.com/.well-known/logo.png",
|
149 |
+
contact_email="[email protected]",
|
150 |
+
legal_info_url="[email protected]",
|
151 |
+
)
|
152 |
+
|
153 |
+
if "debug" in config and config["debug"]:
|
154 |
+
bing_api = config["bing_api"]
|
155 |
+
else:
|
156 |
+
bing_api = BingAPI(config["subscription_key"])
|
157 |
+
|
158 |
+
@tool.get("/search_top3")
|
159 |
+
def search_top3(key_words: str) -> str:
|
160 |
+
"""Search key words, return top 3 search results."""
|
161 |
+
top3 = search_all(key_words)[:3]
|
162 |
+
output = ""
|
163 |
+
for idx, item in enumerate(top3):
|
164 |
+
output += "page: " + str(idx + 1) + "\n"
|
165 |
+
output += "title: " + item["name"] + "\n"
|
166 |
+
output += "summary: " + item["snippet"] + "\n"
|
167 |
+
return output
|
168 |
+
|
169 |
+
def search_all(key_words: str, data: SessionData = data) -> list:
|
170 |
+
"""Search key_words, return a list of class SearchResult.
|
171 |
+
Keyword arguments:
|
172 |
+
key_words -- key words want to search
|
173 |
+
"""
|
174 |
+
result = bing_api.search(key_words)
|
175 |
+
data.content = []
|
176 |
+
data.content.append(ContentItem(CONTENT_TYPE.SEARCH_RESULT, result))
|
177 |
+
data.curResultChunk = 0
|
178 |
+
return data.content[-1].data["webPages"]["value"]
|
179 |
+
|
180 |
+
@tool.get("/load_page_index")
|
181 |
+
def load_page_index(idx: str) -> str:
|
182 |
+
"""Load page detail of the search result indexed as 'idx', and return the content of the page."""
|
183 |
+
idx = int(idx)
|
184 |
+
href, text = load_page(idx - 1)
|
185 |
+
if len(text) > 500:
|
186 |
+
return text[:500]
|
187 |
+
else:
|
188 |
+
return text
|
189 |
+
|
190 |
+
def load_page(idx: int, data: SessionData = data):
|
191 |
+
top = data.content[-1].data["webPages"]["value"]
|
192 |
+
ok, content = bing_api.load_page(top[idx]["url"])
|
193 |
+
if ok:
|
194 |
+
return top[idx]["url"], content
|
195 |
+
else:
|
196 |
+
return (
|
197 |
+
" ",
|
198 |
+
"Timeout for loading this page, Please try to load another one or search again.",
|
199 |
+
)
|
200 |
+
|
201 |
+
return tool
|
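A usage sketch mirroring arxiv/test.py follows; the registered tool name and local server URL are assumptions, and running it requires a tool server serving bing_search with a valid `subscription_key` in its config.

```python
from swarms.tools.agent.singletool import load_single_tools, STQuestionAnswerer

# Hypothetical local endpoint, following the arxiv/test.py pattern.
tool_name, tool_url = "bing_search", "http://127.0.0.1:8079/tools/bing_search/"
tools_name, tools_config = load_single_tools(tool_name, tool_url)
print(tools_name, tools_config)

qa = STQuestionAnswerer()

agent = qa.load_tools(tools_name, tools_config)

agent("Search for the latest developments in open-source language models")
```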
bing_search/readme.md
ADDED
@@ -0,0 +1,3 @@
# Bing search tool

Contributor: [ChengQian](https://github.com/qiancheng0)
bing_search/test_bing.py
ADDED
@@ -0,0 +1,60 @@
from fastapi.testclient import TestClient
from .api import build_tool, BingAPI
from typing import Tuple

BING_TEST_SEARCH = {
    "webPages": {
        "value": [
            {"url": "a", "name": "test a", "snippet": "page a"},
            {"url": "b", "name": "test b", "snippet": "page b"},
            {"url": "c", "name": "test c", "snippet": "page c"},
        ]
    }
}


class MockBingAPI(BingAPI):
    def __init__(self):
        pass

    def search(self, key_words: str, max_retry: int = 3):
        return BING_TEST_SEARCH

    def load_page(self, url: str, max_retry: int = 3) -> Tuple[bool, str]:
        if url == "a":
            return True, "This is page a"
        elif url == "b":
            return True, "This is page b"
        elif url == "c":
            return True, "This is page c"
        else:
            return (
                False,
                "Timeout for loading this page. Please try to load another one or search again.",
            )


app = build_tool({"debug": True, "bing_api": MockBingAPI()})
client = TestClient(app)


def test_bing():
    # test search top 3
    response = client.get("/search_top3", params={"key_words": "test"})

    output = ""
    for idx, item in enumerate(BING_TEST_SEARCH["webPages"]["value"]):
        output += "page: " + str(idx + 1) + "\n"
        output += "title: " + item["name"] + "\n"
        output += "summary: " + item["snippet"] + "\n"
    assert response.status_code == 200
    assert response.json() == output

    # test load page
    response = client.get("/load_page_index", params={"idx": "1"})
    assert response.status_code == 200
    assert response.json() == "This is page a"

    response = client.get("/load_page_index", params={"idx": "2"})
    assert response.status_code == 200
    assert response.json() == "This is page b"
chemical/__init__.py
ADDED
@@ -0,0 +1,8 @@
from ..registry import register


@register("chemical-prop")
def chemical_prop():
    from .prop import build_tool

    return build_tool
chemical/prop/__init__.py
ADDED
@@ -0,0 +1 @@
from .api import build_tool