Spaces:
Running
Running
Metadata-Version: 2.1 | |
Name: lagent | |
Version: 0.5.0rc1 | |
Summary: A lightweight framework for building LLM-based agents | |
Home-page: https://github.com/InternLM/lagent | |
License: Apache 2.0 | |
Keywords: artificial general intelligence,agent,agi,llm | |
Description-Content-Type: text/markdown | |
License-File: LICENSE | |
Requires-Dist: aiohttp | |
Requires-Dist: arxiv | |
Requires-Dist: asyncache | |
Requires-Dist: asyncer | |
Requires-Dist: distro | |
Requires-Dist: duckduckgo_search==5.3.1b1 | |
Requires-Dist: filelock | |
Requires-Dist: func_timeout | |
Requires-Dist: griffe<1.0 | |
Requires-Dist: json5 | |
Requires-Dist: jsonschema | |
Requires-Dist: jupyter==1.0.0 | |
Requires-Dist: jupyter_client==8.6.2 | |
Requires-Dist: jupyter_core==5.7.2 | |
Requires-Dist: pydantic==2.6.4 | |
Requires-Dist: requests | |
Requires-Dist: termcolor | |
Requires-Dist: tiktoken | |
Requires-Dist: timeout-decorator | |
Requires-Dist: typing-extensions | |
Provides-Extra: all | |
Requires-Dist: google-search-results; extra == "all" | |
Requires-Dist: lmdeploy>=0.2.5; extra == "all" | |
Requires-Dist: pillow; extra == "all" | |
Requires-Dist: python-pptx; extra == "all" | |
Requires-Dist: timeout_decorator; extra == "all" | |
Requires-Dist: torch; extra == "all" | |
Requires-Dist: transformers<=4.40,>=4.34; extra == "all" | |
Requires-Dist: vllm>=0.3.3; extra == "all" | |
Requires-Dist: aiohttp; extra == "all" | |
Requires-Dist: arxiv; extra == "all" | |
Requires-Dist: asyncache; extra == "all" | |
Requires-Dist: asyncer; extra == "all" | |
Requires-Dist: distro; extra == "all" | |
Requires-Dist: duckduckgo_search==5.3.1b1; extra == "all" | |
Requires-Dist: filelock; extra == "all" | |
Requires-Dist: func_timeout; extra == "all" | |
Requires-Dist: griffe<1.0; extra == "all" | |
Requires-Dist: json5; extra == "all" | |
Requires-Dist: jsonschema; extra == "all" | |
Requires-Dist: jupyter==1.0.0; extra == "all" | |
Requires-Dist: jupyter_client==8.6.2; extra == "all" | |
Requires-Dist: jupyter_core==5.7.2; extra == "all" | |
Requires-Dist: pydantic==2.6.4; extra == "all" | |
Requires-Dist: requests; extra == "all" | |
Requires-Dist: termcolor; extra == "all" | |
Requires-Dist: tiktoken; extra == "all" | |
Requires-Dist: timeout-decorator; extra == "all" | |
Requires-Dist: typing-extensions; extra == "all" | |
Provides-Extra: optional | |
Requires-Dist: google-search-results; extra == "optional" | |
Requires-Dist: lmdeploy>=0.2.5; extra == "optional" | |
Requires-Dist: pillow; extra == "optional" | |
Requires-Dist: python-pptx; extra == "optional" | |
Requires-Dist: timeout_decorator; extra == "optional" | |
Requires-Dist: torch; extra == "optional" | |
Requires-Dist: transformers<=4.40,>=4.34; extra == "optional" | |
Requires-Dist: vllm>=0.3.3; extra == "optional" | |
<div id="top"></div> | |
<div align="center"> | |
<img src="docs/imgs/lagent_logo.png" width="450"/> | |
[![docs](https://img.shields.io/badge/docs-latest-blue)](https://lagent.readthedocs.io/en/latest/) | |
------------------------------------------------------------------------------------------------------------------------ | |
[{'content': '今天天气情况', 'sender': 'user', 'formatted': None, 'extra_info': None, 'type': None, 'receiver': None, 'stream_state': <AgentStatusCode.END: 0>}, {'content': '急', 'sender': 'Agent', 'formatted': None, 'extra_info': None, 'type': None, 'receiver': None, 'stream_state': <AgentStatusCode.END: 0>}] | |
``` | |
Clear the memory of this session(`session_id=0` by default): | |
```python | |
agent.memory.reset() | |
``` | |
### Custom Message Aggregation | |
`DefaultAggregator` is called under the hood to assemble and convert `AgentMessage` to OpenAI message format. | |
```python | |
def forward(self, *message: AgentMessage, session_id=0, **kwargs) -> Union[AgentMessage, str]: | |
formatted_messages = self.aggregator.aggregate( | |
self.memory.get(session_id), | |
self.name, | |
self.output_format, | |
self.template, | |
) | |
llm_response = self.llm.chat(formatted_messages, **kwargs) | |
... | |
``` | |
Implement a simple aggregator that can receive few-shots | |
```python | |
from typing import List, Union | |
from lagent.memory import Memory | |
from lagent.prompts import StrParser | |
from lagent.agents.aggregator import DefaultAggregator | |
class FewshotAggregator(DefaultAggregator): | |
def __init__(self, few_shot: List[dict] = None): | |
self.few_shot = few_shot or [] | |
def aggregate(self, | |
messages: Memory, | |
name: str, | |
parser: StrParser = None, | |
system_instruction: Union[str, dict, List[dict]] = None) -> List[dict]: | |
_message = [] | |
if system_instruction: | |
_message.extend( | |
self.aggregate_system_intruction(system_instruction)) | |
_message.extend(self.few_shot) | |
messages = messages.get_memory() | |
for message in messages: | |
if message.sender == name: | |
_message.append( | |
dict(role='assistant', content=str(message.content))) | |
else: | |
user_message = message.content | |
if len(_message) > 0 and _message[-1]['role'] == 'user': | |
_message[-1]['content'] += user_message | |
else: | |
_message.append(dict(role='user', content=user_message)) | |
return _message | |
agent = Agent( | |
llm, | |
aggregator=FewshotAggregator( | |
[ | |
{"role": "user", "content": "今天天气"}, | |
{"role": "assistant", "content": "【晴】"}, | |
] | |
) | |
) | |
user_msg = AgentMessage(sender='user', content='昨天天气') | |
bot_msg = agent(user_msg) | |
print(bot_msg) | |
``` | |
``` | |
content='【多云转晴,夜间有轻微降温】' sender='Agent' formatted=None extra_info=None type=None receiver=None stream_state=<AgentStatusCode.END: 0> | |
``` | |
### Flexible Response Formatting | |
In `AgentMessage`, `formatted` is reserved to store information parsed by `output_format` from the model output. | |
```python | |
def forward(self, *message: AgentMessage, session_id=0, **kwargs) -> Union[AgentMessage, str]: | |
... | |
llm_response = self.llm.chat(formatted_messages, **kwargs) | |
if self.output_format: | |
formatted_messages = self.output_format.parse_response(llm_response) | |
return AgentMessage( | |
sender=self.name, | |
content=llm_response, | |
formatted=formatted_messages, | |
) | |
... | |
``` | |
Use a tool parser as follows | |
````python | |
from lagent.prompts.parsers import ToolParser | |
system_prompt = "逐步分析并编写Python代码解决以下问题。" | |
parser = ToolParser(tool_type='code interpreter', begin='```python\n', end='\n```\n') | |
llm.gen_params['stop_words'].append('\n```\n') | |
agent = Agent(llm, system_prompt, output_format=parser) | |
user_msg = AgentMessage( | |
sender='user', | |
content='Marie is thinking of a multiple of 63, while Jay is thinking of a ' | |
'factor of 63. They happen to be thinking of the same number. There are ' | |
'two possibilities for the number that each of them is thinking of, one ' | |
'positive and one negative. Find the product of these two numbers.') | |
bot_msg = agent(user_msg) | |
print(bot_msg.model_dump_json(indent=4)) | |
```` | |
```` | |
{ | |
"content": "首先,我们需要找出63的所有正因数和负因数。63的正因数可以通过分解63的质因数来找出,即\\(63 = 3^2 \\times 7\\)。因此,63的正因数包括1, 3, 7, 9, 21, 和 63。对于负因数,我们只需将上述正因数乘以-1。\n\n接下来,我们需要找出与63的正因数相乘的结果为63的数,以及与63的负因数相乘的结果为63的数。这可以通过将63除以每个正因数和负因数来实现。\n\n最后,我们将找到的两个数相乘得到最终答案。\n\n下面是Python代码实现:\n\n```python\ndef find_numbers():\n # 正因数\n positive_factors = | |
def before_agent(self, agent, messages, session_id): | |
for message in messages: | |
if message.sender in self.senders: | |
message.content = self.prefix + message.content | |
class AsyncBlogger(AsyncAgent): | |
def __init__(self, model_path, writer_prompt, critic_prompt, critic_prefix='', max_turn=3): | |
super().__init__() | |
llm = AsyncGPTAPI(model_type=model_path, retry=5, max_new_tokens=2048) | |
self.writer = AsyncAgent(llm, writer_prompt, name='writer') | |
self.critic = AsyncAgent( | |
llm, critic_prompt, name='critic', hooks=[PrefixedMessageHook(critic_prefix, ['writer'])] | |
) | |
self.max_turn = max_turn | |
async def forward(self, message: AgentMessage, session_id=0) -> AgentMessage: | |
for _ in range(self.max_turn): | |
message = await self.writer(message, session_id=session_id) | |
message = await self.critic(message, session_id=session_id) | |
return await self.writer(message, session_id=session_id) | |
blogger = AsyncBlogger( | |
'gpt-4o-2024-05-13', | |
writer_prompt="You are an writing assistant tasked to write engaging blogpost. You try to generate the best blogpost possible for the user's request. " | |
"If the user provides critique, then respond with a revised version of your previous attempts", | |
critic_prompt="Generate critique and recommendations on the writing. Provide detailed recommendations, including requests for length, depth, style, etc..", | |
critic_prefix='Reflect and provide critique on the following writing. \n\n', | |
) | |
user_prompt = ( | |
"Write an engaging blogpost on the recent updates in {topic}. " | |
"The blogpost should be engaging and understandable for general audience. " | |
"Should have more than 3 paragraphes but no longer than 1000 words.") | |
bot_msgs = asyncio.get_event_loop().run_until_complete( | |
asyncio.gather( | |
*[ | |
blogger(AgentMessage(sender='user', content=user_prompt.format(topic=topic)), session_id=i) | |
for i, topic in enumerate(['AI', 'Biotechnology', 'New Energy', 'Video Games', 'Pop Music']) | |
] | |
) | |
) | |
print(bot_msgs[0].content) | |
print('-' * 120) | |
for msg in blogger.state_dict(session_id=0)['writer.memory']: | |
print('*' * 80) | |
print(f'{msg["sender"]}:\n\n{msg["content"]}') | |
print('-' * 120) | |
for msg in blogger.state_dict(session_id=0)['critic.memory']: | |
print('*' * 80) | |
print(f'{msg["sender"]}:\n\n{msg["content"]}') | |
``` | |
A multi-agent workflow that performs information retrieval, data collection and chart plotting ([original LangGraph example](https://vijaykumarkartha.medium.com/multiple-ai-agents-creating-multi-agent-workflows-using-langgraph-and-langchain-0587406ec4e6)) | |
<div align="center"> | |
<img src="https://miro.medium.com/v2/resize:fit:1400/format:webp/1*ffzadZCKXJT7n4JaRVFvcQ.jpeg" width="850" /> | |
</div> | |
````python | |
import json | |
from lagent.actions import IPythonInterpreter, WebBrowser, ActionExecutor | |
from lagent.agents.stream import get_plugin_prompt | |
from lagent.llms import GPTAPI | |
from lagent.hooks import InternLMActionProcessor | |
TOOL_TEMPLATE = ( | |
"You are a helpful AI assistant, collaborating with other assistants. Use the provided tools to progress" | |
" towards answering the question. If you are unable to fully answer, that's OK, another assistant with" | |
" different tools will help where you left off. Execute what you can to make progress. If you or any of" | |
" the other assistants have the final answer or deliverable, prefix your response with {finish_pattern}" | |
" so the team knows to stop. You have access to the following tools:\n{tool_description}\nPlease provide" | |
" your thought process when you need to use a tool, followed by the call statement in this format:" | |
"\n{invocation_format}\\\\n**{system_prompt}**" | |
) | |
class DataVisualizer(Agent): | |
def __init__(self, model_path, research_prompt, chart_prompt, finish_pattern="Final Answer", max_turn=10): | |
super().__init__() | |
llm = GPTAPI(model_path, key='YOUR_OPENAI_API_KEY', retry=5, max_new_tokens=1024, stop_words=["```\n"]) | |
interpreter, browser = IPythonInterpreter(), WebBrowser("BingSearch", api_key="YOUR_BING_API_KEY") | |
self.researcher = Agent( | |
llm, | |
TOOL_TEMPLATE.format( | |
finish_pattern=finish_pattern, | |
tool_description=get_plugin_prompt(browser), | |
invocation_format='```json\n{"name": {{tool name}}, "parameters": {{keyword arguments}}}\n```\n', | |
system_prompt=research_prompt, | |
), | |
output_format=ToolParser( | |
"browser", | |
begin="```json\n", | |
end="\n```\n", | |
validate=lambda x: json.loads(x.rstrip('`')), | |
), | |
aggregator=InternLMToolAggregator(), | |
name="researcher", | |
) | |
self.charter = Agent( | |
llm, | |
TOOL_TEMPLATE.format( | |
finish_pattern=finish_pattern, | |
tool_description=interpreter.name, | |
invocation_format='```python\n{{code}}\n```\n', | |
system_prompt=chart_prompt, | |
), | |
output_format=ToolParser( | |
"interpreter", | |
begin="```python\n", | |
end="\n```\n", | |
validate=lambda x: x.rstrip('`'), | |
), | |
aggregator=InternLMToolAggregator(), | |
name="charter", | |
) | |
self.executor = ActionExecutor([interpreter, browser], hooks=[InternLMActionProcessor()]) | |
self.finish_pattern = finish_pattern | |
self.max_turn = max_turn | |
def forward(self, message, session_id=0): | |
for _ in range(self.max_turn): | |
message = self.researcher(message, session_id=session_id, stop_words=["```\n", "```python"]) # override llm stop words | |
while message.formatted["tool_type"]: | |
message = self.executor(message, session_id=session_id) | |
message = self.researcher(message, session_id=session_id, stop_words=["```\n", "```python"]) | |
if self.finish_pattern in message.content: | |
return message | |
message = self.charter(message) | |
while message.formatted["tool_type"]: | |
message = self.executor(message, session_id=session_id) | |
message = self.charter(message, session_id=session_id) | |
if self.finish_pattern in message.content: | |
return message | |
return message | |
visualizer = DataVisualizer( | |
"gpt-4o-2024-05-13", | |
research_prompt="You should provide accurate data for the chart generator to use.", | |
chart_prompt="Any charts you display will be visible by the user.", | |
) | |
user_msg = AgentMessage( | |
sender='user', | |
content="Fetch the China's GDP over the past 5 years, then draw a line graph of it. Once you code it up, finish.") | |
bot_msg = visualizer(user_msg) | |
print(bot_msg.content) | |
json.dump(visualizer.state_dict(), open('visualizer.json', 'w'), ensure_ascii=False, indent=4) | |
```` | |
## Citation | |
If you find this project useful in your research, please consider citing it: | |
```latex | |
@misc{lagent2023, | |
title={{Lagent: InternLM} a lightweight open-source framework that allows users to efficiently build large language model(LLM)-based agents}, | |
author={Lagent Developer Team}, | |
howpublished = {\url{https://github.com/InternLM/lagent}}, | |
year={2023} | |
} | |
``` | |
## License | |
This project is released under the [Apache 2.0 license](LICENSE). | |
<p align="right"><a href="#top">🔼 Back to top</a></p> | |