agents.py 48 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211
  1. #!/usr/bin/env python
  2. # coding=utf-8
  3. # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
  4. #
  5. # Licensed under the Apache License, Version 2.0 (the "License");
  6. # you may not use this file except in compliance with the License.
  7. # You may obtain a copy of the License at
  8. #
  9. # http://www.apache.org/licenses/LICENSE-2.0
  10. #
  11. # Unless required by applicable law or agreed to in writing, software
  12. # distributed under the License is distributed on an "AS IS" BASIS,
  13. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14. # See the License for the specific language governing permissions and
  15. # limitations under the License.
  16. import json
  17. import logging
  18. import re
  19. from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union
  20. from .. import is_torch_available
  21. from ..utils import logging as transformers_logging
  22. from ..utils.import_utils import is_pygments_available
  23. from .agent_types import AgentAudio, AgentImage
  24. from .default_tools import BASE_PYTHON_TOOLS, FinalAnswerTool, setup_default_tools
  25. from .llm_engine import HfApiEngine, MessageRole
  26. from .prompts import (
  27. DEFAULT_CODE_SYSTEM_PROMPT,
  28. DEFAULT_REACT_CODE_SYSTEM_PROMPT,
  29. DEFAULT_REACT_JSON_SYSTEM_PROMPT,
  30. PLAN_UPDATE_FINAL_PLAN_REDACTION,
  31. PROMPTS_FOR_INITIAL_PLAN,
  32. PROMPTS_FOR_PLAN_UPDATE,
  33. SUPPORTED_PLAN_TYPES,
  34. SYSTEM_PROMPT_FACTS,
  35. SYSTEM_PROMPT_FACTS_UPDATE,
  36. USER_PROMPT_FACTS_UPDATE,
  37. )
  38. from .python_interpreter import LIST_SAFE_MODULES, evaluate_python_code
  39. from .tools import (
  40. DEFAULT_TOOL_DESCRIPTION_TEMPLATE,
  41. Tool,
  42. get_tool_description_with_args,
  43. load_tool,
  44. )
  45. if is_pygments_available():
  46. from pygments import highlight
  47. from pygments.formatters import Terminal256Formatter
  48. from pygments.lexers import PythonLexer
  49. class CustomFormatter(logging.Formatter):
  50. grey = "\x1b[38;20m"
  51. bold_yellow = "\x1b[33;1m"
  52. red = "\x1b[31;20m"
  53. green = "\x1b[32;20m"
  54. bold_green = "\x1b[32;20;1m"
  55. bold_red = "\x1b[31;1m"
  56. bold_white = "\x1b[37;1m"
  57. orange = "\x1b[38;5;214m"
  58. bold_orange = "\x1b[38;5;214;1m"
  59. reset = "\x1b[0m"
  60. format = "%(message)s"
  61. FORMATS = {
  62. logging.DEBUG: grey + format + reset,
  63. logging.INFO: format,
  64. logging.WARNING: bold_yellow + format + reset,
  65. logging.ERROR: red + format + reset,
  66. logging.CRITICAL: bold_red + format + reset,
  67. 31: reset + format + reset,
  68. 32: green + format + reset,
  69. 33: bold_green + format + reset,
  70. 34: bold_white + format + reset,
  71. 35: orange + format + reset,
  72. 36: bold_orange + format + reset,
  73. }
  74. def format(self, record):
  75. log_fmt = self.FORMATS.get(record.levelno)
  76. formatter = logging.Formatter(log_fmt)
  77. return formatter.format(record)
# Module-level logger wired to a dedicated stream handler using the color
# formatter above. Propagation is disabled so agent output is not duplicated
# by parent/root transformers loggers.
logger = transformers_logging.get_logger(__name__)
logger.propagate = False
ch = logging.StreamHandler()
ch.setFormatter(CustomFormatter())
logger.addHandler(ch)
  83. def parse_json_blob(json_blob: str) -> Dict[str, str]:
  84. try:
  85. first_accolade_index = json_blob.find("{")
  86. last_accolade_index = [a.start() for a in list(re.finditer("}", json_blob))][-1]
  87. json_blob = json_blob[first_accolade_index : last_accolade_index + 1].replace('\\"', "'")
  88. json_data = json.loads(json_blob, strict=False)
  89. return json_data
  90. except json.JSONDecodeError as e:
  91. place = e.pos
  92. if json_blob[place - 1 : place + 2] == "},\n":
  93. raise ValueError(
  94. "JSON is invalid: you probably tried to provide multiple tool calls in one action. PROVIDE ONLY ONE TOOL CALL."
  95. )
  96. raise ValueError(
  97. f"The JSON blob you used is invalid due to the following error: {e}.\n"
  98. f"JSON blob was: {json_blob}, decoding failed on that specific part of the blob:\n"
  99. f"'{json_blob[place-4:place+5]}'."
  100. )
  101. except Exception as e:
  102. raise ValueError(f"Error in parsing the JSON blob: {e}")
  103. def parse_code_blob(code_blob: str) -> str:
  104. try:
  105. pattern = r"```(?:py|python)?\n(.*?)\n```"
  106. match = re.search(pattern, code_blob, re.DOTALL)
  107. return match.group(1).strip()
  108. except Exception as e:
  109. raise ValueError(
  110. f"""
  111. The code blob you used is invalid: due to the following error: {e}
  112. This means that the regex pattern {pattern} was not respected: make sure to include code with the correct pattern, for instance:
  113. Thoughts: Your thoughts
  114. Code:
  115. ```py
  116. # Your python code here
  117. ```<end_action>"""
  118. )
  119. def parse_json_tool_call(json_blob: str) -> Tuple[str, Dict[str, str]]:
  120. json_blob = json_blob.replace("```json", "").replace("```", "")
  121. tool_call = parse_json_blob(json_blob)
  122. if "action" in tool_call and "action_input" in tool_call:
  123. return tool_call["action"], tool_call["action_input"]
  124. elif "action" in tool_call:
  125. return tool_call["action"], None
  126. else:
  127. raise ValueError(
  128. f"Missing keys: {[key for key in ['action', 'action_input'] if key not in tool_call]} in blob {tool_call}"
  129. )
  130. def parse_text_tool_call(text: str) -> Tuple[str, Union[str, Dict[str, str]]]:
  131. """
  132. Expects a text in the format: 'Action:', 'Action input:', 'Observation:'. 'Action input:' contains a json string with input arguments.
  133. """
  134. try:
  135. if "Observation:" in text:
  136. text = text.split("Observation:")[0]
  137. if "Action:" in text:
  138. text = text.split("Action:")[1]
  139. tool_name, tool_input = text.split("Action input:")
  140. if "{" in tool_input:
  141. tool_input = parse_json_blob(tool_input)
  142. else:
  143. tool_input = tool_input.strip().replace('"', "")
  144. return tool_name.strip().replace('"', "").replace("\\", ""), tool_input
  145. except Exception as e:
  146. raise ValueError(
  147. f"Error in parsing the text tool call: {e}. Be sure to provide the correct format. DO NOT repeat your previous incorrect tool call."
  148. )
  149. def to_text(input: Union[List[Dict[str, str]], Dict[str, str], str]) -> str:
  150. if isinstance(input, list):
  151. return "\n".join([m["content"] for m in input])
  152. elif isinstance(input, dict):
  153. return input["content"]
  154. else:
  155. return input
# Registry of the default transformers tools, populated lazily by
# `Toolbox.add_base_tools` on first use (guarded by the flag below).
HUGGINGFACE_DEFAULT_TOOLS = {}
_tools_are_initialized = False
  158. class Toolbox:
  159. """
  160. The toolbox contains all tools that the agent can perform operations with, as well as a few methods to
  161. manage them.
  162. Args:
  163. tools (`List[Tool]`):
  164. The list of tools to instantiate the toolbox with
  165. add_base_tools (`bool`, defaults to `False`, *optional*, defaults to `False`):
  166. Whether to add the tools available within `transformers` to the toolbox.
  167. """
  168. def __init__(self, tools: List[Tool], add_base_tools: bool = False):
  169. self._tools = {tool.name: tool for tool in tools}
  170. if add_base_tools:
  171. self.add_base_tools()
  172. self._load_tools_if_needed()
  173. def add_base_tools(self, add_python_interpreter: bool = False):
  174. global _tools_are_initialized
  175. global HUGGINGFACE_DEFAULT_TOOLS
  176. if not _tools_are_initialized:
  177. HUGGINGFACE_DEFAULT_TOOLS = setup_default_tools(logger)
  178. _tools_are_initialized = True
  179. for tool in HUGGINGFACE_DEFAULT_TOOLS.values():
  180. if tool.name != "python_interpreter" or add_python_interpreter:
  181. self.add_tool(tool)
  182. self._load_tools_if_needed()
  183. @property
  184. def tools(self) -> Dict[str, Tool]:
  185. """Get all tools currently in the toolbox"""
  186. return self._tools
  187. def show_tool_descriptions(self, tool_description_template: str = None) -> str:
  188. """
  189. Returns the description of all tools in the toolbox
  190. Args:
  191. tool_description_template (`str`, *optional*):
  192. The template to use to describe the tools. If not provided, the default template will be used.
  193. """
  194. return "\n".join(
  195. [get_tool_description_with_args(tool, tool_description_template) for tool in self._tools.values()]
  196. )
  197. def add_tool(self, tool: Tool):
  198. """
  199. Adds a tool to the toolbox
  200. Args:
  201. tool (`Tool`):
  202. The tool to add to the toolbox.
  203. """
  204. if tool.name in self._tools:
  205. raise KeyError(f"Error: tool '{tool.name}' already exists in the toolbox.")
  206. self._tools[tool.name] = tool
  207. def remove_tool(self, tool_name: str):
  208. """
  209. Removes a tool from the toolbox
  210. Args:
  211. tool_name (`str`):
  212. The tool to remove from the toolbox.
  213. """
  214. if tool_name not in self._tools:
  215. raise KeyError(
  216. f"Error: tool {tool_name} not found in toolbox for removal, should be instead one of {list(self._tools.keys())}."
  217. )
  218. del self._tools[tool_name]
  219. def update_tool(self, tool: Tool):
  220. """
  221. Updates a tool in the toolbox according to its name.
  222. Args:
  223. tool (`Tool`):
  224. The tool to update to the toolbox.
  225. """
  226. if tool.name not in self._tools:
  227. raise KeyError(
  228. f"Error: tool {tool.name} not found in toolbox for update, should be instead one of {list(self._tools.keys())}."
  229. )
  230. self._tools[tool.name] = tool
  231. def clear_toolbox(self):
  232. """Clears the toolbox"""
  233. self._tools = {}
  234. def _load_tools_if_needed(self):
  235. for name, tool in self._tools.items():
  236. if not isinstance(tool, Tool):
  237. task_or_repo_id = tool.task if tool.repo_id is None else tool.repo_id
  238. self._tools[name] = load_tool(task_or_repo_id)
  239. def __repr__(self):
  240. toolbox_description = "Toolbox contents:\n"
  241. for tool in self._tools.values():
  242. toolbox_description += f"\t{tool.name}: {tool.description}\n"
  243. return toolbox_description
  244. class AgentError(Exception):
  245. """Base class for other agent-related exceptions"""
  246. def __init__(self, message):
  247. super().__init__(message)
  248. self.message = message
class AgentParsingError(AgentError):
    """Exception raised for errors in parsing in the agent"""

    # Raised when the LLM output cannot be parsed into a valid action (see `extract_action`).
    pass
class AgentExecutionError(AgentError):
    """Exception raised for errors in execution in the agent"""

    # Raised when a tool call or managed-agent call fails (see `execute_tool_call`).
    pass
class AgentMaxIterationsError(AgentError):
    """Exception raised when the agent reaches the maximum number of iterations without a final answer"""

    pass
class AgentGenerationError(AgentError):
    """Exception raised for errors in generation in the agent"""

    # Raised when the LLM engine itself fails to produce an output.
    pass
  261. def format_prompt_with_tools(toolbox: Toolbox, prompt_template: str, tool_description_template: str) -> str:
  262. tool_descriptions = toolbox.show_tool_descriptions(tool_description_template)
  263. prompt = prompt_template.replace("<<tool_descriptions>>", tool_descriptions)
  264. if "<<tool_names>>" in prompt:
  265. tool_names = [f"'{tool_name}'" for tool_name in toolbox.tools.keys()]
  266. prompt = prompt.replace("<<tool_names>>", ", ".join(tool_names))
  267. return prompt
  268. def show_agents_descriptions(managed_agents: list):
  269. managed_agents_descriptions = """
  270. You can also give requests to team members.
  271. Calling a team member works the same as for calling a tool: simply, the only argument you can give in the call is 'request', a long string explaning your request.
  272. Given that this team member is a real human, you should be very verbose in your request.
  273. Here is a list of the team members that you can call:"""
  274. for agent in managed_agents.values():
  275. managed_agents_descriptions += f"\n- {agent.name}: {agent.description}"
  276. return managed_agents_descriptions
  277. def format_prompt_with_managed_agents_descriptions(prompt_template, managed_agents=None) -> str:
  278. if managed_agents is not None:
  279. return prompt_template.replace("<<managed_agents_descriptions>>", show_agents_descriptions(managed_agents))
  280. else:
  281. return prompt_template.replace("<<managed_agents_descriptions>>", "")
  282. def format_prompt_with_imports(prompt_template: str, authorized_imports: List[str]) -> str:
  283. if "<<authorized_imports>>" not in prompt_template:
  284. raise AgentError("Tag '<<authorized_imports>>' should be provided in the prompt.")
  285. return prompt_template.replace("<<authorized_imports>>", str(authorized_imports))
  286. class Agent:
  287. def __init__(
  288. self,
  289. tools: Union[List[Tool], Toolbox],
  290. llm_engine: Callable = HfApiEngine(),
  291. system_prompt=DEFAULT_REACT_CODE_SYSTEM_PROMPT,
  292. tool_description_template=None,
  293. additional_args={},
  294. max_iterations: int = 6,
  295. tool_parser=parse_json_tool_call,
  296. add_base_tools: bool = False,
  297. verbose: int = 0,
  298. grammar: Dict[str, str] = None,
  299. managed_agents: List = None,
  300. ):
  301. self.agent_name = self.__class__.__name__
  302. self.llm_engine = llm_engine
  303. self.system_prompt_template = system_prompt
  304. self.tool_description_template = (
  305. tool_description_template if tool_description_template else DEFAULT_TOOL_DESCRIPTION_TEMPLATE
  306. )
  307. self.additional_args = additional_args
  308. self.max_iterations = max_iterations
  309. self.logger = logger
  310. self.tool_parser = tool_parser
  311. self.grammar = grammar
  312. self.managed_agents = None
  313. if managed_agents is not None:
  314. self.managed_agents = {agent.name: agent for agent in managed_agents}
  315. if isinstance(tools, Toolbox):
  316. self._toolbox = tools
  317. if add_base_tools:
  318. if not is_torch_available():
  319. raise ImportError("Using the base tools requires torch to be installed.")
  320. self._toolbox.add_base_tools(add_python_interpreter=(self.__class__ == ReactJsonAgent))
  321. else:
  322. self._toolbox = Toolbox(tools, add_base_tools=add_base_tools)
  323. self._toolbox.add_tool(FinalAnswerTool())
  324. self.system_prompt = format_prompt_with_tools(
  325. self._toolbox, self.system_prompt_template, self.tool_description_template
  326. )
  327. self.system_prompt = format_prompt_with_managed_agents_descriptions(self.system_prompt, self.managed_agents)
  328. self.prompt = None
  329. self.logs = []
  330. self.task = None
  331. if verbose == 0:
  332. logger.setLevel(logging.WARNING)
  333. elif verbose == 1:
  334. logger.setLevel(logging.INFO)
  335. elif verbose == 2:
  336. logger.setLevel(logging.DEBUG)
  337. @property
  338. def toolbox(self) -> Toolbox:
  339. """Get the toolbox currently available to the agent"""
  340. return self._toolbox
    def initialize_for_run(self):
        """Reset the run state: token counter, system prompt (rebuilt from the current toolbox) and logs."""
        self.token_count = 0
        # Rebuild the system prompt so tools added/removed since __init__ are reflected.
        self.system_prompt = format_prompt_with_tools(
            self._toolbox,
            self.system_prompt_template,
            self.tool_description_template,
        )
        self.system_prompt = format_prompt_with_managed_agents_descriptions(self.system_prompt, self.managed_agents)
        if hasattr(self, "authorized_imports"):
            # Only code-executing agents define `authorized_imports`; merge them
            # with the always-safe interpreter modules.
            self.system_prompt = format_prompt_with_imports(
                self.system_prompt, list(set(LIST_SAFE_MODULES) | set(self.authorized_imports))
            )
        # The first log entry stores the system prompt and the task; later steps are appended after it.
        self.logs = [{"system_prompt": self.system_prompt, "task": self.task}]
        # Levels 33/34 are the custom color levels defined in CustomFormatter.
        self.logger.log(33, "======== New task ========")
        self.logger.log(34, self.task)
        self.logger.debug("System prompt is as follows:")
        self.logger.debug(self.system_prompt)
    def write_inner_memory_from_logs(self, summary_mode: Optional[bool] = False) -> List[Dict[str, str]]:
        """
        Reads past llm_outputs, actions, and observations or errors from the logs into a series of messages
        that can be used as input to the LLM.

        When `summary_mode` is True, the system prompt, raw llm outputs and plans are
        omitted to produce a condensed transcript (used e.g. for planning updates).
        """
        prompt_message = {"role": MessageRole.SYSTEM, "content": self.logs[0]["system_prompt"]}
        task_message = {
            "role": MessageRole.USER,
            "content": "Task: " + self.logs[0]["task"],
        }
        if summary_mode:
            memory = [task_message]
        else:
            memory = [prompt_message, task_message]
        # self.logs[0] is the system-prompt/task entry; actual steps start at index 1.
        for i, step_log in enumerate(self.logs[1:]):
            # Raw model output is only replayed in full (non-summary) mode.
            if "llm_output" in step_log and not summary_mode:
                thought_message = {"role": MessageRole.ASSISTANT, "content": step_log["llm_output"].strip()}
                memory.append(thought_message)
            if "facts" in step_log:
                thought_message = {
                    "role": MessageRole.ASSISTANT,
                    "content": "[FACTS LIST]:\n" + step_log["facts"].strip(),
                }
                memory.append(thought_message)

            if "plan" in step_log and not summary_mode:
                thought_message = {"role": MessageRole.ASSISTANT, "content": "[PLAN]:\n" + step_log["plan"].strip()}
                memory.append(thought_message)

            # In summary mode, the compact tool-call record replaces the raw llm output.
            if "tool_call" in step_log and summary_mode:
                tool_call_message = {
                    "role": MessageRole.ASSISTANT,
                    "content": f"[STEP {i} TOOL CALL]: " + str(step_log["tool_call"]).strip(),
                }
                memory.append(tool_call_message)

            # A 'task' key marks a follow-up task appended by run(..., reset=False).
            if "task" in step_log:
                tool_call_message = {
                    "role": MessageRole.USER,
                    "content": "New task:\n" + step_log["task"],
                }
                memory.append(tool_call_message)

            # Errors take precedence over observations when both are present.
            if "error" in step_log or "observation" in step_log:
                if "error" in step_log:
                    message_content = (
                        f"[OUTPUT OF STEP {i}] -> Error:\n"
                        + str(step_log["error"])
                        + "\nNow let's retry: take care not to repeat previous errors! If you have retried several times, try a completely different approach.\n"
                    )
                elif "observation" in step_log:
                    message_content = f"[OUTPUT OF STEP {i}] -> Observation:\n{step_log['observation']}"
                tool_response_message = {"role": MessageRole.TOOL_RESPONSE, "content": message_content}
                memory.append(tool_response_message)
        return memory
  409. def get_succinct_logs(self):
  410. return [{key: value for key, value in log.items() if key != "agent_memory"} for log in self.logs]
    def extract_action(self, llm_output: str, split_token: str) -> Tuple[str, str]:
        """
        Parse action from the LLM output

        Args:
            llm_output (`str`): Output of the LLM
            split_token (`str`): Separator for the action. Should match the example in the system prompt.

        Returns:
            `Tuple[str, str]`: the stripped rationale (text before the last separator) and action (text after it).

        Raises:
            AgentParsingError: if `split_token` does not appear in `llm_output`.
        """
        try:
            split = llm_output.split(split_token)
            rationale, action = (
                split[-2],
                split[-1],
            )  # NOTE: using indexes starting from the end solves for when you have more than one split_token in the output
        except Exception as e:
            self.logger.error(e, exc_info=1)
            raise AgentParsingError(
                f"Error: No '{split_token}' token provided in your output.\nYour output:\n{llm_output}\n. Be sure to include an action, prefaced with '{split_token}'!"
            )
        return rationale.strip(), action.strip()
  430. def execute_tool_call(self, tool_name: str, arguments: Dict[str, str]) -> Any:
  431. """
  432. Execute tool with the provided input and returns the result.
  433. This method replaces arguments with the actual values from the state if they refer to state variables.
  434. Args:
  435. tool_name (`str`): Name of the Tool to execute (should be one from self.toolbox).
  436. arguments (Dict[str, str]): Arguments passed to the Tool.
  437. """
  438. available_tools = self.toolbox.tools
  439. if self.managed_agents is not None:
  440. available_tools = {**available_tools, **self.managed_agents}
  441. if tool_name not in available_tools:
  442. error_msg = f"Error: unknown tool {tool_name}, should be instead one of {list(available_tools.keys())}."
  443. self.logger.error(error_msg, exc_info=1)
  444. raise AgentExecutionError(error_msg)
  445. try:
  446. if isinstance(arguments, str):
  447. observation = available_tools[tool_name](arguments)
  448. elif isinstance(arguments, dict):
  449. for key, value in arguments.items():
  450. # if the value is the name of a state variable like "image.png", replace it with the actual value
  451. if isinstance(value, str) and value in self.state:
  452. arguments[key] = self.state[value]
  453. observation = available_tools[tool_name](**arguments)
  454. else:
  455. raise AgentExecutionError(
  456. f"Arguments passed to tool should be a dict or string: got a {type(arguments)}."
  457. )
  458. return observation
  459. except Exception as e:
  460. if tool_name in self.toolbox.tools:
  461. raise AgentExecutionError(
  462. f"Error in tool call execution: {e}\nYou should only use this tool with a correct input.\n"
  463. f"As a reminder, this tool's description is the following:\n{get_tool_description_with_args(available_tools[tool_name])}"
  464. )
  465. elif tool_name in self.managed_agents:
  466. raise AgentExecutionError(
  467. f"Error in calling team member: {e}\nYou should only ask this team member with a correct request.\n"
  468. f"As a reminder, this team member's description is the following:\n{available_tools[tool_name]}"
  469. )
  470. def log_rationale_code_action(self, rationale: str, code_action: str) -> None:
  471. self.logger.warning("=== Agent thoughts:")
  472. self.logger.log(31, rationale)
  473. self.logger.warning(">>> Agent is executing the code below:")
  474. if is_pygments_available():
  475. self.logger.log(
  476. 31, highlight(code_action, PythonLexer(ensurenl=False), Terminal256Formatter(style="nord"))
  477. )
  478. else:
  479. self.logger.log(31, code_action)
  480. self.logger.warning("====")
    def run(self, **kwargs):
        """To be implemented in the child class"""
        # Abstract by convention: concrete agents (CodeAgent, ReactAgent, ...) override this.
        raise NotImplementedError
class CodeAgent(Agent):
    """
    A class for an agent that solves the given task using a single block of code. It plans all its actions, then executes all in one shot.
    """

    def __init__(
        self,
        tools: List[Tool],
        llm_engine: Callable = HfApiEngine(),
        system_prompt: str = DEFAULT_CODE_SYSTEM_PROMPT,
        tool_description_template: str = DEFAULT_TOOL_DESCRIPTION_TEMPLATE,
        grammar: Optional[Dict[str, str]] = None,
        additional_authorized_imports: Optional[List[str]] = None,
        **kwargs,
    ):
        super().__init__(
            tools=tools,
            llm_engine=llm_engine,
            system_prompt=system_prompt,
            tool_description_template=tool_description_template,
            grammar=grammar,
            **kwargs,
        )

        # Pygments is optional: without it the generated code is logged without colors.
        if not is_pygments_available():
            transformers_logging.warning_once(
                logger,
                "pygments isn't installed. Installing pygments will enable color syntax highlighting in the "
                "CodeAgent.",
            )

        self.python_evaluator = evaluate_python_code
        self.additional_authorized_imports = additional_authorized_imports if additional_authorized_imports else []
        # The interpreter may import the always-safe modules plus any extra ones requested here.
        self.authorized_imports = list(set(LIST_SAFE_MODULES) | set(self.additional_authorized_imports))
        self.system_prompt = self.system_prompt.replace("<<authorized_imports>>", str(self.authorized_imports))
    def parse_code_blob(self, result: str) -> str:
        """
        Override this method if you want to change the way the code is
        cleaned in the `run` method.
        """
        # Delegates to the module-level `parse_code_blob`, which extracts the
        # content of a fenced ```py / ```python block.
        return parse_code_blob(result)
    def run(self, task: str, return_generated_code: bool = False, **kwargs):
        """
        Runs the agent for the given task.

        Args:
            task (`str`): The task to perform
            return_generated_code (`bool`, *optional*, defaults to `False`): Whether to return the generated code instead of running it
            kwargs (additional keyword arguments, *optional*):
                Any keyword argument to send to the agent when evaluating the code.

        Returns:
            The evaluated output of the generated code, the generated code itself when
            `return_generated_code` is True, or an error-message string on failure.

        Example:
        ```py
        from transformers.agents import CodeAgent

        agent = CodeAgent(tools=[])
        agent.run("What is the result of 2 power 3.7384?")
        ```
        """
        self.task = task
        if len(kwargs) > 0:
            self.task += f"\nYou have been provided with these initial arguments: {str(kwargs)}."
        # kwargs become the interpreter state, so generated code can reference them by name.
        self.state = kwargs.copy()
        self.initialize_for_run()

        # Run LLM
        prompt_message = {"role": MessageRole.SYSTEM, "content": self.system_prompt}
        task_message = {
            "role": MessageRole.USER,
            "content": "Task: " + self.task,
        }
        self.prompt = [prompt_message, task_message]
        self.logger.info("====Executing with this prompt====")
        self.logger.info(self.prompt)
        additional_args = {"grammar": self.grammar} if self.grammar is not None else {}
        llm_output = self.llm_engine(self.prompt, stop_sequences=["<end_action>"], **additional_args)

        if return_generated_code:
            return llm_output

        # Parse
        try:
            rationale, code_action = self.extract_action(llm_output=llm_output, split_token="Code:")
        except Exception as e:
            # Fallback: treat the whole output as code when no 'Code:' separator was produced.
            self.logger.debug(
                f"Error in extracting action, trying to parse the whole output as code. Error trace: {e}"
            )
            rationale, code_action = "", llm_output

        try:
            code_action = self.parse_code_blob(code_action)
        except Exception as e:
            # Parsing errors are returned as strings rather than raised, so the caller always gets a result.
            error_msg = f"Error in code parsing: {e}. Be sure to provide correct code"
            self.logger.error(error_msg, exc_info=1)
            return error_msg

        # Execute
        self.log_rationale_code_action(rationale, code_action)
        try:
            # The generated code may call both the base python tools and the toolbox tools.
            available_tools = {**BASE_PYTHON_TOOLS.copy(), **self.toolbox.tools}
            output = self.python_evaluator(
                code_action,
                static_tools=available_tools,
                custom_tools={},
                state=self.state,
                authorized_imports=self.authorized_imports,
            )
            # The interpreter accumulates print output under this state key.
            self.logger.info(self.state["print_outputs"])
            return output

        except Exception as e:
            error_msg = f"Error in execution: {e}. Be sure to provide correct code."
            self.logger.error(error_msg, exc_info=1)
            return error_msg
  586. class ReactAgent(Agent):
  587. """
  588. This agent that solves the given task step by step, using the ReAct framework:
  589. While the objective is not reached, the agent will perform a cycle of thinking and acting.
  590. The action will be parsed from the LLM output: it consists in calls to tools from the toolbox, with arguments chosen by the LLM engine.
  591. """
  592. def __init__(
  593. self,
  594. tools: List[Tool],
  595. llm_engine: Callable = HfApiEngine(),
  596. system_prompt: str = DEFAULT_REACT_CODE_SYSTEM_PROMPT,
  597. tool_description_template: str = DEFAULT_TOOL_DESCRIPTION_TEMPLATE,
  598. grammar: Dict[str, str] = None,
  599. plan_type: Literal[tuple(SUPPORTED_PLAN_TYPES)] = SUPPORTED_PLAN_TYPES[0],
  600. planning_interval: Optional[int] = None,
  601. **kwargs,
  602. ):
  603. assert plan_type in SUPPORTED_PLAN_TYPES, f"plan type {plan_type} is not supported"
  604. super().__init__(
  605. tools=tools,
  606. llm_engine=llm_engine,
  607. system_prompt=system_prompt,
  608. tool_description_template=tool_description_template,
  609. grammar=grammar,
  610. **kwargs,
  611. )
  612. self.planning_interval = planning_interval
  613. self.plan_type = plan_type
  614. def provide_final_answer(self, task) -> str:
  615. """
  616. This method provides a final answer to the task, based on the logs of the agent's interactions.
  617. """
  618. self.prompt = [
  619. {
  620. "role": MessageRole.SYSTEM,
  621. "content": "An agent tried to answer an user query but it got stuck and failed to do so. You are tasked with providing an answer instead. Here is the agent's memory:",
  622. }
  623. ]
  624. self.prompt += self.write_inner_memory_from_logs()[1:]
  625. self.prompt += [
  626. {
  627. "role": MessageRole.USER,
  628. "content": f"Based on the above, please provide an answer to the following user request:\n{task}",
  629. }
  630. ]
  631. try:
  632. return self.llm_engine(self.prompt)
  633. except Exception as e:
  634. return f"Error in generating final llm output: {e}."
  635. def run(self, task: str, stream: bool = False, reset: bool = True, **kwargs):
  636. """
  637. Runs the agent for the given task.
  638. Args:
  639. task (`str`): The task to perform
  640. Example:
  641. ```py
  642. from transformers.agents import ReactCodeAgent
  643. agent = ReactCodeAgent(tools=[])
  644. agent.run("What is the result of 2 power 3.7384?")
  645. ```
  646. """
  647. self.task = task
  648. if len(kwargs) > 0:
  649. self.task += f"\nYou have been provided with these initial arguments: {str(kwargs)}."
  650. self.state = kwargs.copy()
  651. if reset:
  652. self.initialize_for_run()
  653. else:
  654. self.logs.append({"task": task})
  655. if stream:
  656. return self.stream_run(task)
  657. else:
  658. return self.direct_run(task)
  659. def stream_run(self, task: str):
  660. """
  661. Runs the agent in streaming mode, yielding steps as they are executed: should be launched only in the `run` method.
  662. """
  663. final_answer = None
  664. iteration = 0
  665. while final_answer is None and iteration < self.max_iterations:
  666. try:
  667. step_logs = self.step()
  668. if "final_answer" in step_logs:
  669. final_answer = step_logs["final_answer"]
  670. except AgentError as e:
  671. self.logger.error(e, exc_info=1)
  672. self.logs[-1]["error"] = e
  673. finally:
  674. iteration += 1
  675. yield self.logs[-1]
  676. if final_answer is None and iteration == self.max_iterations:
  677. error_message = "Reached max iterations."
  678. final_step_log = {"error": AgentMaxIterationsError(error_message)}
  679. self.logs.append(final_step_log)
  680. self.logger.error(error_message, exc_info=1)
  681. final_answer = self.provide_final_answer(task)
  682. final_step_log["final_answer"] = final_answer
  683. yield final_step_log
  684. yield final_answer
  685. def direct_run(self, task: str):
  686. """
  687. Runs the agent in direct mode, returning outputs only at the end: should be launched only in the `run` method.
  688. """
  689. final_answer = None
  690. iteration = 0
  691. while final_answer is None and iteration < self.max_iterations:
  692. try:
  693. if self.planning_interval is not None and iteration % self.planning_interval == 0:
  694. self.planning_step(task, is_first_step=(iteration == 0), iteration=iteration)
  695. step_logs = self.step()
  696. if "final_answer" in step_logs:
  697. final_answer = step_logs["final_answer"]
  698. except AgentError as e:
  699. self.logger.error(e, exc_info=1)
  700. self.logs[-1]["error"] = e
  701. finally:
  702. iteration += 1
  703. if final_answer is None and iteration == self.max_iterations:
  704. error_message = "Reached max iterations."
  705. final_step_log = {"error": AgentMaxIterationsError(error_message)}
  706. self.logs.append(final_step_log)
  707. self.logger.error(error_message, exc_info=1)
  708. final_answer = self.provide_final_answer(task)
  709. final_step_log["final_answer"] = final_answer
  710. return final_answer
    def planning_step(self, task, is_first_step: bool = False, iteration: int = None):
        """
        Used periodically by the agent to plan the next steps to reach the objective.

        Two-phase LLM exchange: first collect a list of facts, then derive a plan
        from the task, the available tools/managed agents, and those facts. Both
        the plan and the facts are appended to `self.logs`.

        Args:
            task (`str`): The task to perform
            is_first_step (`bool`): If this step is not the first one, the plan should be an update over a previous plan.
            iteration (`int`): The number of the current step, used as an indication for the LLM.
        """
        if is_first_step:
            # Initial pass: survey known facts from the task statement alone.
            message_prompt_facts = {"role": MessageRole.SYSTEM, "content": SYSTEM_PROMPT_FACTS}
            message_prompt_task = {
                "role": MessageRole.USER,
                "content": f"""Here is the task:
```
{task}
```
Now begin!""",
            }

            answer_facts = self.llm_engine([message_prompt_facts, message_prompt_task])

            # Then derive an initial plan from task, tools, managed agents and facts.
            message_system_prompt_plan = {
                "role": MessageRole.SYSTEM,
                "content": PROMPTS_FOR_INITIAL_PLAN[self.plan_type]["system"],
            }
            message_user_prompt_plan = {
                "role": MessageRole.USER,
                "content": PROMPTS_FOR_INITIAL_PLAN[self.plan_type]["user"].format(
                    task=task,
                    tool_descriptions=self._toolbox.show_tool_descriptions(self.tool_description_template),
                    managed_agents_descriptions=(
                        show_agents_descriptions(self.managed_agents) if self.managed_agents is not None else ""
                    ),
                    answer_facts=answer_facts,
                ),
            }
            # Stop generation at the plan delimiter so the answer contains only the plan.
            answer_plan = self.llm_engine(
                [message_system_prompt_plan, message_user_prompt_plan], stop_sequences=["<end_plan>"]
            )

            final_plan_redaction = f"""Here is the plan of action that I will follow to solve the task:
```
{answer_plan}
```"""
            final_facts_redaction = f"""Here are the facts that I know so far:
```
{answer_facts}
```""".strip()
            self.logs.append({"plan": final_plan_redaction, "facts": final_facts_redaction})
            # Custom log levels 36/35 sit above WARNING in the logging hierarchy.
            self.logger.log(36, "===== Initial plan =====")
            self.logger.log(35, final_plan_redaction)
        else:  # update plan
            # Subsequent passes: refresh facts and plan from the agent's memory so far.
            agent_memory = self.write_inner_memory_from_logs(
                summary_mode=False
            )  # This will not log the plan but will log facts

            # Redact updated facts
            facts_update_system_prompt = {
                "role": MessageRole.SYSTEM,
                "content": SYSTEM_PROMPT_FACTS_UPDATE,
            }
            facts_update_message = {
                "role": MessageRole.USER,
                "content": USER_PROMPT_FACTS_UPDATE,
            }
            facts_update = self.llm_engine([facts_update_system_prompt] + agent_memory + [facts_update_message])

            # Redact updated plan; tell the LLM how many steps remain.
            plan_update_message = {
                "role": MessageRole.SYSTEM,
                "content": PROMPTS_FOR_PLAN_UPDATE[self.plan_type]["system"].format(task=task),
            }
            plan_update_message_user = {
                "role": MessageRole.USER,
                "content": PROMPTS_FOR_PLAN_UPDATE[self.plan_type]["user"].format(
                    task=task,
                    tool_descriptions=self._toolbox.show_tool_descriptions(self.tool_description_template),
                    managed_agents_descriptions=(
                        show_agents_descriptions(self.managed_agents) if self.managed_agents is not None else ""
                    ),
                    facts_update=facts_update,
                    remaining_steps=(self.max_iterations - iteration),
                ),
            }
            plan_update = self.llm_engine(
                [plan_update_message] + agent_memory + [plan_update_message_user], stop_sequences=["<end_plan>"]
            )

            # Log final facts and plan
            final_plan_redaction = PLAN_UPDATE_FINAL_PLAN_REDACTION.format(task=task, plan_update=plan_update)
            final_facts_redaction = f"""Here is the updated list of the facts that I know:
```
{facts_update}
```"""
            self.logs.append({"plan": final_plan_redaction, "facts": final_facts_redaction})
            self.logger.log(36, "===== Updated plan =====")
            self.logger.log(35, final_plan_redaction)
class ReactJsonAgent(ReactAgent):
    """
    This agent solves the given task step by step, using the ReAct framework:
    While the objective is not reached, the agent will perform a cycle of thinking and acting.
    The tool calls will be formulated by the LLM in JSON format, then parsed and executed.
    """

    def __init__(
        self,
        tools: List[Tool],
        llm_engine: Callable = HfApiEngine(),
        system_prompt: str = DEFAULT_REACT_JSON_SYSTEM_PROMPT,
        tool_description_template: str = DEFAULT_TOOL_DESCRIPTION_TEMPLATE,
        grammar: Dict[str, str] = None,
        planning_interval: Optional[int] = None,
        **kwargs,
    ):
        # Same setup as the parent ReactAgent, with the JSON-specific default system prompt.
        super().__init__(
            tools=tools,
            llm_engine=llm_engine,
            system_prompt=system_prompt,
            tool_description_template=tool_description_template,
            grammar=grammar,
            planning_interval=planning_interval,
            **kwargs,
        )

    def step(self):
        """
        Perform one step in the ReAct framework: the agent thinks, acts, and observes the result.
        The errors are raised here, they are caught and logged in the run() method.

        Returns:
            `dict`: This step's log entry; contains a "final_answer" key when the
            LLM invoked the `final_answer` tool.
        """
        agent_memory = self.write_inner_memory_from_logs()

        self.prompt = agent_memory
        self.logger.debug("===== New step =====")

        # Add new step in logs — appended first, then filled in progressively,
        # so error handlers in run() can annotate self.logs[-1].
        current_step_logs = {}
        self.logs.append(current_step_logs)
        current_step_logs["agent_memory"] = agent_memory.copy()

        self.logger.info("===== Calling LLM with this last message: =====")
        self.logger.info(self.prompt[-1])

        try:
            # Forward the grammar for constrained generation only when one was configured.
            additional_args = {"grammar": self.grammar} if self.grammar is not None else {}
            llm_output = self.llm_engine(
                self.prompt, stop_sequences=["<end_action>", "Observation:"], **additional_args
            )
        except Exception as e:
            raise AgentGenerationError(f"Error in generating llm output: {e}.")
        self.logger.debug("===== Output message of the LLM: =====")
        self.logger.debug(llm_output)
        current_step_logs["llm_output"] = llm_output

        # Parse the LLM output into a rationale and a JSON tool call.
        self.logger.debug("===== Extracting action =====")
        rationale, action = self.extract_action(llm_output=llm_output, split_token="Action:")

        try:
            tool_name, arguments = self.tool_parser(action)
        except Exception as e:
            raise AgentParsingError(f"Could not parse the given action: {e}.")

        current_step_logs["rationale"] = rationale
        current_step_logs["tool_call"] = {"tool_name": tool_name, "tool_arguments": arguments}

        # Execute
        self.logger.warning("=== Agent thoughts:")
        self.logger.log(31, rationale)
        self.logger.warning(f">>> Calling tool: '{tool_name}' with arguments: {arguments}")
        if tool_name == "final_answer":
            # Unwrap {"answer": ...} dicts; other shapes are passed through as-is.
            if isinstance(arguments, dict):
                if "answer" in arguments:
                    answer = arguments["answer"]
                    if (
                        isinstance(answer, str) and answer in self.state.keys()
                    ):  # if the answer is a state variable, return the value
                        answer = self.state[answer]
                else:
                    answer = arguments
            else:
                answer = arguments
            current_step_logs["final_answer"] = answer
            return current_step_logs
        else:
            if arguments is None:
                arguments = {}
            observation = self.execute_tool_call(tool_name, arguments)
            observation_type = type(observation)
            if observation_type in [AgentImage, AgentAudio]:
                # Rich outputs are stored in state and referenced by a fixed name.
                if observation_type == AgentImage:
                    observation_name = "image.png"
                elif observation_type == AgentAudio:
                    observation_name = "audio.mp3"
                # TODO: observation naming could allow for different names of same type
                self.state[observation_name] = observation
                updated_information = f"Stored '{observation_name}' in memory."
            else:
                updated_information = str(observation).strip()
            self.logger.info(updated_information)
            current_step_logs["observation"] = updated_information
            return current_step_logs
class ReactCodeAgent(ReactAgent):
    """
    This agent solves the given task step by step, using the ReAct framework:
    While the objective is not reached, the agent will perform a cycle of thinking and acting.
    The tool calls will be formulated by the LLM in code format, then parsed and executed.
    """

    def __init__(
        self,
        tools: List[Tool],
        llm_engine: Callable = HfApiEngine(),
        system_prompt: str = DEFAULT_REACT_CODE_SYSTEM_PROMPT,
        tool_description_template: str = DEFAULT_TOOL_DESCRIPTION_TEMPLATE,
        grammar: Dict[str, str] = None,
        additional_authorized_imports: Optional[List[str]] = None,
        planning_interval: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            tools=tools,
            llm_engine=llm_engine,
            system_prompt=system_prompt,
            tool_description_template=tool_description_template,
            grammar=grammar,
            planning_interval=planning_interval,
            **kwargs,
        )

        if not is_pygments_available():
            transformers_logging.warning_once(
                logger,
                "pygments isn't installed. Installing pygments will enable color syntax highlighting in the "
                "ReactCodeAgent.",
            )

        # Generated code is run through the restricted Python evaluator.
        self.python_evaluator = evaluate_python_code
        self.additional_authorized_imports = additional_authorized_imports if additional_authorized_imports else []
        # Imports allowed inside generated code: the safe default list plus user additions.
        self.authorized_imports = list(set(LIST_SAFE_MODULES) | set(self.additional_authorized_imports))
        self.system_prompt = self.system_prompt.replace("<<authorized_imports>>", str(self.authorized_imports))
        # Tools defined on the fly by generated code, persisted across steps.
        self.custom_tools = {}

    def step(self):
        """
        Perform one step in the ReAct framework: the agent thinks, acts, and observes the result.
        The errors are raised here, they are caught and logged in the run() method.

        Returns:
            `dict`: This step's log entry; contains a "final_answer" key when the
            generated code called `final_answer` at top level.
        """
        agent_memory = self.write_inner_memory_from_logs()

        self.prompt = agent_memory.copy()
        self.logger.debug("===== New step =====")

        # Add new step in logs — appended first so run() can annotate self.logs[-1] on error.
        current_step_logs = {}
        self.logs.append(current_step_logs)
        current_step_logs["agent_memory"] = agent_memory.copy()

        self.logger.info("===== Calling LLM with these last messages: =====")
        self.logger.info(self.prompt[-2:])

        try:
            # Forward the grammar for constrained generation only when one was configured.
            additional_args = {"grammar": self.grammar} if self.grammar is not None else {}
            llm_output = self.llm_engine(
                self.prompt, stop_sequences=["<end_action>", "Observation:"], **additional_args
            )
        except Exception as e:
            raise AgentGenerationError(f"Error in generating llm output: {e}.")

        self.logger.debug("=== Output message of the LLM:")
        self.logger.debug(llm_output)
        current_step_logs["llm_output"] = llm_output

        # Parse the LLM output into a rationale and a code snippet.
        self.logger.debug("=== Extracting action ===")
        try:
            rationale, raw_code_action = self.extract_action(llm_output=llm_output, split_token="Code:")
        except Exception as e:
            # Fall back to treating the whole output as both rationale and code.
            self.logger.debug(f"Error in extracting action, trying to parse the whole output. Error trace: {e}")
            rationale, raw_code_action = llm_output, llm_output

        try:
            code_action = parse_code_blob(raw_code_action)
        except Exception as e:
            error_msg = f"Error in code parsing: {e}. Make sure to provide correct code"
            raise AgentParsingError(error_msg)

        current_step_logs["rationale"] = rationale
        current_step_logs["tool_call"] = {"tool_name": "code interpreter", "tool_arguments": code_action}

        # Execute
        self.log_rationale_code_action(rationale, code_action)
        try:
            # Names callable from generated code: base Python tools, toolbox tools,
            # and — when present — the managed agents.
            static_tools = {
                **BASE_PYTHON_TOOLS.copy(),
                **self.toolbox.tools,
            }
            if self.managed_agents is not None:
                static_tools = {**static_tools, **self.managed_agents}
            result = self.python_evaluator(
                code_action,
                static_tools=static_tools,
                custom_tools=self.custom_tools,
                state=self.state,
                authorized_imports=self.authorized_imports,
            )
            self.logger.warning("Print outputs:")
            self.logger.log(32, self.state["print_outputs"])
            if result is not None:
                self.logger.warning("Last output from code snippet:")
                self.logger.log(32, str(result))
            observation = "Print outputs:\n" + self.state["print_outputs"]
            if result is not None:
                # Cap the stored result so the log entry stays bounded.
                observation += "Last output from code snippet:\n" + str(result)[:100000]
            current_step_logs["observation"] = observation
        except Exception as e:
            error_msg = f"Code execution failed due to the following error:\n{str(e)}"
            if "'dict' object has no attribute 'read'" in str(e):
                error_msg += "\nYou get this error because you passed a dict as input for one of the arguments instead of a string."
            raise AgentExecutionError(error_msg)
        # NOTE(review): this prefix check only detects unindented, top-level
        # `final_answer...` lines in the generated code — indented calls are
        # missed; confirm that is the intended contract.
        for line in code_action.split("\n"):
            if line[: len("final_answer")] == "final_answer":
                self.logger.log(33, "Final answer:")
                self.logger.log(32, result)
                current_step_logs["final_answer"] = result
        return current_step_logs
  1007. class ManagedAgent:
  1008. def __init__(self, agent, name, description, additional_prompting=None, provide_run_summary=False):
  1009. self.agent = agent
  1010. self.name = name
  1011. self.description = description
  1012. self.additional_prompting = additional_prompting
  1013. self.provide_run_summary = provide_run_summary
  1014. def write_full_task(self, task):
  1015. full_task = f"""You're a helpful agent named '{self.name}'.
  1016. You have been submitted this task by your manager.
  1017. ---
  1018. Task:
  1019. {task}
  1020. ---
  1021. You're helping your manager solve a wider task: so make sure to not provide a one-line answer, but give as much information as possible so that they have a clear understanding of the answer.
  1022. Your final_answer WILL HAVE to contain these parts:
  1023. ### 1. Task outcome (short version):
  1024. ### 2. Task outcome (extremely detailed version):
  1025. ### 3. Additional context (if relevant):
  1026. Put all these in your final_answer tool, everything that you do not pass as an argument to final_answer will be lost.
  1027. And even if your task resolution is not successful, please return as much context as possible, so that your manager can act upon this feedback.
  1028. <<additional_prompting>>"""
  1029. if self.additional_prompting:
  1030. full_task = full_task.replace("\n<<additional_prompting>>", self.additional_prompting).strip()
  1031. else:
  1032. full_task = full_task.replace("\n<<additional_prompting>>", "").strip()
  1033. return full_task
  1034. def __call__(self, request, **kwargs):
  1035. full_task = self.write_full_task(request)
  1036. output = self.agent.run(full_task, **kwargs)
  1037. if self.provide_run_summary:
  1038. answer = f"Here is the final answer from your managed agent '{self.name}':\n"
  1039. answer += str(output)
  1040. answer += f"\n\nFor more detail, find below a summary of this agent's work:\nSUMMARY OF WORK FROM AGENT '{self.name}':\n"
  1041. for message in self.agent.write_inner_memory_from_logs(summary_mode=True):
  1042. content = message["content"]
  1043. if len(str(content)) < 1000 or "[FACTS LIST]" in str(content):
  1044. answer += "\n" + str(content) + "\n---"
  1045. else:
  1046. answer += "\n" + str(content)[:1000] + "\n(...Step was truncated because too long)...\n---"
  1047. answer += f"\nEND OF SUMMARY OF WORK FROM AGENT '{self.name}'."
  1048. return answer
  1049. else:
  1050. return output