# file: https://github.com/generative-computing/mellea/blob/main/docs/examples/agents/react.py#L99
def react(
    m: mellea.MelleaSession,
    goal: str,
    state_description: str | None,
    react_toolbox: ReactToolbox,
):
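    """Run a ReACT loop over `react_toolbox` until the model reports that it can
    answer `goal`, then return its final answer as a string."""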
    test_ctx_lin = m.ctx.view_for_generation()
    assert test_ctx_lin is not None and len(test_ctx_lin) == 0, (
        "ReACT expects a fresh context."
    )
    # Construct the system prompt for ReACT.
    _sys_prompt = react_system_template.render(
        {"today": datetime.date.today(), "tools": react_toolbox.tools}
    )
    # Add the system prompt and the goal to the chat history.
    m.ctx = m.ctx.add(
        mellea.stdlib.chat.Message(role="system", content=_sys_prompt)
    ).add(mellea.stdlib.chat.Message(role="user", content=f"{goal}"))
    # The main ReACT loop as a dynamic program:
    #   ( ?(not done) ;
    #     (thought request ; thought response) ;
    #     (action request ; action response) ;
    #     (action args request ; action args response) ;
    #     observation from the tool call ;
    #     (is done request ; is done response) ;
    #     { ?(model indicated done) ; emit_final_answer ; done := true }
    #   )*
    done = False
    turn_num = 0
    while not done:
        turn_num += 1
        print(f"## ReACT TURN NUMBER {turn_num}")
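        # Thought: ask the model for a free-form description of its next step.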
        print("### Thought")
        thought = m.chat(
            "What should you do next? Respond with a description of the next piece of information you need or the next action you need to take."
        )
        print(thought.content)
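        # Action: constrain decoding with the toolbox's tool-name schema so the
        # reply names one of the registered tools.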
        print("### Action")
        act = m.chat(
            "Choose your next action. Respond with nothing other than a tool name.",
            format=react_toolbox.tool_name_schema(),
        )
        selected_tool: ReactTool = react_toolbox.get_tool_from_schema(act.content)
        print(selected_tool.get_name())
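        # Arguments: constrain the response to the selected tool's argument schema.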
        print("### Arguments for action")
        act_args = m.chat(
            "Choose arguments for the tool. Respond using JSON and include only the tool arguments in your response.",
            format=selected_tool.args_schema(),
        )
        print(f"```json\n{json.dumps(json.loads(act_args.content), indent=2)}\n```")
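        # Observation: run the tool and record its output in the chat history as a
        # tool message so later turns can condition on it.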
        print("### Observation")
        tool_output = react_toolbox.call_tool(selected_tool, act_args.content)
        m.ctx = m.ctx.add(mellea.stdlib.chat.Message(role="tool", content=tool_output))
        print(tool_output)
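        # Done check: ask the model whether it can now answer the original query,
        # forcing a structured response that validates against IsDoneModel.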
        print("### Done Check")
        is_done = IsDoneModel.model_validate_json(
            m.chat(
                f"Do you know the answer to the user's original query ({goal})? If so, respond with Yes. If you need to take more actions, then respond No.",
                format=IsDoneModel,
            ).content
        ).is_done
        if is_done:
            print("Done. Will summarize and return output now.")
            done = True
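            # Final answer: one last chat turn summarizes the tool-call history
            # into an answer for the original query.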
            return m.chat(
                f"Please provide your final answer to the original query ({goal})."
            ).content
        else:
            print("Not done.")
            done = False
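

# A minimal usage sketch (not part of the original file). The ReactTool and
# ReactToolbox constructors below are assumptions about the helper classes defined
# earlier in react.py and may not match their real signatures; mellea.start_session()
# is the standard way to create a MelleaSession with the default backend.
#
# import mellea
#
# def lookup_weather(city: str) -> str:
#     """Hypothetical tool: return a canned weather report for `city`."""
#     return f"It is sunny in {city}."
#
# toolbox = ReactToolbox(tools=[ReactTool(fn=lookup_weather)])  # assumed constructor
# m = mellea.start_session()
# answer = react(m, goal="What is the weather in Paris today?",
#                state_description=None, react_toolbox=toolbox)
# print(answer)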