ToolsAgent.fromLLMAndTools constructor
ToolsAgent.fromLLMAndTools({
  required BaseChatModel<ChatModelOptions> llm,
  List<Tool<Object, ToolOptions, Object>>? tools,
  BaseChatMemory? memory,
  SystemChatMessagePromptTemplate systemChatMessage = _systemChatMessagePromptTemplate,
  List<ChatMessagePromptTemplate>? extraPromptMessages,
})
Constructs a ToolsAgent from an llm and tools.
llm - The model to use for the agent.
tools - The tools the agent has access to. You can omit this field if you have already configured the tools in the llm.
memory - The memory to use for the agent.
systemChatMessage - The message to use as the system message that will be the first in the prompt. Default: "You are a helpful AI assistant".
extraPromptMessages - Prompt messages that will be placed between the system message and the input from the agent.
Implementation
/// Constructs a [ToolsAgent] from an [llm] and [tools].
///
/// - [llm] - The model to use for the agent.
/// - [tools] - The tools the agent has access to. You can omit this field
///   if you have already configured the tools in the [llm]'s default
///   options.
/// - [memory] - The memory to use for the agent.
/// - [systemChatMessage] - The message to use as the system message that
///   will be the first in the prompt.
///   Default: "You are a helpful AI assistant".
/// - [extraPromptMessages] - Prompt messages that will be placed between
///   the system message and the input from the agent.
///
/// Throws an [ArgumentError] if no tools are provided either directly or
/// via the [llm]'s default options, or if the llm-configured tools are not
/// [Tool] instances.
factory ToolsAgent.fromLLMAndTools({
  required final BaseChatModel llm,
  final List<Tool>? tools,
  final BaseChatMemory? memory,
  final SystemChatMessagePromptTemplate systemChatMessage =
      _systemChatMessagePromptTemplate,
  final List<ChatMessagePromptTemplate>? extraPromptMessages,
}) {
  final llmTools = llm.defaultOptions.tools;
  // Validate at runtime rather than with `assert`, which is stripped in
  // release builds: without these checks a missing tool configuration
  // would surface below as an opaque null-check failure.
  if (tools == null && llmTools == null) {
    throw ArgumentError(
      'Tools must be provided or configured in the llm',
    );
  }
  if (tools == null && !llmTools!.every((tool) => tool is Tool)) {
    throw ArgumentError(
      'All elements in `tools` must be of type `Tool` or its subclasses',
    );
  }
  // Prefer explicitly passed tools; otherwise use the llm's configured ones.
  final actualTools = tools ?? llmTools!.cast<Tool>();
  return ToolsAgent(
    llmChain: LLMChain(
      llm: llm,
      // Ensure the model is invoked with the resolved tool set, even when
      // the tools were passed directly rather than configured on the llm.
      llmOptions: llm.defaultOptions.copyWith(
        tools: actualTools,
      ),
      prompt: createPrompt(
        systemChatMessage: systemChatMessage,
        extraPromptMessages: extraPromptMessages,
        memory: memory,
      ),
      memory: memory,
    ),
    tools: actualTools,
  );
}