ToolsAgent.fromLLMAndTools constructor

ToolsAgent.fromLLMAndTools({
  required BaseChatModel<ChatModelOptions> llm,
  List<Tool<Object, ToolOptions, Object>>? tools,
  BaseChatMemory? memory,
  SystemChatMessagePromptTemplate systemChatMessage = _systemChatMessagePromptTemplate,
  List<ChatMessagePromptTemplate>? extraPromptMessages,
})

Constructs a ToolsAgent from an llm and tools.

  • llm - The model to use for the agent.
  • tools - The tools the agent has access to. You can omit this field if you have already configured the tools in the llm.
  • memory - The memory to use for the agent.
  • systemChatMessage - The message to use as the system message that will be the first in the prompt. Default: "You are a helpful AI assistant".
  • extraPromptMessages - Prompt messages that will be placed between the system message and the input from the agent.

Implementation

/// Constructs a [ToolsAgent] from an [llm] and [tools].
///
/// - [llm] - The model to use for the agent.
/// - [tools] - The tools the agent has access to. Can be omitted if the
///   tools are already configured in the [llm]'s default options.
/// - [memory] - The memory to use for the agent.
/// - [systemChatMessage] - The message used as the system message that will
///   be the first in the prompt. Defaults to "You are a helpful AI
///   assistant".
/// - [extraPromptMessages] - Prompt messages placed between the system
///   message and the input from the agent.
///
/// Throws an [ArgumentError] if no tools are provided either directly or
/// via the [llm]'s default options.
factory ToolsAgent.fromLLMAndTools({
  required final BaseChatModel llm,
  final List<Tool>? tools,
  final BaseChatMemory? memory,
  final SystemChatMessagePromptTemplate systemChatMessage =
      _systemChatMessagePromptTemplate,
  final List<ChatMessagePromptTemplate>? extraPromptMessages,
}) {
  assert(
    tools != null || llm.defaultOptions.tools != null,
    'Tools must be provided or configured in the llm',
  );
  assert(
    // Null-aware access so this assert cannot itself throw a TypeError
    // when both tool sources are missing (the previous assert already
    // reports that case with a descriptive message).
    tools != null ||
        (llm.defaultOptions.tools?.every((tool) => tool is Tool) ?? true),
    'All elements in `tools` must be of type `Tool` or its subclasses',
  );

  // Resolve the tools, falling back to the ones configured in the llm.
  // Asserts are stripped in release builds, so fail with a descriptive
  // ArgumentError instead of a bare null-check TypeError when neither
  // source provides any tools.
  final actualTools = tools ?? llm.defaultOptions.tools?.cast<Tool>();
  if (actualTools == null) {
    throw ArgumentError(
      'Tools must be provided or configured in the llm',
    );
  }

  return ToolsAgent(
    llmChain: LLMChain(
      llm: llm,
      // Propagate the resolved tools into the model options so the model
      // exposes them on every call made by the chain.
      llmOptions: llm.defaultOptions.copyWith(
        tools: actualTools,
      ),
      prompt: createPrompt(
        systemChatMessage: systemChatMessage,
        extraPromptMessages: extraPromptMessages,
        memory: memory,
      ),
      memory: memory,
    ),
    tools: actualTools,
  );
}