Heroku LangChain.js - v1.0.1

    Class HerokuAgent

    HerokuAgent - Heroku Managed Inference Agent Integration

    A LangChain-compatible chat model that interfaces with Heroku's Managed Inference Agent API. This class provides access to intelligent agents that can execute tools and perform complex multi-step reasoning tasks. Agents have access to Heroku-specific tools like app management, database operations, and can integrate with external services via MCP (Model Context Protocol).

    Unlike the basic ChatHeroku model, agents are designed for autonomous task execution with built-in tool calling capabilities and advanced reasoning patterns.

    // Source: examples/create-heroku-agent-basic.ts
    import { createAgent, createMiddleware } from "langchain";
    import { HumanMessage } from "@langchain/core/messages";
    import { HerokuAgent } from "heroku-langchain";
    import { HerokuAgentToolDefinition } from "heroku-langchain/types";

    const tools: HerokuAgentToolDefinition[] = [
      {
        type: "heroku_tool",
        name: "dyno_run_command",
        runtime_params: {
          target_app_name: process.env.HEROKU_APP_NAME ?? "mia-inference-demo",
          tool_params: {
            cmd: "uname -a",
            description: "Gets the current kernel version on the server.",
            parameters: { type: "object", properties: {} },
          },
        },
      },
    ];

    const model = new HerokuAgent({ tools });
    const loggingMiddleware = createMiddleware({
      name: "LoggingMiddleware",
      wrapModelCall: async (request, handler) => {
        console.log("system prompt", request.systemPrompt ?? "<default>");
        return handler(request);
      },
    });

    const agent = createAgent({
      model,
      tools,
      systemPrompt: "You are a Heroku operator.",
      middleware: [loggingMiddleware],
    });

    const response = await agent.invoke({
      messages: [
        new HumanMessage("What kernel version is running on the app server?"),
      ],
    });
    console.log(response.messages.at(-1)?.content);

    // Source: examples/create-agent-mcp.ts
    import { createAgent } from "langchain";
    import { HumanMessage } from "@langchain/core/messages";
    import { HerokuAgent } from "heroku-langchain";
    import { HerokuAgentToolDefinition } from "heroku-langchain/types";

    const tools: HerokuAgentToolDefinition[] = [
      {
        type: "mcp",
        name: "mcp-brave/brave_web_search",
      },
    ];

    const model = new HerokuAgent({ tools });
    const agent = createAgent({
      model,
      tools,
      systemPrompt:
        "Blend model knowledge with results from mcp-brave/brave_web_search.",
    });

    const response = await agent.invoke({
      messages: [new HumanMessage("What is new in the world of AI?")],
    });

    const finalMessage = response.messages.at(-1);
    console.log(finalMessage?.content);
    console.log(finalMessage?.response_metadata?.tool_calls);

    // Source: examples/create-heroku-agent-streaming.ts
    import { createAgent } from "langchain";
    import { HumanMessage } from "@langchain/core/messages";
    import { HerokuAgent } from "heroku-langchain";
    import { HerokuAgentToolDefinition } from "heroku-langchain/types";

    const tools: HerokuAgentToolDefinition[] = [
      {
        type: "heroku_tool",
        name: "dyno_run_command",
        runtime_params: {
          target_app_name: process.env.HEROKU_APP_NAME ?? "mia-inference-demo",
          tool_params: {
            cmd: "date",
            description: "Gets the current date and time on the server.",
            parameters: { type: "object", properties: {} },
          },
        },
      },
    ];

    const agent = createAgent({
      model: new HerokuAgent({ tools }),
      tools,
      systemPrompt:
        "You are a Heroku operator. Prefer dyno_run_command to inspect the target app.",
    });

    const stream = await agent.stream({
      messages: [
        new HumanMessage(
          "Collect detailed uptime information from the target app.",
        ),
      ],
    });

    for await (const chunk of stream as AsyncIterable<Record<string, any>>) {
      const latestMessage = chunk.messages?.at(-1);
      if (latestMessage?.content) {
        console.log(latestMessage.content);
      }
      if (latestMessage?.response_metadata?.tool_calls?.length) {
        console.log("tool calls", latestMessage.response_metadata.tool_calls);
      }
    }

    Constructors

    • Creates a new HerokuAgent instance.

      Parameters

      • Optional fields: HerokuAgentFields

        Optional configuration options for the Heroku Mia Agent

      Returns HerokuAgent

      Throws when a model ID is not provided and the INFERENCE_MODEL_ID environment variable is not set

      // Basic usage with defaults
      const agent = new HerokuAgent();

      // With custom configuration
      const configuredAgent = new HerokuAgent({
        model: "gpt-oss-120b",
        temperature: 0.3,
        maxTokensPerRequest: 2000,
        tools: [
          {
            type: "heroku_tool",
            name: "dyno_run_command",
            runtime_params: {
              target_app_name: "my-app",
              tool_params: {
                cmd: "date",
                description: "Gets the current date and time on the server.",
                parameters: { type: "object", properties: {} },
              },
            },
          },
        ],
        apiKey: "your-api-key",
        apiUrl: "https://us.inference.heroku.com",
      });

    Properties

    maxTokensPerRequest?: number
    tools?: any[]
    streamUsage?: boolean
    toolResultQueues: Map<string, any[]> = ...
    _localNoopTools: StructuredTool<ToolInputSchemaBase, any, any, any>[] = []
    resolvedModelId: string

    Actual model ID used when calling Heroku APIs

    model: string

    Public/alias model name exposed to LangChain (can differ from actual ID)

    temperature?: number
    stop?: string[]
    topP?: number
    apiKey?: string
    apiUrl?: string
    maxRetries?: number
    timeout?: number
    streaming?: boolean
    additionalKwargs?: Record<string, any>
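
    Since model can be an alias while resolvedModelId carries the ID actually sent to Heroku, inspecting both helps when debugging requests. A minimal sketch (the model name here is illustrative):

    import { HerokuAgent } from "heroku-langchain";

    const agent = new HerokuAgent({ model: "gpt-oss-120b" });
    // Public/alias name exposed to LangChain:
    console.log(agent.model);
    // Actual model ID used for Heroku API calls (may differ for aliases):
    console.log(agent.resolvedModelId);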

    Methods

    • Returns the LangChain identifier for this agent class.

      Returns string

      The string "HerokuAgent"

    • Returns the LLM type identifier for this agent.

      Returns string

      The string "HerokuAgent"

    • Internal

      Get the parameters used to invoke the agent.

      This method combines constructor parameters with runtime options to create the final request parameters for the Heroku Agent API. Runtime options take precedence over constructor parameters.

      Parameters

      • Optional options: Partial<HerokuAgentCallOptions>

        Optional runtime parameters that override constructor defaults

      Returns Omit<
          HerokuAgentFields,
          "outputVersion"
          | "disableStreaming"
          | (keyof BaseLanguageModelParams),
      > & { [key: string]: any }

      Combined parameters for the agent API request
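
      Because runtime options take precedence, a per-call value replaces the constructor default for that call only. A hedged sketch, assuming temperature is among the supported runtime call options:

      import { HumanMessage } from "@langchain/core/messages";
      import { HerokuAgent } from "heroku-langchain";

      const agent = new HerokuAgent({ temperature: 0.2 });
      // If temperature is a supported call option, this call runs at 0.9, not 0.2.
      const result = await agent.invoke(
        [new HumanMessage("Summarize the app status.")],
        { temperature: 0.9 },
      );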

    • Parameters

      • messages: BaseMessage<MessageStructure, MessageType>[]
      • options: Omit<
            HerokuAgentCallOptions,
            | "configurable"
            | "recursionLimit"
            | "runName"
            | "tags"
            | "metadata"
            | "callbacks"
            | "runId",
        >
      • Optional runManager: CallbackManagerForLLMRun

      Returns Promise<ChatResult>
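
      This method backs the standard non-streaming invoke path, so a minimal call looks like this (tool definitions omitted for brevity):

      import { HumanMessage } from "@langchain/core/messages";
      import { HerokuAgent } from "heroku-langchain";

      const model = new HerokuAgent();
      const message = await model.invoke([new HumanMessage("List common dyno types.")]);
      console.log(message.content);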

    • Parameters

      • messages: BaseMessage<MessageStructure, MessageType>[]
      • options: Omit<
            HerokuAgentCallOptions,
            | "configurable"
            | "recursionLimit"
            | "runName"
            | "tags"
            | "metadata"
            | "callbacks"
            | "runId",
        >
      • Optional runManager: CallbackManagerForLLMRun

      Returns AsyncGenerator<
          AIMessageChunk<MessageStructure>
          | ToolMessageChunk<MessageStructure>,
      >

    • LangChain streaming hook. Wraps _stream to emit ChatGenerationChunk objects so BaseChatModel.stream() stays on the streaming path.

      Parameters

      • messages: BaseMessage<MessageStructure, MessageType>[]
      • options: Omit<
            HerokuAgentCallOptions,
            | "configurable"
            | "recursionLimit"
            | "runName"
            | "tags"
            | "metadata"
            | "callbacks"
            | "runId",
        >
      • Optional runManager: CallbackManagerForLLMRun

      Returns AsyncGenerator<ChatGenerationChunk>
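
      Because this hook keeps stream() on the streaming path, chunks can be consumed directly from the model, as in this sketch:

      import { HumanMessage } from "@langchain/core/messages";
      import { HerokuAgent } from "heroku-langchain";

      const model = new HerokuAgent();
      const stream = await model.stream([new HumanMessage("Describe dyno types.")]);
      for await (const chunk of stream) {
        // Chunks may be AI or tool message chunks; print text content as it arrives.
        if (typeof chunk.content === "string") process.stdout.write(chunk.content);
      }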

    • Create a version of this agent that returns structured output by instructing the model to produce JSON matching the schema (jsonMode-style).

      Type Parameters

      • RunOutput extends Record<string, any> = Record<string, any>
      • IncludeRaw extends boolean = false

      Parameters

      • schemaOrParams:
            | Record<string, any>
            | ZodType<RunOutput, unknown, $ZodTypeInternals<RunOutput, unknown>>
            | {
                schema:
                    | Record<string, any>
                    | ZodType<RunOutput, unknown, $ZodTypeInternals<RunOutput, unknown>>;
                name?: string;
                description?: string;
                method?: "functionCalling" | "jsonMode";
                includeRaw?: IncludeRaw;
            }
      • Optional maybeOptions: {
            name?: string;
            description?: string;
            method?: "functionCalling" | "jsonMode";
            includeRaw?: IncludeRaw;
        }

      Returns any
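
      A hedged sketch with a Zod schema (the field names are illustrative, not part of the API):

      import { z } from "zod";
      import { HerokuAgent } from "heroku-langchain";

      const statusSchema = z.object({
        app: z.string(),
        healthy: z.boolean(),
      });

      const model = new HerokuAgent();
      const structured = model.withStructuredOutput(statusSchema, {
        name: "app_status",
        method: "jsonMode",
      });
      // The agent is instructed to emit JSON matching statusSchema.
      const status = await structured.invoke("Report the health of my-app.");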

    • Get the locally bound no-op LangChain tools mirroring server tools.

      Returns Tool<any>[]

    • Push a server-provided tool result into the local queue for a tool name

      Parameters

      • toolName: string | undefined
      • result: any

      Returns void

    • Consume the oldest queued server tool result for a tool name

      Parameters

      • toolName: string

      Returns any
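
      Together these two methods give per-tool FIFO semantics over toolResultQueues. A standalone sketch of the pattern (the function names here are illustrative, not the package's):

      const queues = new Map<string, any[]>();

      // Push: append a server-provided result under its tool name.
      function pushResult(toolName: string | undefined, result: any): void {
        if (!toolName) return;
        const queue = queues.get(toolName) ?? [];
        queue.push(result);
        queues.set(toolName, queue);
      }

      // Consume: remove and return the oldest queued result, if any.
      function consumeResult(toolName: string): any {
        return queues.get(toolName)?.shift();
      }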

    • Remove undefined keys to keep payloads clean

      Type Parameters

      • T extends Record<string, any>

      Parameters

      • obj: T

      Returns T
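
      A typical implementation of such a helper looks like this sketch (not necessarily the package's exact code):

      function removeUndefinedKeys<T extends Record<string, any>>(obj: T): T {
        // Dropping undefined values keeps the JSON payload clean.
        return Object.fromEntries(
          Object.entries(obj).filter(([, value]) => value !== undefined),
        ) as T;
      }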

    • Standard headers for Heroku API calls

      Parameters

      • apiKey: string

      Returns Record<string, string>
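
      Heroku's Managed Inference API authenticates with a bearer token, so the headers likely reduce to something like this sketch:

      function buildHeaders(apiKey: string): Record<string, string> {
        return {
          Authorization: `Bearer ${apiKey}`, // Inference API key
          "Content-Type": "application/json",
        };
      }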

    • POST JSON with retries, timeout, and consistent error wrapping.

      Parameters

      • url: string
      • apiKey: string
      • body: Record<string, any>

      Returns Promise<Response>
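
      A minimal sketch of the described pattern, using fetch with an AbortController timeout and exponential backoff (the retry and timeout constants are illustrative):

      async function postJson(
        url: string,
        apiKey: string,
        body: Record<string, any>,
        maxRetries = 2,
        timeoutMs = 30_000,
      ): Promise<Response> {
        let lastError: unknown;
        for (let attempt = 0; attempt <= maxRetries; attempt++) {
          const controller = new AbortController();
          const timer = setTimeout(() => controller.abort(), timeoutMs);
          try {
            const response = await fetch(url, {
              method: "POST",
              headers: {
                Authorization: `Bearer ${apiKey}`, // assumes bearer auth, as above
                "Content-Type": "application/json",
              },
              body: JSON.stringify(body),
              signal: controller.signal,
            });
            if (response.ok) return response;
            lastError = new Error(`Heroku API error ${response.status}`);
          } catch (err) {
            lastError = err; // network failure or timeout abort
          } finally {
            clearTimeout(timer);
          }
          // Exponential backoff before the next attempt.
          if (attempt < maxRetries) {
            await new Promise((resolve) => setTimeout(resolve, 2 ** attempt * 250));
          }
        }
        throw lastError;
      }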

    • Returns string