import { z } from "zod"; import { type TraceableFunction } from "langsmith/singletons/traceable"; import type { RunnableInterface, RunnableBatchOptions, RunnableConfig } from "./types.js"; import { CallbackManagerForChainRun } from "../callbacks/manager.js"; import { LogStreamCallbackHandler, LogStreamCallbackHandlerInput, RunLogPatch } from "../tracers/log_stream.js"; import { EventStreamCallbackHandlerInput, StreamEvent } from "../tracers/event_stream.js"; import { Serializable } from "../load/serializable.js"; import { IterableReadableStream } from "../utils/stream.js"; import { Run } from "../tracers/base.js"; import { Graph } from "./graph.js"; import { ToolCall } from "../messages/tool.js"; export { type RunnableInterface, RunnableBatchOptions }; export type RunnableFunc = (input: RunInput, options: CallOptions | Record | (Record & CallOptions)) => RunOutput | Promise; export type RunnableMapLike = { [K in keyof RunOutput]: RunnableLike; }; export type RunnableLike = RunnableInterface | RunnableFunc | RunnableMapLike; export type RunnableRetryFailedAttemptHandler = (error: any, input: any) => any; export declare function _coerceToDict(value: any, defaultKey: string): any; /** * A Runnable is a generic unit of work that can be invoked, batched, streamed, and/or * transformed. */ export declare abstract class Runnable extends Serializable implements RunnableInterface { protected lc_runnable: boolean; name?: string; getName(suffix?: string): string; abstract invoke(input: RunInput, options?: Partial): Promise; /** * Bind arguments to a Runnable, returning a new Runnable. * @param kwargs * @returns A new RunnableBinding that, when invoked, will apply the bound args. * * @deprecated Use {@link withConfig} instead. This will be removed in the next breaking release. */ bind(kwargs: Partial): Runnable; /** * Return a new Runnable that maps a list of inputs to a list of outputs, * by calling invoke() with each input. * * @deprecated This will be removed in the next breaking release. */ map(): Runnable; /** * Add retry logic to an existing runnable. * @param fields.stopAfterAttempt The number of attempts to retry. * @param fields.onFailedAttempt A function that is called when a retry fails. * @returns A new RunnableRetry that, when invoked, will retry according to the parameters. */ withRetry(fields?: { stopAfterAttempt?: number; onFailedAttempt?: RunnableRetryFailedAttemptHandler; }): RunnableRetry; /** * Bind config to a Runnable, returning a new Runnable. * @param config New configuration parameters to attach to the new runnable. * @returns A new RunnableBinding with a config matching what's passed. */ withConfig(config: Partial): Runnable; /** * Create a new runnable from the current one that will try invoking * other passed fallback runnables if the initial invocation fails. * @param fields.fallbacks Other runnables to call if the runnable errors. * @returns A new RunnableWithFallbacks. */ withFallbacks(fields: { fallbacks: Runnable[]; } | Runnable[]): RunnableWithFallbacks; protected _getOptionsList(options: Partial | Partial[], length?: number): Partial[]; /** * Default implementation of batch, which calls invoke N times. * Subclasses should override this method if they can batch more efficiently. * @param inputs Array of inputs to each batch call. * @param options Either a single call options object to apply to each batch call or an array for each call. 
* @param batchOptions.returnExceptions Whether to return errors rather than throwing on the first one * @returns An array of RunOutputs, or mixed RunOutputs and errors if batchOptions.returnExceptions is set */ batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions & { returnExceptions?: false; }): Promise; batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions & { returnExceptions: true; }): Promise<(RunOutput | Error)[]>; batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>; /** * Default streaming implementation. * Subclasses should override this method if they support streaming output. * @param input * @param options */ _streamIterator(input: RunInput, options?: Partial): AsyncGenerator; /** * Stream output in chunks. * @param input * @param options * @returns A readable stream that is also an iterable. */ stream(input: RunInput, options?: Partial): Promise>; protected _separateRunnableConfigFromCallOptions(options?: Partial): [RunnableConfig, Omit, keyof RunnableConfig>]; protected _callWithConfig(func: ((input: T) => Promise) | ((input: T, config?: Partial, runManager?: CallbackManagerForChainRun) => Promise), input: T, options?: Partial & { runType?: string; }): Promise; /** * Internal method that handles batching and configuration for a runnable * It takes a function, input values, and optional configuration, and * returns a promise that resolves to the output values. * @param func The function to be executed for each input value. * @param input The input values to be processed. * @param config Optional configuration for the function execution. * @returns A promise that resolves to the output values. */ _batchWithConfig(func: (inputs: T[], options?: Partial[], runManagers?: (CallbackManagerForChainRun | undefined)[], batchOptions?: RunnableBatchOptions) => Promise<(RunOutput | Error)[]>, inputs: T[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>; /** * Helper method to transform an Iterator of Input values into an Iterator of * Output values, with callbacks. * Use this to implement `stream()` or `transform()` in Runnable subclasses. */ protected _transformStreamWithConfig(inputGenerator: AsyncGenerator, transformer: (generator: AsyncGenerator, runManager?: CallbackManagerForChainRun, options?: Partial) => AsyncGenerator, options?: Partial & { runType?: string; }): AsyncGenerator; getGraph(_?: RunnableConfig): Graph; /** * Create a new runnable sequence that runs each individual runnable in series, * piping the output of one runnable into another runnable or runnable-like. * @param coerceable A runnable, function, or object whose values are functions or runnables. * @returns A new runnable sequence. */ pipe(coerceable: RunnableLike): Runnable>; /** * Pick keys from the dict output of this runnable. Returns a new runnable. */ pick(keys: string | string[]): Runnable; /** * Assigns new fields to the dict output of this runnable. Returns a new runnable. */ assign(mapping: RunnableMapLike, Record>): Runnable; /** * Default implementation of transform, which buffers input and then calls stream. * Subclasses should override this method if they can start producing output while * input is still being generated. 
* @param generator * @param options */ transform(generator: AsyncGenerator, options: Partial): AsyncGenerator; /** * Stream all output from a runnable, as reported to the callback system. * This includes all inner runs of LLMs, Retrievers, Tools, etc. * Output is streamed as Log objects, which include a list of * jsonpatch ops that describe how the state of the run has changed in each * step, and the final state of the run. * The jsonpatch ops can be applied in order to construct state. * @param input * @param options * @param streamOptions */ streamLog(input: RunInput, options?: Partial, streamOptions?: Omit): AsyncGenerator; protected _streamLog(input: RunInput, logStreamCallbackHandler: LogStreamCallbackHandler, config: Partial): AsyncGenerator; /** * Generate a stream of events emitted by the internal steps of the runnable. * * Use to create an iterator over StreamEvents that provide real-time information * about the progress of the runnable, including StreamEvents from intermediate * results. * * A StreamEvent is a dictionary with the following schema: * * - `event`: string - Event names are of the format: on_[runnable_type]_(start|stream|end). * - `name`: string - The name of the runnable that generated the event. * - `run_id`: string - Randomly generated ID associated with the given execution of * the runnable that emitted the event. A child runnable that gets invoked as part of the execution of a * parent runnable is assigned its own unique ID. * - `tags`: string[] - The tags of the runnable that generated the event. * - `metadata`: Record - The metadata of the runnable that generated the event. * - `data`: Record * * Below is a table that illustrates some events that might be emitted by various * chains. Metadata fields have been omitted from the table for brevity. * Chain definitions have been included after the table. * * **ATTENTION** This reference table is for the V2 version of the schema. * * ```md * +----------------------+-----------------------------+------------------------------------------+ * | event | input | output/chunk | * +======================+=============================+==========================================+ * | on_chat_model_start | {"messages": BaseMessage[]} | | * +----------------------+-----------------------------+------------------------------------------+ * | on_chat_model_stream | | AIMessageChunk("hello") | * +----------------------+-----------------------------+------------------------------------------+ * | on_chat_model_end | {"messages": BaseMessage[]} | AIMessageChunk("hello world") | * +----------------------+-----------------------------+------------------------------------------+ * | on_llm_start | {'input': 'hello'} | | * +----------------------+-----------------------------+------------------------------------------+ * | on_llm_stream | | 'Hello' | * +----------------------+-----------------------------+------------------------------------------+ * | on_llm_end | 'Hello human!' | | * +----------------------+-----------------------------+------------------------------------------+ * | on_chain_start | | | * +----------------------+-----------------------------+------------------------------------------+ * | on_chain_stream | | "hello world!" | * +----------------------+-----------------------------+------------------------------------------+ * | on_chain_end | [Document(...)] | "hello world!, goodbye world!" 
| * +----------------------+-----------------------------+------------------------------------------+ * | on_tool_start        | {"x": 1, "y": "2"}          |                                          | * +----------------------+-----------------------------+------------------------------------------+ * | on_tool_end          |                             | {"x": 1, "y": "2"}                       | * +----------------------+-----------------------------+------------------------------------------+ * | on_retriever_start   | {"query": "hello"}          |                                          | * +----------------------+-----------------------------+------------------------------------------+ * | on_retriever_end     | {"query": "hello"}          | [Document(...), ..]                      | * +----------------------+-----------------------------+------------------------------------------+ * | on_prompt_start      | {"question": "hello"}       |                                          | * +----------------------+-----------------------------+------------------------------------------+ * | on_prompt_end        | {"question": "hello"}       | ChatPromptValue(messages: BaseMessage[]) | * +----------------------+-----------------------------+------------------------------------------+ * ``` * * The "on_chain_*" events are the default for Runnables that don't fit one of the above categories. * * In addition to the standard events above, users can also dispatch custom events. * * Custom events will only be surfaced in the `v2` version of the API! * * A custom event has the following format: * * ```md * +-----------+------+------------------------------------------------------------+ * | Attribute | Type | Description                                                | * +===========+======+============================================================+ * | name      | str  | A user defined name for the event.                         | * +-----------+------+------------------------------------------------------------+ * | data      | Any  | The data associated with the event. This can be anything.  | * +-----------+------+------------------------------------------------------------+ * ``` * * Here's an example: * * ```ts * import { RunnableLambda } from "@langchain/core/runnables"; * import { dispatchCustomEvent } from "@langchain/core/callbacks/dispatch"; * // Use this import for web environments that don't support "async_hooks" * // and manually pass config to child runs. * // import { dispatchCustomEvent } from "@langchain/core/callbacks/dispatch/web"; * * const slowThing = RunnableLambda.from(async (someInput: string) => { * // Placeholder for some slow operation * await new Promise((resolve) => setTimeout(resolve, 100)); * await dispatchCustomEvent("progress_event", { * message: "Finished step 1 of 2", * }); * await new Promise((resolve) => setTimeout(resolve, 100)); * return "Done"; * }); * * const eventStream = await slowThing.streamEvents("hello world", { * version: "v2", * }); * * for await (const event of eventStream) { * if (event.event === "on_custom_event") { * console.log(event); * } * } * ``` */ streamEvents(input: RunInput, options: Partial & { version: "v1" | "v2"; }, streamOptions?: Omit): IterableReadableStream; streamEvents(input: RunInput, options: Partial & { version: "v1" | "v2"; encoding: "text/event-stream"; }, streamOptions?: Omit): IterableReadableStream; private _streamEventsV2; private _streamEventsV1; static isRunnable(thing: any): thing is Runnable; /** * Bind lifecycle listeners to a Runnable, returning a new Runnable. * The Run object contains information about the run, including its id, * type, input, output, error, startTime, endTime, and any tags or metadata * added to the run. * * @param {Object} params - The object containing the callback functions.
* @param {(run: Run) => void} params.onStart - Called before the runnable starts running, with the Run object. * @param {(run: Run) => void} params.onEnd - Called after the runnable finishes running, with the Run object. * @param {(run: Run) => void} params.onError - Called if the runnable throws an error, with the Run object. */ withListeners({ onStart, onEnd, onError, }: { onStart?: (run: Run, config?: RunnableConfig) => void | Promise; onEnd?: (run: Run, config?: RunnableConfig) => void | Promise; onError?: (run: Run, config?: RunnableConfig) => void | Promise; }): Runnable; /** * Convert a runnable to a tool. Return a new instance of `RunnableToolLike` * which contains the runnable, name, description and schema. * * @template {T extends RunInput = RunInput} RunInput - The input type of the runnable. Should be the same as the `RunInput` type of the runnable. * * @param fields * @param {string | undefined} [fields.name] The name of the tool. If not provided, it will default to the name of the runnable. * @param {string | undefined} [fields.description] The description of the tool. Falls back to the description on the Zod schema if not provided, or undefined if neither are provided. * @param {z.ZodType} [fields.schema] The Zod schema for the input of the tool. Infers the Zod type from the input type of the runnable. * @returns {RunnableToolLike, RunOutput>} An instance of `RunnableToolLike` which is a runnable that can be used as a tool. */ asTool(fields: { name?: string; description?: string; schema: z.ZodType; }): RunnableToolLike, RunOutput>; } export type RunnableBindingArgs = { bound: Runnable; /** * @deprecated use {@link config} instead */ kwargs?: Partial; config: RunnableConfig; configFactories?: Array<(config: RunnableConfig) => RunnableConfig | Promise>; }; /** * Wraps a runnable and applies partial config upon invocation. * * @example * ```typescript * import { * type RunnableConfig, * RunnableLambda, * } from "@langchain/core/runnables"; * * const enhanceProfile = ( * profile: Record, * config?: RunnableConfig * ) => { * if (config?.configurable?.role) { * return { ...profile, role: config.configurable.role }; * } * return profile; * }; * * const runnable = RunnableLambda.from(enhanceProfile); * * // Bind configuration to the runnable to set the user's role dynamically * const adminRunnable = runnable.bind({ configurable: { role: "Admin" } }); * const userRunnable = runnable.bind({ configurable: { role: "User" } }); * * const result1 = await adminRunnable.invoke({ * name: "Alice", * email: "alice@example.com" * }); * * // { name: "Alice", email: "alice@example.com", role: "Admin" } * * const result2 = await userRunnable.invoke({ * name: "Bob", * email: "bob@example.com" * }); * * // { name: "Bob", email: "bob@example.com", role: "User" } * ``` */ export declare class RunnableBinding extends Runnable { static lc_name(): string; lc_namespace: string[]; lc_serializable: boolean; bound: Runnable; config: RunnableConfig; kwargs?: Partial; configFactories?: Array<(config: RunnableConfig) => RunnableConfig | Promise>; constructor(fields: RunnableBindingArgs); getName(suffix?: string | undefined): string; _mergeConfig(...options: (Partial | RunnableConfig | undefined)[]): Promise>; /** * Binds the runnable with the specified arguments. * @param kwargs The arguments to bind the runnable with. * @returns A new instance of the `RunnableBinding` class that is bound with the specified arguments. * * @deprecated Use {@link withConfig} instead. 
This will be removed in the next breaking release. */ bind(kwargs: Partial): RunnableBinding; withConfig(config: Partial): Runnable; withRetry(fields?: { stopAfterAttempt?: number; onFailedAttempt?: RunnableRetryFailedAttemptHandler; }): RunnableRetry; invoke(input: RunInput, options?: Partial): Promise; batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions & { returnExceptions?: false; }): Promise; batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions & { returnExceptions: true; }): Promise<(RunOutput | Error)[]>; batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>; _streamIterator(input: RunInput, options?: Partial | undefined): AsyncGenerator, void, unknown>; stream(input: RunInput, options?: Partial | undefined): Promise>; transform(generator: AsyncGenerator, options?: Partial): AsyncGenerator; streamEvents(input: RunInput, options: Partial & { version: "v1" | "v2"; }, streamOptions?: Omit): IterableReadableStream; streamEvents(input: RunInput, options: Partial & { version: "v1" | "v2"; encoding: "text/event-stream"; }, streamOptions?: Omit): IterableReadableStream; static isRunnableBinding(thing: any): thing is RunnableBinding; /** * Bind lifecycle listeners to a Runnable, returning a new Runnable. * The Run object contains information about the run, including its id, * type, input, output, error, startTime, endTime, and any tags or metadata * added to the run. * * @param {Object} params - The object containing the callback functions. * @param {(run: Run) => void} params.onStart - Called before the runnable starts running, with the Run object. * @param {(run: Run) => void} params.onEnd - Called after the runnable finishes running, with the Run object. * @param {(run: Run) => void} params.onError - Called if the runnable throws an error, with the Run object. */ withListeners({ onStart, onEnd, onError, }: { onStart?: (run: Run, config?: RunnableConfig) => void | Promise; onEnd?: (run: Run, config?: RunnableConfig) => void | Promise; onError?: (run: Run, config?: RunnableConfig) => void | Promise; }): Runnable; } /** * A runnable that delegates calls to another runnable * with each element of the input sequence. * @example * ```typescript * import { RunnableEach, RunnableLambda } from "@langchain/core/runnables"; * * const toUpperCase = (input: string): string => input.toUpperCase(); * const addGreeting = (input: string): string => `Hello, ${input}!`; * * const upperCaseLambda = RunnableLambda.from(toUpperCase); * const greetingLambda = RunnableLambda.from(addGreeting); * * const chain = new RunnableEach({ * bound: upperCaseLambda.pipe(greetingLambda), * }); * * const result = await chain.invoke(["alice", "bob", "carol"]) * * // ["Hello, ALICE!", "Hello, BOB!", "Hello, CAROL!"] * ``` * * @deprecated This will be removed in the next breaking release. */ export declare class RunnableEach extends Runnable { static lc_name(): string; lc_serializable: boolean; lc_namespace: string[]; bound: Runnable; constructor(fields: { bound: Runnable; }); /** * Binds the runnable with the specified arguments. * @param kwargs The arguments to bind the runnable with. * @returns A new instance of the `RunnableEach` class that is bound with the specified arguments. * * @deprecated Use {@link withConfig} instead. This will be removed in the next breaking release. 
*/ bind(kwargs: Partial): RunnableEach; /** * Invokes the runnable with the specified input and configuration. * @param input The input to invoke the runnable with. * @param config The configuration to invoke the runnable with. * @returns A promise that resolves to the output of the runnable. */ invoke(inputs: RunInputItem[], config?: Partial): Promise; /** * A helper method that is used to invoke the runnable with the specified input and configuration. * @param input The input to invoke the runnable with. * @param config The configuration to invoke the runnable with. * @returns A promise that resolves to the output of the runnable. */ protected _invoke(inputs: RunInputItem[], config?: Partial, runManager?: CallbackManagerForChainRun): Promise; /** * Bind lifecycle listeners to a Runnable, returning a new Runnable. * The Run object contains information about the run, including its id, * type, input, output, error, startTime, endTime, and any tags or metadata * added to the run. * * @param {Object} params - The object containing the callback functions. * @param {(run: Run) => void} params.onStart - Called before the runnable starts running, with the Run object. * @param {(run: Run) => void} params.onEnd - Called after the runnable finishes running, with the Run object. * @param {(run: Run) => void} params.onError - Called if the runnable throws an error, with the Run object. */ withListeners({ onStart, onEnd, onError, }: { onStart?: (run: Run, config?: RunnableConfig) => void | Promise; onEnd?: (run: Run, config?: RunnableConfig) => void | Promise; onError?: (run: Run, config?: RunnableConfig) => void | Promise; }): Runnable; } /** * Base class for runnables that can be retried a * specified number of times. * @example * ```typescript * import { * RunnableLambda, * RunnableRetry, * } from "@langchain/core/runnables"; * * // Simulate an API call that fails * const simulateApiCall = (input: string): string => { * console.log(`Attempting API call with input: ${input}`); * throw new Error("API call failed due to network issue"); * }; * * const apiCallLambda = RunnableLambda.from(simulateApiCall); * * // Apply retry logic using the .withRetry() method * const apiCallWithRetry = apiCallLambda.withRetry({ stopAfterAttempt: 3 }); * * // Alternatively, create a RunnableRetry instance manually * const manualRetry = new RunnableRetry({ * bound: apiCallLambda, * maxAttemptNumber: 3, * config: {}, * }); * * // Example invocation using the .withRetry() method * const res = await apiCallWithRetry * .invoke("Request 1") * .catch((error) => { * console.error("Failed after multiple retries:", error.message); * }); * * // Example invocation using the manual retry instance * const res2 = await manualRetry * .invoke("Request 2") * .catch((error) => { * console.error("Failed after multiple retries:", error.message); * }); * ``` */ export declare class RunnableRetry extends RunnableBinding { static lc_name(): string; lc_namespace: string[]; protected maxAttemptNumber: number; onFailedAttempt: RunnableRetryFailedAttemptHandler; constructor(fields: RunnableBindingArgs & { maxAttemptNumber?: number; onFailedAttempt?: RunnableRetryFailedAttemptHandler; }); _patchConfigForRetry(attempt: number, config?: Partial, runManager?: CallbackManagerForChainRun): Partial; protected _invoke(input: RunInput, config?: CallOptions, runManager?: CallbackManagerForChainRun): Promise; /** * Method that invokes the runnable with the specified input, run manager, * and config. 
It handles the retry logic by catching any errors and * recursively invoking itself with the updated config for the next retry * attempt. * @param input The input for the runnable. * @param runManager The run manager for the runnable. * @param config The config for the runnable. * @returns A promise that resolves to the output of the runnable. */ invoke(input: RunInput, config?: CallOptions): Promise; _batch(inputs: RunInput[], configs?: RunnableConfig[], runManagers?: (CallbackManagerForChainRun | undefined)[], batchOptions?: RunnableBatchOptions): Promise; batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions & { returnExceptions?: false; }): Promise; batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions & { returnExceptions: true; }): Promise<(RunOutput | Error)[]>; batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>; } export type RunnableSequenceFields = { first: Runnable; middle?: Runnable[]; last: Runnable; name?: string; omitSequenceTags?: boolean; }; /** * A sequence of runnables, where the output of each is the input of the next. * @example * ```typescript * const promptTemplate = PromptTemplate.fromTemplate( * "Tell me a joke about {topic}", * ); * const chain = RunnableSequence.from([promptTemplate, new ChatOpenAI({})]); * const result = await chain.invoke({ topic: "bears" }); * ``` */ export declare class RunnableSequence extends Runnable { static lc_name(): string; protected first: Runnable; protected middle: Runnable[]; protected last: Runnable; omitSequenceTags: boolean; lc_serializable: boolean; lc_namespace: string[]; constructor(fields: RunnableSequenceFields); get steps(): Runnable>>[]; invoke(input: RunInput, options?: RunnableConfig): Promise; batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions & { returnExceptions?: false; }): Promise; batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions & { returnExceptions: true; }): Promise<(RunOutput | Error)[]>; batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>; _streamIterator(input: RunInput, options?: RunnableConfig): AsyncGenerator; getGraph(config?: RunnableConfig): Graph; pipe(coerceable: RunnableLike): RunnableSequence>; static isRunnableSequence(thing: any): thing is RunnableSequence; static from([first, ...runnables]: [ RunnableLike, ...RunnableLike[], RunnableLike ], nameOrFields?: string | Omit, "first" | "middle" | "last">): RunnableSequence>; } /** * A runnable that runs a mapping of runnables in parallel, * and returns a mapping of their outputs. 
* @example * ```typescript * const mapChain = RunnableMap.from({ * joke: PromptTemplate.fromTemplate("Tell me a joke about {topic}").pipe( * new ChatAnthropic({}), * ), * poem: PromptTemplate.fromTemplate("write a 2-line poem about {topic}").pipe( * new ChatAnthropic({}), * ), * }); * const result = await mapChain.invoke({ topic: "bear" }); * ``` */ export declare class RunnableMap = Record> extends Runnable { static lc_name(): string; lc_namespace: string[]; lc_serializable: boolean; protected steps: Record>; getStepsKeys(): string[]; constructor(fields: { steps: RunnableMapLike; }); static from = Record>(steps: RunnableMapLike): RunnableMap; invoke(input: RunInput, options?: Partial): Promise; _transform(generator: AsyncGenerator, runManager?: CallbackManagerForChainRun, options?: Partial): AsyncGenerator; transform(generator: AsyncGenerator, options?: Partial): AsyncGenerator; stream(input: RunInput, options?: Partial): Promise>; } type AnyTraceableFunction = TraceableFunction<(...any: any[]) => any>; /** * A runnable that wraps a traced LangSmith function. */ export declare class RunnableTraceable extends Runnable { lc_serializable: boolean; lc_namespace: string[]; protected func: AnyTraceableFunction; constructor(fields: { func: AnyTraceableFunction; }); invoke(input: RunInput, options?: Partial): Promise; _streamIterator(input: RunInput, options?: Partial): AsyncGenerator; static from(func: AnyTraceableFunction): RunnableTraceable; } /** * A runnable that wraps an arbitrary function that takes a single argument. * @example * ```typescript * import { RunnableLambda } from "@langchain/core/runnables"; * * const add = (input: { x: number; y: number }) => input.x + input.y; * * const multiply = (input: { value: number; multiplier: number }) => * input.value * input.multiplier; * * // Create runnables for the functions * const addLambda = RunnableLambda.from(add); * const multiplyLambda = RunnableLambda.from(multiply); * * // Chain the lambdas for a mathematical operation * const chainedLambda = addLambda.pipe((result) => * multiplyLambda.invoke({ value: result, multiplier: 2 }) * ); * * // Example invocation of the chainedLambda * const result = await chainedLambda.invoke({ x: 2, y: 3 }); * * // Will log "10" (since (2 + 3) * 2 = 10) * ``` */ export declare class RunnableLambda extends Runnable { static lc_name(): string; lc_namespace: string[]; protected func: RunnableFunc, CallOptions>; constructor(fields: { func: RunnableFunc, CallOptions> | TraceableFunction, CallOptions>>; }); static from(func: RunnableFunc, CallOptions>): RunnableLambda; static from(func: TraceableFunction, CallOptions>>): RunnableLambda; _invoke(input: RunInput, config?: Partial, runManager?: CallbackManagerForChainRun): Promise; invoke(input: RunInput, options?: Partial): Promise; _transform(generator: AsyncGenerator, runManager?: CallbackManagerForChainRun, config?: Partial): AsyncGenerator; transform(generator: AsyncGenerator, options?: Partial): AsyncGenerator; stream(input: RunInput, options?: Partial): Promise>; } /** * A runnable that runs a mapping of runnables in parallel, * and returns a mapping of their outputs. 
* @example * ```typescript * import { * RunnableLambda, * RunnableParallel, * } from "@langchain/core/runnables"; * * const addYears = (age: number): number => age + 5; * const yearsToFifty = (age: number): number => 50 - age; * const yearsToHundred = (age: number): number => 100 - age; * * const addYearsLambda = RunnableLambda.from(addYears); * const milestoneFiftyLambda = RunnableLambda.from(yearsToFifty); * const milestoneHundredLambda = RunnableLambda.from(yearsToHundred); * * // Pipe will coerce objects into RunnableParallel by default, but we * // explicitly instantiate one here to demonstrate * const sequence = addYearsLambda.pipe( * RunnableParallel.from({ * years_to_fifty: milestoneFiftyLambda, * years_to_hundred: milestoneHundredLambda, * }) * ); * * // Invoke the sequence with a single age input * const res = await sequence.invoke(25); * * // { years_to_fifty: 20, years_to_hundred: 70 } * ``` */ export declare class RunnableParallel extends RunnableMap { } /** * A Runnable that can fall back to other Runnables if it fails. * External APIs (e.g., APIs for a language model) may at times experience * degraded performance or even downtime. * * In these cases, it can be useful to have a fallback Runnable that can be * used in place of the original Runnable (e.g., fall back to another LLM provider). * * Fallbacks can be defined at the level of a single Runnable, or at the level * of a chain of Runnables. Fallbacks are tried in order until one succeeds or * all fail. * * While you can instantiate a `RunnableWithFallbacks` directly, it is usually * more convenient to use the `withFallbacks` method on an existing Runnable. * * When streaming, fallbacks will only be called on failures during the initial * stream creation. Errors that occur after a stream starts will not fall back * to the next Runnable.
* * @example * ```typescript * import { * RunnableLambda, * RunnableWithFallbacks, * } from "@langchain/core/runnables"; * * const primaryOperation = (input: string): string => { * if (input !== "safe") { * throw new Error("Primary operation failed due to unsafe input"); * } * return `Processed: ${input}`; * }; * * // Define a fallback operation that processes the input differently * const fallbackOperation = (input: string): string => * `Fallback processed: ${input}`; * * const primaryRunnable = RunnableLambda.from(primaryOperation); * const fallbackRunnable = RunnableLambda.from(fallbackOperation); * * // Apply the fallback logic using the .withFallbacks() method * const runnableWithFallback = primaryRunnable.withFallbacks([fallbackRunnable]); * * // Alternatively, create a RunnableWithFallbacks instance manually * const manualFallbackChain = new RunnableWithFallbacks({ * runnable: primaryRunnable, * fallbacks: [fallbackRunnable], * }); * * // Example invocation using .withFallbacks() * const res = await runnableWithFallback * .invoke("unsafe input") * .catch((error) => { * console.error("Failed after all attempts:", error.message); * }); * * // "Fallback processed: unsafe input" * * // Example invocation using manual instantiation * const res2 = await manualFallbackChain * .invoke("safe") * .catch((error) => { * console.error("Failed after all attempts:", error.message); * }); * * // "Processed: safe" * ``` */ export declare class RunnableWithFallbacks extends Runnable { static lc_name(): string; lc_namespace: string[]; lc_serializable: boolean; runnable: Runnable; fallbacks: Runnable[]; constructor(fields: { runnable: Runnable; fallbacks: Runnable[]; }); runnables(): Generator>>, void, unknown>; invoke(input: RunInput, options?: Partial): Promise; _streamIterator(input: RunInput, options?: Partial | undefined): AsyncGenerator; batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions & { returnExceptions?: false; }): Promise; batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions & { returnExceptions: true; }): Promise<(RunOutput | Error)[]>; batch(inputs: RunInput[], options?: Partial | Partial[], batchOptions?: RunnableBatchOptions): Promise<(RunOutput | Error)[]>; } export declare function _coerceToRunnable(coerceable: RunnableLike): Runnable, CallOptions>; export interface RunnableAssignFields { mapper: RunnableMap; } /** * A runnable that assigns key-value pairs to inputs of type `Record`. * @example * ```typescript * import { * RunnableAssign, * RunnableLambda, * RunnableParallel, * } from "@langchain/core/runnables"; * * const calculateAge = (x: { birthYear: number }): { age: number } => { * const currentYear = new Date().getFullYear(); * return { age: currentYear - x.birthYear }; * }; * * const createGreeting = (x: { name: string }): { greeting: string } => { * return { greeting: `Hello, ${x.name}!` }; * }; * * const mapper = RunnableParallel.from({ * age_step: RunnableLambda.from(calculateAge), * greeting_step: RunnableLambda.from(createGreeting), * }); * * const runnableAssign = new RunnableAssign({ mapper }); * * const res = await runnableAssign.invoke({ name: "Alice", birthYear: 1990 }); * * // { name: "Alice", birthYear: 1990, age_step: { age: 34 }, greeting_step: { greeting: "Hello, Alice!"
} } * ``` */ export declare class RunnableAssign = Record, RunOutput extends Record = Record, CallOptions extends RunnableConfig = RunnableConfig> extends Runnable implements RunnableAssignFields { static lc_name(): string; lc_namespace: string[]; lc_serializable: boolean; mapper: RunnableMap; constructor(fields: RunnableMap | RunnableAssignFields); invoke(input: RunInput, options?: Partial): Promise; _transform(generator: AsyncGenerator, runManager?: CallbackManagerForChainRun, options?: Partial): AsyncGenerator; transform(generator: AsyncGenerator, options?: Partial): AsyncGenerator; stream(input: RunInput, options?: Partial): Promise>; } export interface RunnablePickFields { keys: string | string[]; } /** * A runnable that picks the specified keys from inputs of type `Record`. * Useful for streaming; it can be created and chained automatically by calling `runnable.pick()`. * @example * ```typescript * import { RunnablePick } from "@langchain/core/runnables"; * * const inputData = { * name: "John", * age: 30, * city: "New York", * country: "USA", * email: "john.doe@example.com", * phone: "+1234567890", * }; * * const basicInfoRunnable = new RunnablePick(["name", "city"]); * * // Example invocation * const res = await basicInfoRunnable.invoke(inputData); * * // { name: 'John', city: 'New York' } * ``` */ export declare class RunnablePick = Record, RunOutput extends Record | any = Record | any, CallOptions extends RunnableConfig = RunnableConfig> extends Runnable implements RunnablePickFields { static lc_name(): string; lc_namespace: string[]; lc_serializable: boolean; keys: string | string[]; constructor(fields: string | string[] | RunnablePickFields); _pick(input: RunInput): Promise; invoke(input: RunInput, options?: Partial): Promise; _transform(generator: AsyncGenerator): AsyncGenerator; transform(generator: AsyncGenerator, options?: Partial): AsyncGenerator; stream(input: RunInput, options?: Partial): Promise>; } export interface RunnableToolLikeArgs extends Omit, RunOutput>, "config"> { name: string; description?: string; schema: RunInput; config?: RunnableConfig; } export declare class RunnableToolLike extends RunnableBinding, RunOutput> { name: string; description?: string; schema: RunInput; constructor(fields: RunnableToolLikeArgs); static lc_name(): string; } /** * Given a runnable and a Zod schema, convert the runnable to a tool. * * @template RunInput The input type for the runnable. * @template RunOutput The output type for the runnable. * * @param {Runnable} runnable The runnable to convert to a tool. * @param fields * @param {string | undefined} [fields.name] The name of the tool. If not provided, it will default to the name of the runnable. * @param {string | undefined} [fields.description] The description of the tool. Falls back to the description on the Zod schema if not provided, or undefined if neither are provided. * @param {z.ZodType} [fields.schema] The Zod schema for the input of the tool. Infers the Zod type from the input type of the runnable. * @returns {RunnableToolLike, RunOutput>} An instance of `RunnableToolLike` which is a runnable that can be used as a tool. */ export declare function convertRunnableToTool(runnable: Runnable, fields: { name?: string; description?: string; schema: z.ZodType; }): RunnableToolLike, RunOutput>;
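/**
 * Usage sketch for the `batch` overloads declared above (illustrative only, not
 * part of the generated declarations). The `double` lambda and its inputs are
 * hypothetical; `batch`, its `options` parameter, and the `returnExceptions`
 * batch option are the APIs documented in this file.
 *
 * ```typescript
 * import { RunnableLambda } from "@langchain/core/runnables";
 *
 * const double = RunnableLambda.from((n: number) => {
 *   if (n < 0) throw new Error("negative input");
 *   return n * 2;
 * });
 *
 * // Default behaviour: the returned promise rejects as soon as one input fails.
 * // await double.batch([1, 2, 3]); // [2, 4, 6]
 *
 * // With returnExceptions, failures are returned in place as Error values, so the
 * // result type widens to (number | Error)[].
 * const mixed = await double.batch([1, -1, 3], undefined, {
 *   returnExceptions: true,
 * });
 * // [2, Error("negative input"), 6]
 * ```
 */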
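/**
 * Usage sketch for `withConfig`, which the declarations above recommend over the
 * deprecated `bind`. It mirrors the `RunnableBinding` example earlier in this file;
 * the `enhanceProfile` function and the "Admin"/"Guest" role values are
 * hypothetical.
 *
 * ```typescript
 * import { RunnableLambda, type RunnableConfig } from "@langchain/core/runnables";
 *
 * const enhanceProfile = (
 *   profile: Record<string, any>,
 *   config?: RunnableConfig
 * ) => ({ ...profile, role: config?.configurable?.role ?? "Guest" });
 *
 * // withConfig returns a RunnableBinding whose config is merged into every call.
 * const adminRunnable = RunnableLambda.from(enhanceProfile).withConfig({
 *   configurable: { role: "Admin" },
 * });
 *
 * const res = await adminRunnable.invoke({ name: "Alice" });
 * // { name: "Alice", role: "Admin" }
 * ```
 */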
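/**
 * Usage sketch for `asTool` / `RunnableToolLike` declared above (illustrative only).
 * The `concat` lambda, its name, description, and Zod schema are hypothetical;
 * `RunnableLambda.from`, `asTool`, and `invoke` are the APIs documented in this
 * file, and `convertRunnableToTool` is the equivalent standalone helper.
 *
 * ```typescript
 * import { z } from "zod";
 * import { RunnableLambda } from "@langchain/core/runnables";
 *
 * // An ordinary runnable built from a plain function.
 * const concat = RunnableLambda.from(
 *   (input: { a: string; b: string }) => `${input.a}${input.b}`
 * );
 *
 * // Convert it into a RunnableToolLike with a name, description, and input schema,
 * // so it can be passed anywhere a tool is expected.
 * const concatTool = concat.asTool({
 *   name: "concat",
 *   description: "Concatenate two strings.",
 *   schema: z.object({ a: z.string(), b: z.string() }),
 * });
 *
 * const res = await concatTool.invoke({ a: "hello ", b: "world" });
 * // "hello world"
 * ```
 */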