BedrockChat & GeminiChat (#186809)

## Summary

Adopted `BedrockChat` from the `@langchain/community` package, which adds
support for tool calling:
https://js.langchain.com/v0.2/docs/integrations/chat/bedrock/

Adopted `ChatGoogleGenerativeAI` from the `@langchain/google-genai` package,
which adds support for tool calling:
https://js.langchain.com/v0.2/docs/integrations/chat/google_generativeai
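
Both classes implement LangChain tool calling. A minimal sketch, assuming an
instance of one of the new models is already in scope (the tool itself is
hypothetical, for illustration only):

```ts
import { ActionsClientBedrockChatModel } from '@kbn/langchain/server';
import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';

// Assume one of the new chat models has already been constructed.
declare const llm: ActionsClientBedrockChatModel;

// Hypothetical tool for illustration only.
const getAlertCount = new DynamicStructuredTool({
  name: 'get_alert_count',
  description: 'Returns the number of open alerts',
  schema: z.object({}),
  func: async () => '42',
});

// bindTools is the standard LangChain tool-calling API that the existing
// ActionsClientSimpleChatModel could not support.
const modelWithTools = llm.bindTools([getAlertCount]);
const result = await modelWithTools.invoke('How many alerts are open?');
// result.tool_calls lists any tool invocations the model requested
```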

Hidden behind the feature flag (FF):
`--xpack.securitySolution.enableExperimental=[assistantBedrockChat]`
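
With the flag on, model selection is routed through the new `getLlmClass`
helper (see the `routes/utils` change in the diff below). A usage sketch; the
import path is abbreviated:

```ts
import { getLlmClass } from './routes/utils';

// With the FF enabled, Bedrock and Gemini connectors get the new
// tool-calling chat models; with it disabled, everything falls back to
// ActionsClientSimpleChatModel.
const llmClass = getLlmClass('bedrock', /* bedrockChatEnabled */ true);
// -> ActionsClientBedrockChatModel
```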

As of this PR, `integration_assistant` still uses
`ActionsClientSimpleChatModel`. Once the FF is enabled by default, we will
switch `integration_assistant` to the new chat models.
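
For reference, a minimal sketch of constructing the new Bedrock model, with
parameter names taken from `CustomChatModelInput` in the diff (the values and
surrounding declarations are illustrative):

```ts
import type { ActionsClient } from '@kbn/actions-plugin/server';
import type { Logger } from '@kbn/logging';
import type { PublicMethodsOf } from '@kbn/utility-types';
import { ActionsClientBedrockChatModel } from '@kbn/langchain/server';

declare const actionsClient: PublicMethodsOf<ActionsClient>;
declare const connectorId: string;
declare const logger: Logger;

const llm = new ActionsClientBedrockChatModel({
  actionsClient,
  connectorId,
  logger,
  // model is intentionally omitted so the connector defines it
  streaming: true,
  temperature: 0,
  maxRetries: 0, // surface connector failures to the client immediately
});

const response = await llm.invoke('Summarize the latest alerts.');
```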

Thank you @stephmilovic a ton 🙇

---------

Co-authored-by: kibanamachine <42973632+kibanamachine@users.noreply.github.com>
Co-authored-by: Steph Milovic <stephanie.milovic@elastic.co>
Co-authored-by: Garrett Spong <spong@users.noreply.github.com>
Patryk Kopyciński 2024-07-23 22:17:21 +02:00 committed by GitHub
parent e12e4496e0
commit 26dd61efa2
40 changed files with 1991 additions and 191 deletions


@ -80,7 +80,7 @@
"resolutions": {
"**/@bazel/typescript/protobufjs": "6.11.4",
"**/@hello-pangea/dnd": "16.6.0",
"**/@langchain/core": "0.2.3",
"**/@langchain/core": "^0.2.17",
"**/@types/node": "20.10.5",
"**/@typescript-eslint/utils": "5.62.0",
"**/chokidar": "^3.5.3",
@ -88,6 +88,7 @@
"**/globule/minimatch": "^3.1.2",
"**/hoist-non-react-statics": "^3.3.2",
"**/isomorphic-fetch/node-fetch": "^2.6.7",
"**/langchain": "^0.2.10",
"**/react-intl/**/@types/react": "^17.0.45",
"**/remark-parse/trim": "1.0.1",
"**/sharp": "0.32.6",
@ -96,6 +97,8 @@
},
"dependencies": {
"@appland/sql-parser": "^1.5.1",
"@aws-crypto/sha256-js": "^5.2.0",
"@aws-crypto/util": "^5.2.0",
"@babel/runtime": "^7.24.7",
"@cfworker/json-schema": "^1.12.7",
"@dnd-kit/core": "^6.1.0",
@ -132,6 +135,7 @@
"@formatjs/intl-relativetimeformat": "^11.2.12",
"@formatjs/intl-utils": "^3.8.4",
"@formatjs/ts-transformer": "^3.13.14",
"@google/generative-ai": "^0.7.0",
"@grpc/grpc-js": "^1.8.22",
"@hapi/accept": "^5.0.2",
"@hapi/boom": "^9.1.4",
@ -940,10 +944,11 @@
"@kbn/xstate-utils": "link:packages/kbn-xstate-utils",
"@kbn/zod": "link:packages/kbn-zod",
"@kbn/zod-helpers": "link:packages/kbn-zod-helpers",
"@langchain/community": "^0.2.4",
"@langchain/core": "0.2.3",
"@langchain/langgraph": "^0.0.23",
"@langchain/openai": "^0.0.34",
"@langchain/community": "0.2.18",
"@langchain/core": "^0.2.17",
"@langchain/google-genai": "^0.0.23",
"@langchain/langgraph": "^0.0.29",
"@langchain/openai": "^0.1.3",
"@langtrase/trace-attributes": "^3.0.8",
"@launchdarkly/node-server-sdk": "^9.4.7",
"@loaders.gl/core": "^3.4.7",
@ -966,9 +971,11 @@
"@paralleldrive/cuid2": "^2.2.2",
"@reduxjs/toolkit": "1.9.7",
"@slack/webhook": "^7.0.1",
"@smithy/eventstream-codec": "^3.0.0",
"@smithy/eventstream-serde-node": "^3.0.0",
"@smithy/types": "^3.0.0",
"@smithy/eventstream-codec": "^3.1.1",
"@smithy/eventstream-serde-node": "^3.0.3",
"@smithy/protocol-http": "^4.0.2",
"@smithy/signature-v4": "^3.1.1",
"@smithy/types": "^3.2.0",
"@smithy/util-utf8": "^3.0.0",
"@tanstack/react-query": "^4.29.12",
"@tanstack/react-query-devtools": "^4.29.12",
@ -1082,8 +1089,8 @@
"jsonwebtoken": "^9.0.2",
"jsts": "^1.6.2",
"kea": "^2.6.0",
"langchain": "0.2.3",
"langsmith": "^0.1.30",
"langchain": "^0.2.10",
"langsmith": "^0.1.37",
"launchdarkly-js-client-sdk": "^3.4.0",
"launchdarkly-node-server-sdk": "^7.0.3",
"load-json-file": "^6.2.0",


@ -21,4 +21,5 @@ export type AssistantFeatureKey = keyof AssistantFeatures;
export const defaultAssistantFeatures = Object.freeze({
assistantKnowledgeBaseByDefault: false,
assistantModelEvaluation: false,
assistantBedrockChat: false,
});


@ -18,6 +18,7 @@ import { z } from 'zod';
export type GetCapabilitiesResponse = z.infer<typeof GetCapabilitiesResponse>;
export const GetCapabilitiesResponse = z.object({
assistantBedrockChat: z.boolean(),
assistantKnowledgeBaseByDefault: z.boolean(),
assistantModelEvaluation: z.boolean(),
});


@ -19,11 +19,14 @@ paths:
schema:
type: object
properties:
assistantBedrockChat:
type: boolean
assistantKnowledgeBaseByDefault:
type: boolean
assistantModelEvaluation:
type: boolean
required:
- assistantBedrockChat
- assistantKnowledgeBaseByDefault
- assistantModelEvaluation
'400':


@ -5,9 +5,11 @@
* 2.0.
*/
import { ActionsClientBedrockChatModel } from './language_models/bedrock_chat';
import { ActionsClientChatOpenAI } from './language_models/chat_openai';
import { ActionsClientLlm } from './language_models/llm';
import { ActionsClientSimpleChatModel } from './language_models/simple_chat_model';
import { ActionsClientGeminiChatModel } from './language_models/gemini_chat';
import { parseBedrockStream } from './utils/bedrock';
import { parseGeminiResponse } from './utils/gemini';
import { getDefaultArguments } from './language_models/constants';
@ -16,7 +18,9 @@ export {
parseBedrockStream,
parseGeminiResponse,
getDefaultArguments,
ActionsClientBedrockChatModel,
ActionsClientChatOpenAI,
ActionsClientGeminiChatModel,
ActionsClientLlm,
ActionsClientSimpleChatModel,
};


@ -0,0 +1,84 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { BedrockChat as _BedrockChat } from '@langchain/community/chat_models/bedrock/web';
import type { ActionsClient } from '@kbn/actions-plugin/server';
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models';
import { Logger } from '@kbn/logging';
import { Readable } from 'stream';
import { PublicMethodsOf } from '@kbn/utility-types';
export const DEFAULT_BEDROCK_MODEL = 'anthropic.claude-3-5-sonnet-20240620-v1:0';
export const DEFAULT_BEDROCK_REGION = 'us-east-1';
export interface CustomChatModelInput extends BaseChatModelParams {
actionsClient: PublicMethodsOf<ActionsClient>;
connectorId: string;
logger: Logger;
temperature?: number;
signal?: AbortSignal;
model?: string;
maxTokens?: number;
}
export class ActionsClientBedrockChatModel extends _BedrockChat {
constructor({ actionsClient, connectorId, logger, ...params }: CustomChatModelInput) {
super({
...params,
credentials: { accessKeyId: '', secretAccessKey: '' },
// only needed to force BedrockChat to use messages api for Claude v2
model: params.model ?? DEFAULT_BEDROCK_MODEL,
region: DEFAULT_BEDROCK_REGION,
fetchFn: async (url, options) => {
const inputBody = JSON.parse(options?.body as string);
if (this.streaming && !inputBody.tools?.length) {
const data = (await actionsClient.execute({
actionId: connectorId,
params: {
subAction: 'invokeStream',
subActionParams: {
messages: inputBody.messages,
temperature: params.temperature ?? inputBody.temperature,
stopSequences: inputBody.stop_sequences,
system: inputBody.system,
maxTokens: params.maxTokens ?? inputBody.max_tokens,
tools: inputBody.tools,
anthropicVersion: inputBody.anthropic_version,
},
},
})) as { data: Readable };
return {
body: Readable.toWeb(data.data),
} as unknown as Response;
}
const data = (await actionsClient.execute({
actionId: connectorId,
params: {
subAction: 'invokeAIRaw',
subActionParams: {
messages: inputBody.messages,
temperature: params.temperature ?? inputBody.temperature,
stopSequences: inputBody.stop_sequences,
system: inputBody.system,
maxTokens: params.maxTokens ?? inputBody.max_tokens,
tools: inputBody.tools,
anthropicVersion: inputBody.anthropic_version,
},
},
})) as { status: string; data: { message: string } };
return {
ok: data.status === 'ok',
json: () => data.data,
} as unknown as Response;
},
});
}
}


@ -0,0 +1,461 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import {
Content,
EnhancedGenerateContentResponse,
FunctionCallPart,
FunctionResponsePart,
GenerateContentRequest,
GenerateContentResult,
InlineDataPart,
POSSIBLE_ROLES,
Part,
TextPart,
} from '@google/generative-ai';
import { ActionsClient } from '@kbn/actions-plugin/server';
import { PublicMethodsOf } from '@kbn/utility-types';
import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
import { ToolCallChunk } from '@langchain/core/dist/messages/tool';
import {
AIMessageChunk,
BaseMessage,
ChatMessage,
isBaseMessage,
UsageMetadata,
} from '@langchain/core/messages';
import { ChatGenerationChunk } from '@langchain/core/outputs';
import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
import { Logger } from '@kbn/logging';
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models';
import { get } from 'lodash/fp';
import { Readable } from 'stream';
const DEFAULT_GEMINI_TEMPERATURE = 0;
export interface CustomChatModelInput extends BaseChatModelParams {
actionsClient: PublicMethodsOf<ActionsClient>;
connectorId: string;
logger: Logger;
temperature?: number;
signal?: AbortSignal;
model?: string;
maxTokens?: number;
}
export class ActionsClientGeminiChatModel extends ChatGoogleGenerativeAI {
#actionsClient: PublicMethodsOf<ActionsClient>;
#connectorId: string;
#temperature: number;
#model?: string;
constructor({ actionsClient, connectorId, ...props }: CustomChatModelInput) {
super({
...props,
apiKey: 'asda',
maxOutputTokens: props.maxTokens ?? 2048,
});
// LangChain needs model to be defined for logging purposes
this.model = props.model ?? this.model;
// If model is not specified by the consumer, the connector will define it,
// so do not pass a LangChain default to the actionsClient
this.#model = props.model;
this.#temperature = props.temperature ?? DEFAULT_GEMINI_TEMPERATURE;
this.#actionsClient = actionsClient;
this.#connectorId = connectorId;
}
async completionWithRetry(
request: string | GenerateContentRequest | Array<string | Part>,
options?: this['ParsedCallOptions']
): Promise<GenerateContentResult> {
return this.caller.callWithOptions({ signal: options?.signal }, async () => {
try {
const requestBody = {
actionId: this.#connectorId,
params: {
subAction: 'invokeAIRaw',
subActionParams: {
model: this.#model,
messages: request,
temperature: this.#temperature,
},
},
};
const actionResult = (await this.#actionsClient.execute(requestBody)) as {
status: string;
data: EnhancedGenerateContentResponse;
message?: string;
serviceMessage?: string;
};
if (actionResult.status === 'error') {
throw new Error(
`ActionsClientGeminiChatModel: action result status is error: ${actionResult?.message} - ${actionResult?.serviceMessage}`
);
}
return {
response: {
...actionResult.data,
functionCalls: () =>
actionResult.data?.candidates?.[0]?.content?.parts[0].functionCall
? [actionResult.data?.candidates?.[0]?.content.parts[0].functionCall]
: [],
},
};
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} catch (e: any) {
// TODO: Improve error handling
if (e.message?.includes('400 Bad Request')) {
e.status = 400;
}
throw e;
}
});
}
async *_streamResponseChunks(
messages: BaseMessage[],
options: this['ParsedCallOptions'],
runManager?: CallbackManagerForLLMRun
): AsyncGenerator<ChatGenerationChunk> {
const prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel);
const parameters = this.invocationParams(options);
const request = {
...parameters,
contents: prompt,
};
const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
const requestBody = {
actionId: this.#connectorId,
params: {
subAction: 'invokeStream',
subActionParams: {
model: this.#model,
messages: request.contents.reduce((acc: Content[], item) => {
if (!acc?.length) {
acc.push(item);
return acc;
}
if (acc[acc.length - 1].role === item.role) {
acc[acc.length - 1].parts = acc[acc.length - 1].parts.concat(item.parts);
return acc;
}
acc.push(item);
return acc;
}, []),
temperature: this.#temperature,
tools: request.tools,
},
},
};
const actionResult = await this.#actionsClient.execute(requestBody);
if (actionResult.status === 'error') {
throw new Error(
`ActionsClientGeminiChatModel: action result status is error: ${actionResult?.message} - ${actionResult?.serviceMessage}`
);
}
const readable = get('data', actionResult) as Readable;
if (typeof readable?.read !== 'function') {
throw new Error('Action result status is error: result is not streamable');
}
return readable;
});
let usageMetadata: UsageMetadata | undefined;
let index = 0;
let partialStreamChunk = '';
for await (const rawStreamChunk of stream) {
const streamChunk = rawStreamChunk.toString();
const nextChunk = `${partialStreamChunk + streamChunk}`;
let parsedStreamChunk: EnhancedGenerateContentResponse | null = null;
try {
parsedStreamChunk = JSON.parse(nextChunk.replaceAll('data: ', '').replaceAll('\r\n', ''));
partialStreamChunk = '';
} catch (_) {
partialStreamChunk += nextChunk;
}
if (parsedStreamChunk !== null && !parsedStreamChunk.candidates?.[0]?.finishReason) {
const response = {
...parsedStreamChunk,
functionCalls: () =>
parsedStreamChunk?.candidates?.[0]?.content.parts[0].functionCall
? [parsedStreamChunk.candidates?.[0]?.content.parts[0].functionCall]
: [],
};
if (
'usageMetadata' in response &&
this.streamUsage !== false &&
options.streamUsage !== false
) {
const genAIUsageMetadata = response.usageMetadata as {
promptTokenCount: number;
candidatesTokenCount: number;
totalTokenCount: number;
};
if (!usageMetadata) {
usageMetadata = {
input_tokens: genAIUsageMetadata.promptTokenCount,
output_tokens: genAIUsageMetadata.candidatesTokenCount,
total_tokens: genAIUsageMetadata.totalTokenCount,
};
} else {
// Under the hood, LangChain combines the prompt tokens. Google returns the updated
// total each time, so we need to find the difference between the tokens.
const outputTokenDiff =
genAIUsageMetadata.candidatesTokenCount - usageMetadata.output_tokens;
usageMetadata = {
input_tokens: 0,
output_tokens: outputTokenDiff,
total_tokens: outputTokenDiff,
};
}
}
const chunk = convertResponseContentToChatGenerationChunk(response, {
usageMetadata,
index,
});
index += 1;
if (chunk) {
yield chunk;
await runManager?.handleLLMNewToken(chunk.text ?? '');
}
}
}
}
}
export function convertResponseContentToChatGenerationChunk(
response: EnhancedGenerateContentResponse,
extra: {
usageMetadata?: UsageMetadata | undefined;
index: number;
}
): ChatGenerationChunk | null {
if (!response.candidates || response.candidates.length === 0) {
return null;
}
const functionCalls = response.functionCalls();
const [candidate] = response.candidates;
const { content, ...generationInfo } = candidate;
const text = content?.parts[0]?.text ?? '';
const toolCallChunks: ToolCallChunk[] = [];
if (functionCalls) {
toolCallChunks.push(
...functionCalls.map((fc) => ({
...fc,
args: JSON.stringify(fc.args),
index: extra.index,
type: 'tool_call_chunk' as const,
}))
);
}
return new ChatGenerationChunk({
text,
message: new AIMessageChunk({
content: text,
name: !content ? undefined : content.role,
tool_call_chunks: toolCallChunks,
// Each chunk can have unique "generationInfo", and merging strategy is unclear,
// so leave blank for now.
additional_kwargs: {},
usage_metadata: extra.usageMetadata,
}),
generationInfo,
});
}
export function convertAuthorToRole(author: string): typeof POSSIBLE_ROLES[number] {
switch (author) {
/**
* Note: Gemini currently does not support system messages,
* so we convert them to human messages and merge with the following one
* */
case 'ai':
case 'model': // getMessageAuthor returns message.name. code ex.: return message.name ?? type;
return 'model';
case 'system':
case 'human':
return 'user';
case 'tool':
case 'function':
return 'function';
default:
throw new Error(`Unknown / unsupported author: ${author}`);
}
}
export function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel: boolean) {
return messages.reduce<{
content: Content[];
mergeWithPreviousContent: boolean;
}>(
(acc, message, index) => {
if (!isBaseMessage(message)) {
throw new Error('Unsupported message input');
}
const author = getMessageAuthor(message);
if (author === 'system' && index !== 0) {
throw new Error('System message should be the first one');
}
const role = convertAuthorToRole(author);
const parts = convertMessageContentToParts(message, isMultimodalModel);
if (acc.mergeWithPreviousContent) {
const prevContent = acc.content[acc.content.length - 1];
if (!prevContent) {
throw new Error(
'There was a problem parsing your system message. Please try a prompt without one.'
);
}
prevContent.parts.push(...parts);
return {
mergeWithPreviousContent: false,
content: acc.content,
};
}
let actualRole = role;
if (actualRole === 'function') {
// GenerativeAI API will throw an error if the role is not "user" or "model."
actualRole = 'user';
}
const content: Content = {
role: actualRole,
parts,
};
return {
mergeWithPreviousContent: author === 'system',
content: [...acc.content, content],
};
},
{ content: [], mergeWithPreviousContent: false }
).content;
}
export function convertMessageContentToParts(
message: BaseMessage,
isMultimodalModel: boolean
): Part[] {
if (typeof message.content === 'string' && message.content !== '') {
return [{ text: message.content }];
}
let functionCalls: FunctionCallPart[] = [];
let functionResponses: FunctionResponsePart[] = [];
let messageParts: Part[] = [];
if (
'tool_calls' in message &&
Array.isArray(message.tool_calls) &&
message.tool_calls.length > 0
) {
functionCalls = message.tool_calls.map((tc) => ({
functionCall: {
name: tc.name,
args: tc.args,
},
}));
} else if (message._getType() === 'tool' && message.name && message.content) {
functionResponses = [
{
functionResponse: {
name: message.name,
response: message.content,
},
},
];
} else if (Array.isArray(message.content)) {
messageParts = message.content.map((c) => {
if (c.type === 'text') {
return {
text: c.text,
} as TextPart;
}
if (c.type === 'image_url') {
if (!isMultimodalModel) {
throw new Error(`This model does not support images`);
}
let source;
if (typeof c.image_url === 'string') {
source = c.image_url;
} else if (typeof c.image_url === 'object' && 'url' in c.image_url) {
source = c.image_url.url;
} else {
throw new Error('Please provide image as base64 encoded data URL');
}
const [dm, data] = source.split(',');
if (!dm.startsWith('data:')) {
throw new Error('Please provide image as base64 encoded data URL');
}
const [mimeType, encoding] = dm.replace(/^data:/, '').split(';');
if (encoding !== 'base64') {
throw new Error('Please provide image as base64 encoded data URL');
}
return {
inlineData: {
data,
mimeType,
},
} as InlineDataPart;
} else if (c.type === 'media') {
return messageContentMedia(c);
} else if (c.type === 'tool_use') {
return {
functionCall: {
name: c.name,
args: c.input,
},
} as FunctionCallPart;
}
throw new Error(`Unknown content type ${(c as { type: string }).type}`);
});
}
return [...messageParts, ...functionCalls, ...functionResponses];
}
export function getMessageAuthor(message: BaseMessage) {
const type = message._getType();
if (ChatMessage.isInstance(message)) {
return message.role;
}
if (type === 'tool') {
return type;
}
return message.name ?? type;
}
// will be removed once FileDataPart is supported in @langchain/google-genai
function messageContentMedia(content: Record<string, unknown>): InlineDataPart {
if ('mimeType' in content && 'data' in content) {
return {
inlineData: {
mimeType: content.mimeType,
data: content.data,
},
} as InlineDataPart;
}
throw new Error('Invalid media content');
}


@ -5,6 +5,8 @@
* 2.0.
*/
export { ActionsClientBedrockChatModel } from './bedrock_chat';
export { ActionsClientChatOpenAI } from './chat_openai';
export { ActionsClientGeminiChatModel } from './gemini_chat';
export { ActionsClientLlm } from './llm';
export { ActionsClientSimpleChatModel } from './simple_chat_model';


@ -16,7 +16,7 @@ export interface InvokeAIActionParamsSchema {
function_call?: {
arguments: string;
name: string;
};
} | null;
tool_calls?: Array<{
id: string;


@ -45,6 +45,19 @@ Object {
],
"type": "string",
},
"raw": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"type": "boolean",
},
"signal": Object {
"flags": Object {
"default": [Function],
@ -153,6 +166,19 @@ Object {
],
"type": "string",
},
"raw": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"type": "boolean",
},
"signal": Object {
"flags": Object {
"default": [Function],
@ -197,6 +223,27 @@ Object {
"presence": "optional",
},
"keys": Object {
"anthropicVersion": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"rules": Array [
Object {
"args": Object {
"method": [Function],
},
"name": "custom",
},
],
"type": "string",
},
"maxTokens": Object {
"flags": Object {
"default": [Function],
@ -372,6 +419,78 @@ Object {
],
"type": "number",
},
"tools": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"items": Array [
Object {
"flags": Object {
"default": Object {
"special": "deep",
},
"error": [Function],
"presence": "optional",
},
"keys": Object {
"description": Object {
"flags": Object {
"error": [Function],
},
"rules": Array [
Object {
"args": Object {
"method": [Function],
},
"name": "custom",
},
],
"type": "string",
},
"input_schema": Object {
"flags": Object {
"default": Object {
"special": "deep",
},
"error": [Function],
"presence": "optional",
"unknown": true,
},
"keys": Object {},
"preferences": Object {
"stripUnknown": Object {
"objects": false,
},
},
"type": "object",
},
"name": Object {
"flags": Object {
"error": [Function],
},
"rules": Array [
Object {
"args": Object {
"method": [Function],
},
"name": "custom",
},
],
"type": "string",
},
},
"type": "object",
},
],
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"type": "array",
},
},
"type": "object",
}
@ -387,6 +506,27 @@ Object {
"presence": "optional",
},
"keys": Object {
"anthropicVersion": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"rules": Array [
Object {
"args": Object {
"method": [Function],
},
"name": "custom",
},
],
"type": "string",
},
"maxTokens": Object {
"flags": Object {
"default": [Function],
@ -562,12 +702,364 @@ Object {
],
"type": "number",
},
"tools": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"items": Array [
Object {
"flags": Object {
"default": Object {
"special": "deep",
},
"error": [Function],
"presence": "optional",
},
"keys": Object {
"description": Object {
"flags": Object {
"error": [Function],
},
"rules": Array [
Object {
"args": Object {
"method": [Function],
},
"name": "custom",
},
],
"type": "string",
},
"input_schema": Object {
"flags": Object {
"default": Object {
"special": "deep",
},
"error": [Function],
"presence": "optional",
"unknown": true,
},
"keys": Object {},
"preferences": Object {
"stripUnknown": Object {
"objects": false,
},
},
"type": "object",
},
"name": Object {
"flags": Object {
"error": [Function],
},
"rules": Array [
Object {
"args": Object {
"method": [Function],
},
"name": "custom",
},
],
"type": "string",
},
},
"type": "object",
},
],
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"type": "array",
},
},
"type": "object",
}
`;
exports[`Connector type config checks detect connector type changes for: .bedrock 6`] = `
Object {
"flags": Object {
"default": Object {
"special": "deep",
},
"error": [Function],
"presence": "optional",
},
"keys": Object {
"anthropicVersion": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"rules": Array [
Object {
"args": Object {
"method": [Function],
},
"name": "custom",
},
],
"type": "string",
},
"maxTokens": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"type": "number",
},
"messages": Object {
"flags": Object {
"error": [Function],
},
"items": Array [
Object {
"flags": Object {
"default": Object {
"special": "deep",
},
"error": [Function],
"presence": "optional",
},
"keys": Object {
"content": Object {
"flags": Object {
"error": [Function],
},
"metas": Array [
Object {
"x-oas-any-type": true,
},
],
"type": "any",
},
"role": Object {
"flags": Object {
"error": [Function],
},
"rules": Array [
Object {
"args": Object {
"method": [Function],
},
"name": "custom",
},
],
"type": "string",
},
},
"type": "object",
},
],
"type": "array",
},
"model": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"rules": Array [
Object {
"args": Object {
"method": [Function],
},
"name": "custom",
},
],
"type": "string",
},
"signal": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"metas": Array [
Object {
"x-oas-any-type": true,
},
Object {
"x-oas-optional": true,
},
],
"type": "any",
},
"stopSequences": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"items": Array [
Object {
"flags": Object {
"error": [Function],
"presence": "optional",
},
"rules": Array [
Object {
"args": Object {
"method": [Function],
},
"name": "custom",
},
],
"type": "string",
},
],
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"type": "array",
},
"system": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"rules": Array [
Object {
"args": Object {
"method": [Function],
},
"name": "custom",
},
],
"type": "string",
},
"temperature": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"type": "number",
},
"timeout": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"type": "number",
},
"tools": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"items": Array [
Object {
"flags": Object {
"default": Object {
"special": "deep",
},
"error": [Function],
"presence": "optional",
},
"keys": Object {
"description": Object {
"flags": Object {
"error": [Function],
},
"rules": Array [
Object {
"args": Object {
"method": [Function],
},
"name": "custom",
},
],
"type": "string",
},
"input_schema": Object {
"flags": Object {
"default": Object {
"special": "deep",
},
"error": [Function],
"presence": "optional",
"unknown": true,
},
"keys": Object {},
"preferences": Object {
"stripUnknown": Object {
"objects": false,
},
},
"type": "object",
},
"name": Object {
"flags": Object {
"error": [Function],
},
"rules": Array [
Object {
"args": Object {
"method": [Function],
},
"name": "custom",
},
],
"type": "string",
},
},
"type": "object",
},
],
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"type": "array",
},
},
"type": "object",
}
`;
exports[`Connector type config checks detect connector type changes for: .bedrock 7`] = `
Object {
"flags": Object {
"default": Object {
@ -612,7 +1104,7 @@ Object {
}
`;
exports[`Connector type config checks detect connector type changes for: .bedrock 7`] = `
exports[`Connector type config checks detect connector type changes for: .bedrock 8`] = `
Object {
"flags": Object {
"default": Object {
@ -655,7 +1147,7 @@ Object {
}
`;
exports[`Connector type config checks detect connector type changes for: .bedrock 8`] = `
exports[`Connector type config checks detect connector type changes for: .bedrock 9`] = `
Object {
"flags": Object {
"default": Object {
@ -3459,15 +3951,12 @@ Object {
"flags": Object {
"error": [Function],
},
"rules": Array [
"metas": Array [
Object {
"args": Object {
"method": [Function],
},
"name": "custom",
"x-oas-any-type": true,
},
],
"type": "string",
"type": "any",
},
"model": Object {
"flags": Object {
@ -3490,6 +3979,19 @@ Object {
],
"type": "string",
},
"raw": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"type": "boolean",
},
"signal": Object {
"flags": Object {
"default": [Function],
@ -3610,15 +4112,12 @@ Object {
"flags": Object {
"error": [Function],
},
"rules": Array [
"metas": Array [
Object {
"args": Object {
"method": [Function],
},
"name": "custom",
"x-oas-any-type": true,
},
],
"type": "string",
"type": "any",
},
"model": Object {
"flags": Object {
@ -3641,6 +4140,19 @@ Object {
],
"type": "string",
},
"raw": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"type": "boolean",
},
"signal": Object {
"flags": Object {
"default": [Function],
@ -3832,6 +4344,33 @@ Object {
],
"type": "number",
},
"tools": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"items": Array [
Object {
"flags": Object {
"error": [Function],
"presence": "optional",
},
"metas": Array [
Object {
"x-oas-any-type": true,
},
],
"type": "any",
},
],
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"type": "array",
},
},
"type": "object",
}
@ -3957,6 +4496,152 @@ Object {
`;
exports[`Connector type config checks detect connector type changes for: .gemini 6`] = `
Object {
"flags": Object {
"default": Object {
"special": "deep",
},
"error": [Function],
"presence": "optional",
},
"keys": Object {
"messages": Object {
"flags": Object {
"error": [Function],
},
"metas": Array [
Object {
"x-oas-any-type": true,
},
],
"type": "any",
},
"model": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"rules": Array [
Object {
"args": Object {
"method": [Function],
},
"name": "custom",
},
],
"type": "string",
},
"signal": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"metas": Array [
Object {
"x-oas-any-type": true,
},
Object {
"x-oas-optional": true,
},
],
"type": "any",
},
"stopSequences": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"items": Array [
Object {
"flags": Object {
"error": [Function],
"presence": "optional",
},
"rules": Array [
Object {
"args": Object {
"method": [Function],
},
"name": "custom",
},
],
"type": "string",
},
],
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"type": "array",
},
"temperature": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"type": "number",
},
"timeout": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"type": "number",
},
"tools": Object {
"flags": Object {
"default": [Function],
"error": [Function],
"presence": "optional",
},
"items": Array [
Object {
"flags": Object {
"error": [Function],
"presence": "optional",
},
"metas": Array [
Object {
"x-oas-any-type": true,
},
],
"type": "any",
},
],
"metas": Array [
Object {
"x-oas-optional": true,
},
],
"type": "array",
},
},
"type": "object",
}
`;
exports[`Connector type config checks detect connector type changes for: .gemini 7`] = `
Object {
"flags": Object {
"default": Object {
@ -4029,7 +4714,7 @@ Object {
}
`;
exports[`Connector type config checks detect connector type changes for: .gemini 7`] = `
exports[`Connector type config checks detect connector type changes for: .gemini 8`] = `
Object {
"flags": Object {
"default": Object {
@ -4058,7 +4743,7 @@ Object {
}
`;
exports[`Connector type config checks detect connector type changes for: .gemini 8`] = `
exports[`Connector type config checks detect connector type changes for: .gemini 9`] = `
Object {
"flags": Object {
"default": Object {


@ -11,14 +11,18 @@ import { KibanaRequest } from '@kbn/core/server';
import { actionsClientMock } from '@kbn/actions-plugin/server/actions_client/actions_client.mock';
import { loggerMock } from '@kbn/logging-mocks';
import { initializeAgentExecutorWithOptions } from 'langchain/agents';
import { initializeAgentExecutorWithOptions, AgentExecutor } from 'langchain/agents';
import { mockActionResponse } from '../../../__mocks__/action_result_data';
import { langChainMessages } from '../../../__mocks__/lang_chain_messages';
import { KNOWLEDGE_BASE_INDEX_PATTERN } from '../../../routes/knowledge_base/constants';
import { callAgentExecutor } from '.';
import { PassThrough, Stream } from 'stream';
import { ActionsClientChatOpenAI, ActionsClientSimpleChatModel } from '@kbn/langchain/server';
import {
ActionsClientChatOpenAI,
ActionsClientBedrockChatModel,
ActionsClientSimpleChatModel,
} from '@kbn/langchain/server';
import { AgentExecutorParams } from '../executors/types';
import { ElasticsearchStore } from '../elasticsearch_store/elasticsearch_store';
@ -27,6 +31,7 @@ jest.mock('@kbn/langchain/server', () => {
return {
...original,
ActionsClientChatOpenAI: jest.fn(),
ActionsClientBedrockChatModel: jest.fn(),
ActionsClientSimpleChatModel: jest.fn(),
};
});
@ -47,6 +52,7 @@ const mockCall = jest.fn().mockImplementation(() =>
})
);
const mockInvoke = jest.fn().mockImplementation(() => Promise.resolve());
jest.mock('langchain/agents');
jest.mock('../elasticsearch_store/elasticsearch_store', () => ({
@ -97,6 +103,7 @@ const esStoreMock = new ElasticsearchStore(
);
const defaultProps: AgentExecutorParams<true> = {
actionsClient,
bedrockChatEnabled: false,
connectorId: mockConnectorId,
esClient: esClientMock,
esStore: esStoreMock,
@ -111,7 +118,14 @@ const bedrockProps = {
...defaultProps,
llmType: 'bedrock',
};
const bedrockChatProps = {
...defaultProps,
bedrockChatEnabled: true,
llmType: 'bedrock',
};
const executorMock = initializeAgentExecutorWithOptions as jest.Mock;
const agentExecutorMock = AgentExecutor as unknown as jest.Mock;
describe('callAgentExecutor', () => {
beforeEach(() => {
jest.clearAllMocks();
@ -121,6 +135,10 @@ describe('callAgentExecutor', () => {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
invoke: (props: any, more: any) => mockInvoke({ ...props, agentType }, more),
}));
agentExecutorMock.mockReturnValue({
call: mockCall,
invoke: mockInvoke,
});
});
describe('callAgentExecutor', () => {
@ -269,6 +287,60 @@ describe('callAgentExecutor', () => {
});
});
describe('BedrockChat', () => {
describe('when the agent is not streaming', () => {
it('creates an instance of ActionsClientBedrockChatModel with the expected context from the request', async () => {
await callAgentExecutor(bedrockChatProps);
expect(ActionsClientBedrockChatModel).toHaveBeenCalledWith({
actionsClient,
connectorId: mockConnectorId,
logger: mockLogger,
maxRetries: 0,
signal: undefined,
model: undefined,
streaming: false,
temperature: 0,
llmType: 'bedrock',
});
});
it('returns the expected response', async () => {
const result = await callAgentExecutor(bedrockChatProps);
expect(result).toEqual({
body: {
connector_id: 'mock-connector-id',
data: mockActionResponse,
status: 'ok',
replacements: {},
trace_data: undefined,
},
headers: {
'content-type': 'application/json',
},
});
});
});
describe('when the agent is streaming', () => {
it('creates an instance of ActionsClientBedrockChatModel with the expected context from the request', async () => {
await callAgentExecutor({ ...bedrockChatProps, isStream: true });
expect(ActionsClientBedrockChatModel).toHaveBeenCalledWith({
actionsClient,
connectorId: mockConnectorId,
logger: mockLogger,
maxRetries: 0,
signal: undefined,
model: undefined,
streaming: true,
temperature: 0,
llmType: 'bedrock',
});
});
});
});
describe.each([
['OpenAI', defaultProps],
['Bedrock', bedrockProps],


@ -5,21 +5,22 @@
* 2.0.
*/
import agent, { Span } from 'elastic-apm-node';
import { initializeAgentExecutorWithOptions } from 'langchain/agents';
import {
initializeAgentExecutorWithOptions,
createToolCallingAgent,
AgentExecutor as lcAgentExecutor,
} from 'langchain/agents';
import { BufferMemory, ChatMessageHistory } from 'langchain/memory';
import { ToolInterface } from '@langchain/core/tools';
import { streamFactory } from '@kbn/ml-response-stream/server';
import { transformError } from '@kbn/securitysolution-es-utils';
import { RetrievalQAChain } from 'langchain/chains';
import {
getDefaultArguments,
ActionsClientChatOpenAI,
ActionsClientSimpleChatModel,
} from '@kbn/langchain/server';
import { MessagesPlaceholder } from '@langchain/core/prompts';
import { getDefaultArguments } from '@kbn/langchain/server';
import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts';
import { APMTracer } from '@kbn/langchain/server/tracers/apm';
import { withAssistantSpan } from '../tracers/apm/with_assistant_span';
import { getLlmClass } from '../../../routes/utils';
import { EsAnonymizationFieldsSchema } from '../../../ai_assistant_data_clients/anonymization_fields/types';
import { transformESSearchToAnonymizationFields } from '../../../ai_assistant_data_clients/anonymization_fields/helpers';
import { AgentExecutor } from '../executors/types';
@ -36,6 +37,7 @@ export const callAgentExecutor: AgentExecutor<true | false> = async ({
actionsClient,
alertsIndexPattern,
assistantTools = [],
bedrockChatEnabled,
connectorId,
esClient,
esStore,
@ -53,7 +55,7 @@ export const callAgentExecutor: AgentExecutor<true | false> = async ({
conversationId,
}) => {
const isOpenAI = llmType === 'openai';
const llmClass = isOpenAI ? ActionsClientChatOpenAI : ActionsClientSimpleChatModel;
const llmClass = getLlmClass(llmType, bedrockChatEnabled);
const llm = new llmClass({
actionsClient,
@ -134,6 +136,21 @@ export const callAgentExecutor: AgentExecutor<true | false> = async ({
agentType: 'openai-functions',
...executorArgs,
})
: llmType === 'bedrock' && bedrockChatEnabled
? new lcAgentExecutor({
agent: await createToolCallingAgent({
llm,
tools,
prompt: ChatPromptTemplate.fromMessages([
['system', 'You are a helpful assistant'],
['placeholder', '{chat_history}'],
['human', '{input}'],
['placeholder', '{agent_scratchpad}'],
]),
streamRunnable: isStream,
}),
tools,
})
: await initializeAgentExecutorWithOptions(tools, llm, {
agentType: 'structured-chat-zero-shot-react-description',
...executorArgs,


@ -37,6 +37,7 @@ export interface AgentExecutorParams<T extends boolean> {
abortSignal?: AbortSignal;
alertsIndexPattern?: string;
actionsClient: PublicMethodsOf<ActionsClient>;
bedrockChatEnabled: boolean;
assistantTools?: AssistantTool[];
connectorId: string;
conversationId?: string;


@ -33,6 +33,7 @@ import {
PERSIST_CONVERSATION_CHANGES_NODE,
persistConversationChanges,
} from './nodes/persist_conversation_changes';
import { RESPOND_NODE, respond } from './nodes/respond';
export const DEFAULT_ASSISTANT_GRAPH_ID = 'Default Security Assistant Graph';
@ -40,11 +41,15 @@ interface GetDefaultAssistantGraphParams {
agentRunnable: AgentRunnableSequence;
dataClients?: AssistantDataClients;
conversationId?: string;
getLlmInstance: () => BaseChatModel;
llm: BaseChatModel;
logger: Logger;
tools: StructuredTool[];
responseLanguage: string;
replacements: Replacements;
llmType: string | undefined;
bedrockChatEnabled?: boolean;
isStreaming: boolean;
}
export type DefaultAssistantGraph = ReturnType<typeof getDefaultAssistantGraph>;
@ -56,11 +61,15 @@ export const getDefaultAssistantGraph = ({
agentRunnable,
conversationId,
dataClients,
getLlmInstance,
llm,
logger,
responseLanguage,
tools,
replacements,
llmType,
bedrockChatEnabled,
isStreaming,
}: GetDefaultAssistantGraphParams) => {
try {
// Default graph state
@ -142,6 +151,12 @@ export const getDefaultAssistantGraph = ({
conversationId,
replacements,
});
const respondNode = (state: AgentState) =>
respond({
...nodeParams,
llm: getLlmInstance(),
state,
});
const shouldContinueEdge = (state: AgentState) => shouldContinue({ ...nodeParams, state });
const shouldContinueGenerateTitleEdge = (state: AgentState) =>
shouldContinueGenerateTitle({ ...nodeParams, state });
@ -158,6 +173,7 @@ export const getDefaultAssistantGraph = ({
| 'generateChatTitle'
| 'getPersistedConversation'
| 'persistConversationChanges'
| 'respond'
>({
channels: graphState,
});
@ -168,6 +184,13 @@ export const getDefaultAssistantGraph = ({
graph.addNode(AGENT_NODE, runAgentNode);
graph.addNode(TOOLS_NODE, executeToolsNode);
const hasRespondStep = isStreaming && bedrockChatEnabled && llmType === 'bedrock';
if (hasRespondStep) {
graph.addNode(RESPOND_NODE, respondNode);
graph.addEdge(RESPOND_NODE, END);
}
// Add edges, alternating between agent and action until finished
graph.addConditionalEdges(START, shouldContinueGetConversationEdge, {
continue: GET_PERSISTED_CONVERSATION_NODE,
@ -180,7 +203,10 @@ export const getDefaultAssistantGraph = ({
graph.addEdge(GENERATE_CHAT_TITLE_NODE, PERSIST_CONVERSATION_CHANGES_NODE);
graph.addEdge(PERSIST_CONVERSATION_CHANGES_NODE, AGENT_NODE);
// Add conditional edge for basic routing
graph.addConditionalEdges(AGENT_NODE, shouldContinueEdge, { continue: TOOLS_NODE, end: END });
graph.addConditionalEdges(AGENT_NODE, shouldContinueEdge, {
continue: TOOLS_NODE,
end: hasRespondStep ? RESPOND_NODE : END,
});
graph.addEdge(TOOLS_NODE, AGENT_NODE);
// Compile the graph
return graph.compile();


@ -100,6 +100,8 @@ describe('streamGraph', () => {
logger: mockLogger,
onLlmResponse: mockOnLlmResponse,
request: mockRequest,
bedrockChatEnabled: false,
llmType: 'openai',
});
expect(response).toBe(mockResponseWithHeaders);
@ -179,6 +181,8 @@ describe('streamGraph', () => {
logger: mockLogger,
onLlmResponse: mockOnLlmResponse,
request: mockRequest,
bedrockChatEnabled: false,
llmType: 'gemini',
});
expect(response).toBe(mockResponseWithHeaders);


@ -12,6 +12,7 @@ import { transformError } from '@kbn/securitysolution-es-utils';
import type { KibanaRequest } from '@kbn/core-http-server';
import type { ExecuteConnectorRequestBody, TraceData } from '@kbn/elastic-assistant-common';
import { APMTracer } from '@kbn/langchain/server/tracers/apm';
import { AIMessageChunk } from '@langchain/core/messages';
import { withAssistantSpan } from '../../tracers/apm/with_assistant_span';
import { AGENT_NODE_TAG } from './nodes/run_agent';
import { DEFAULT_ASSISTANT_GRAPH_ID, DefaultAssistantGraph } from './graph';
@ -20,7 +21,9 @@ import type { OnLlmResponse, TraceOptions } from '../../executors/types';
interface StreamGraphParams {
apmTracer: APMTracer;
assistantGraph: DefaultAssistantGraph;
bedrockChatEnabled: boolean;
inputs: { input: string };
llmType: string | undefined;
logger: Logger;
onLlmResponse?: OnLlmResponse;
request: KibanaRequest<unknown, unknown, ExecuteConnectorRequestBody>;
@ -40,6 +43,8 @@ interface StreamGraphParams {
*/
export const streamGraph = async ({
apmTracer,
llmType,
bedrockChatEnabled,
assistantGraph,
inputs,
logger,
@ -77,6 +82,39 @@ export const streamGraph = async ({
streamingSpan?.end();
};
if ((llmType === 'bedrock' || llmType === 'gemini') && bedrockChatEnabled) {
const stream = await assistantGraph.streamEvents(
inputs,
{
callbacks: [apmTracer, ...(traceOptions?.tracers ?? [])],
runName: DEFAULT_ASSISTANT_GRAPH_ID,
tags: traceOptions?.tags ?? [],
version: 'v2',
},
llmType === 'bedrock' ? { includeNames: ['Summarizer'] } : undefined
);
for await (const { event, data, tags } of stream) {
if ((tags || []).includes(AGENT_NODE_TAG)) {
if (event === 'on_chat_model_stream') {
const msg = data.chunk as AIMessageChunk;
if (!didEnd && !msg.tool_call_chunks?.length && msg.content.length) {
push({ payload: msg.content as string, type: 'content' });
}
}
if (
event === 'on_chat_model_end' &&
!data.output.lc_kwargs?.tool_calls?.length &&
!didEnd
) {
handleStreamEnd(data.output.content);
}
}
}
return responseWithHeaders;
}
let finalMessage = '';
let conversationId: string | undefined;
const stream = assistantGraph.streamEvents(inputs, {


@ -7,13 +7,15 @@
import { StructuredTool } from '@langchain/core/tools';
import { RetrievalQAChain } from 'langchain/chains';
import { getDefaultArguments } from '@kbn/langchain/server';
import {
getDefaultArguments,
ActionsClientChatOpenAI,
ActionsClientSimpleChatModel,
} from '@kbn/langchain/server';
import { createOpenAIFunctionsAgent, createStructuredChatAgent } from 'langchain/agents';
import {
createOpenAIFunctionsAgent,
createStructuredChatAgent,
createToolCallingAgent,
} from 'langchain/agents';
import { APMTracer } from '@kbn/langchain/server/tracers/apm';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import { getLlmClass } from '../../../../routes/utils';
import { EsAnonymizationFieldsSchema } from '../../../../ai_assistant_data_clients/anonymization_fields/types';
import { AssistantToolParams } from '../../../../types';
import { AgentExecutor } from '../../executors/types';
@ -30,6 +32,7 @@ export const callAssistantGraph: AgentExecutor<true | false> = async ({
actionsClient,
alertsIndexPattern,
assistantTools = [],
bedrockChatEnabled,
connectorId,
conversationId,
dataClients,
@ -49,25 +52,27 @@ export const callAssistantGraph: AgentExecutor<true | false> = async ({
}) => {
const logger = parentLogger.get('defaultAssistantGraph');
const isOpenAI = llmType === 'openai';
const llmClass = isOpenAI ? ActionsClientChatOpenAI : ActionsClientSimpleChatModel;
const llmClass = getLlmClass(llmType, bedrockChatEnabled);
const getLlmInstance = () =>
new llmClass({
actionsClient,
connectorId,
llmType,
logger,
// possible client model override,
// let this be undefined otherwise so the connector handles the model
model: request.body.model,
// ensure this is defined because we default to it in the language_models
// This is where the LangSmith logs (Metadata > Invocation Params) are set
temperature: getDefaultArguments(llmType).temperature,
signal: abortSignal,
streaming: isStream,
// prevents the agent from retrying on failure
// failure could be due to bad connector, we should deliver that result to the client asap
maxRetries: 0,
});
const llm = new llmClass({
actionsClient,
connectorId,
llmType,
logger,
// possible client model override,
// let this be undefined otherwise so the connector handles the model
model: request.body.model,
// ensure this is defined because we default to it in the language_models
// This is where the LangSmith logs (Metadata > Invocation Params) are set
temperature: getDefaultArguments(llmType).temperature,
signal: abortSignal,
streaming: isStream,
// prevents the agent from retrying on failure
// failure could be due to bad connector, we should deliver that result to the client asap
maxRetries: 0,
});
const llm = getLlmInstance();
const anonymizationFieldsRes =
await dataClients?.anonymizationFieldsDataClient?.findDocuments<EsAnonymizationFieldsSchema>({
@ -117,6 +122,22 @@ export const callAssistantGraph: AgentExecutor<true | false> = async ({
prompt: openAIFunctionAgentPrompt,
streamRunnable: isStream,
})
: llmType && ['bedrock', 'gemini'].includes(llmType) && bedrockChatEnabled
? createToolCallingAgent({
llm,
tools,
prompt: ChatPromptTemplate.fromMessages([
[
'system',
'You are a helpful assistant. ALWAYS use the provided tools. Use tools as often as possible, as they have access to the latest data and syntax.\n\n' +
`The final response will be the only output the user sees and should be a complete answer to the user's question, as if you were responding to the user's initial question, which is "{input}". The final response should never be empty.`,
],
['placeholder', '{chat_history}'],
['human', '{input}'],
['placeholder', '{agent_scratchpad}'],
]),
streamRunnable: isStream,
})
: await createStructuredChatAgent({
llm,
tools,
@ -131,10 +152,15 @@ export const callAssistantGraph: AgentExecutor<true | false> = async ({
conversationId,
dataClients,
llm,
// we need to pass it like this or streaming does not work for bedrock
getLlmInstance,
logger,
tools,
responseLanguage,
replacements,
llmType,
bedrockChatEnabled,
isStreaming: isStream,
});
const inputs = { input: latestMessage[0]?.content as string };
@ -142,6 +168,8 @@ export const callAssistantGraph: AgentExecutor<true | false> = async ({
return streamGraph({
apmTracer,
assistantGraph,
llmType,
bedrockChatEnabled,
inputs,
logger,
onLlmResponse,


@ -8,6 +8,7 @@
import { RunnableConfig } from '@langchain/core/runnables';
import { StructuredTool } from '@langchain/core/tools';
import { ToolExecutor } from '@langchain/langgraph/prebuilt';
import { isArray } from 'lodash';
import { AgentState, NodeParamsBase } from '../types';
export interface ExecuteToolsParams extends NodeParamsBase {
@ -33,12 +34,21 @@ export const executeTools = async ({ config, logger, state, tools }: ExecuteTool
logger.debug(() => `Node state:\n${JSON.stringify(state, null, 2)}`);
const toolExecutor = new ToolExecutor({ tools });
const agentAction = state.agentOutcome;
const agentAction = isArray(state.agentOutcome) ? state.agentOutcome[0] : state.agentOutcome;
if (!agentAction || 'returnValues' in agentAction) {
throw new Error('Agent has not been run yet');
}
const out = await toolExecutor.invoke(agentAction, config);
let out;
try {
out = await toolExecutor.invoke(agentAction, config);
} catch (err) {
return {
steps: [{ action: agentAction, observation: JSON.stringify(`Error: ${err}`, null, 2) }],
};
}
return {
...state,
steps: [{ action: agentAction, observation: JSON.stringify(out, null, 2) }],


@ -0,0 +1,39 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { StringWithAutocomplete } from '@langchain/core/dist/utils/types';
import { AGENT_NODE_TAG } from './run_agent';
import { AgentState } from '../types';
export const RESPOND_NODE = 'respond';
export const respond = async ({ llm, state }: { llm: BaseChatModel; state: AgentState }) => {
if (state?.agentOutcome && 'returnValues' in state.agentOutcome) {
const userMessage = [
'user',
`Respond exactly with
${state.agentOutcome?.returnValues?.output}
Do not verify, confirm or anything else. Just reply with the same content as provided above.`,
] as [StringWithAutocomplete<'user'>, string];
const responseMessage = await llm
// use AGENT_NODE_TAG to identify as agent node for stream parsing
.withConfig({ runName: 'Summarizer', tags: [AGENT_NODE_TAG] })
.invoke([userMessage]);
return {
agentOutcome: {
...state.agentOutcome,
returnValues: {
output: responseMessage.content,
},
},
};
}
return state;
};


@ -42,6 +42,7 @@ export const runAgent = async ({
const agentOutcome = await agentRunnable.withConfig({ tags: [AGENT_NODE_TAG] }).invoke(
{
...state,
messages: state.messages.splice(-1),
chat_history: state.messages, // TODO: Message de-dupe with ...state spread
},
config


@ -161,6 +161,8 @@ export const postEvaluateRoute = (
// Setup with kbDataClient if `enableKnowledgeBaseByDefault` FF is enabled
const enableKnowledgeBaseByDefault =
assistantContext.getRegisteredFeatures(pluginName).assistantKnowledgeBaseByDefault;
const bedrockChatEnabled =
assistantContext.getRegisteredFeatures(pluginName).assistantBedrockChat;
const kbDataClient = enableKnowledgeBaseByDefault
? (await assistantContext.getAIAssistantKnowledgeBaseDataClient()) ?? undefined
: undefined;
@ -193,6 +195,7 @@ export const postEvaluateRoute = (
const evalResult = await AGENT_EXECUTOR_MAP[agentName]({
actionsClient,
assistantTools,
bedrockChatEnabled,
connectorId,
esClient,
esStore,


@ -346,6 +346,8 @@ export const langChainExecute = async ({
// Create an ElasticsearchStore for KB interactions
const kbDataClient =
(await assistantContext.getAIAssistantKnowledgeBaseDataClient()) ?? undefined;
const bedrockChatEnabled =
assistantContext.getRegisteredFeatures(pluginName).assistantBedrockChat;
const esStore = new ElasticsearchStore(
esClient,
kbDataClient?.indexTemplateAndPattern?.alias ?? '',
@ -368,6 +370,7 @@ export const langChainExecute = async ({
dataClients,
alertsIndexPattern: request.body.alertsIndexPattern,
actionsClient,
bedrockChatEnabled,
assistantTools,
conversationId,
connectorId,


@ -13,6 +13,12 @@ import type {
KibanaResponseFactory,
CustomHttpResponseOptions,
} from '@kbn/core/server';
import {
ActionsClientChatOpenAI,
ActionsClientBedrockChatModel,
ActionsClientSimpleChatModel,
ActionsClientGeminiChatModel,
} from '@kbn/langchain/server';
import { CustomHttpRequestError } from './custom_http_request_error';
export interface OutputError {
@ -174,3 +180,12 @@ export const getLlmType = (actionTypeId: string): string | undefined => {
};
return llmTypeDictionary[actionTypeId];
};
export const getLlmClass = (llmType?: string, bedrockChatEnabled?: boolean) =>
llmType === 'openai'
? ActionsClientChatOpenAI
: llmType === 'bedrock' && bedrockChatEnabled
? ActionsClientBedrockChatModel
: llmType === 'gemini' && bedrockChatEnabled
? ActionsClientGeminiChatModel
: ActionsClientSimpleChatModel;


@ -35,7 +35,9 @@ import {
import { AnonymizationFieldResponse } from '@kbn/elastic-assistant-common/impl/schemas/anonymization_fields/bulk_crud_anonymization_fields_route.gen';
import { LicensingApiRequestHandlerContext } from '@kbn/licensing-plugin/server';
import {
ActionsClientBedrockChatModel,
ActionsClientChatOpenAI,
ActionsClientGeminiChatModel,
ActionsClientLlm,
ActionsClientSimpleChatModel,
} from '@kbn/langchain/server';
@ -211,6 +213,12 @@ export interface AssistantTool {
getTool: (params: AssistantToolParams) => Tool | DynamicStructuredTool | null;
}
export type AssistantToolLlm =
| ActionsClientBedrockChatModel
| ActionsClientChatOpenAI
| ActionsClientGeminiChatModel
| ActionsClientSimpleChatModel;
export interface AssistantToolParams {
alertsIndexPattern?: string;
anonymizationFields?: AnonymizationFieldResponse[];
@ -219,7 +227,7 @@ export interface AssistantToolParams {
esClient: ElasticsearchClient;
kbDataClient?: AIAssistantKnowledgeBaseDataClient;
langChainTimeout?: number;
llm?: ActionsClientLlm | ActionsClientChatOpenAI | ActionsClientSimpleChatModel;
llm?: ActionsClientLlm | AssistantToolLlm;
logger: Logger;
modelExists: boolean;
onNewReplacements?: (newReplacements: Replacements) => void;


@ -435,7 +435,10 @@ describe('conversational chain', () => {
expectedDocs: [
{
documents: [
{ metadata: { _id: '1', _index: 'index' } },
{
metadata: { _id: '1', _index: 'index' },
pageContent: expect.any(String),
},
{
metadata: { _id: '1', _index: 'website' },
pageContent: expect.any(String),
@ -446,8 +449,8 @@ describe('conversational chain', () => {
],
// Even with body_content of 1000, the token count should be below or equal to model limit of 100
expectedTokens: [
{ type: 'context_token_count', count: 65 },
{ type: 'prompt_token_count', count: 99 },
{ type: 'context_token_count', count: 63 },
{ type: 'prompt_token_count', count: 97 },
],
expectedHasClipped: true,
expectedSearchRequest: [


@ -128,6 +128,11 @@ export const allowedExperimentalValues = Object.freeze({
*/
assistantKnowledgeBaseByDefault: false,
/**
* Enables the Assistant BedrockChat Langchain model, introduced in `8.15.0`.
*/
assistantBedrockChat: false,
/**
* Enables the Managed User section inside the new user details flyout.
*/


@ -47,6 +47,7 @@ export const ESQL_KNOWLEDGE_BASE_TOOL: AssistantTool = {
return result.text;
},
tags: ['esql', 'query-generation', 'knowledge-base'],
});
// TODO: Remove after ZodAny is fixed https://github.com/langchain-ai/langchainjs/blob/main/langchain-core/src/tools.ts
}) as unknown as DynamicStructuredTool;
},
};


@ -54,6 +54,7 @@ export const KNOWLEDGE_BASE_RETRIEVAL_TOOL: AssistantTool = {
return JSON.stringify(docs);
},
tags: ['knowledge-base'],
});
// TODO: Remove after ZodAny is fixed https://github.com/langchain-ai/langchainjs/blob/main/langchain-core/src/tools.ts
}) as unknown as DynamicStructuredTool;
},
};


@ -66,6 +66,7 @@ export const KNOWLEDGE_BASE_WRITE_TOOL: AssistantTool = {
return "I've successfully saved this entry to your knowledge base. You can ask me to recall this information at any time.";
},
tags: ['knowledge-base'],
});
// TODO: Remove after ZodAny is fixed https://github.com/langchain-ai/langchainjs/blob/main/langchain-core/src/tools.ts
}) as unknown as DynamicStructuredTool;
},
};


@ -541,6 +541,7 @@ export class Plugin implements ISecuritySolutionPlugin {
// Assistant Tool and Feature Registration
plugins.elasticAssistant.registerTools(APP_UI_ID, getAssistantTools());
plugins.elasticAssistant.registerFeatures(APP_UI_ID, {
assistantBedrockChat: config.experimentalFeatures.assistantBedrockChat,
assistantKnowledgeBaseByDefault: config.experimentalFeatures.assistantKnowledgeBaseByDefault,
assistantModelEvaluation: config.experimentalFeatures.assistantModelEvaluation,
});


@ -17,6 +17,7 @@ export const BEDROCK_CONNECTOR_ID = '.bedrock';
export enum SUB_ACTION {
RUN = 'run',
INVOKE_AI = 'invokeAI',
INVOKE_AI_RAW = 'invokeAIRaw',
INVOKE_STREAM = 'invokeStream',
DASHBOARD = 'getDashboard',
TEST = 'test',


@ -25,6 +25,7 @@ export const RunActionParamsSchema = schema.object({
// abort signal from client
signal: schema.maybe(schema.any()),
timeout: schema.maybe(schema.number()),
raw: schema.maybe(schema.boolean()),
});
export const InvokeAIActionParamsSchema = schema.object({
@ -42,12 +43,51 @@ export const InvokeAIActionParamsSchema = schema.object({
// abort signal from client
signal: schema.maybe(schema.any()),
timeout: schema.maybe(schema.number()),
anthropicVersion: schema.maybe(schema.string()),
tools: schema.maybe(
schema.arrayOf(
schema.object({
name: schema.string(),
description: schema.string(),
input_schema: schema.object({}, { unknowns: 'allow' }),
})
)
),
});
export const InvokeAIActionResponseSchema = schema.object({
message: schema.string(),
});
export const InvokeAIRawActionParamsSchema = schema.object({
messages: schema.arrayOf(
schema.object({
role: schema.string(),
content: schema.any(),
})
),
model: schema.maybe(schema.string()),
temperature: schema.maybe(schema.number()),
stopSequences: schema.maybe(schema.arrayOf(schema.string())),
system: schema.maybe(schema.string()),
maxTokens: schema.maybe(schema.number()),
// abort signal from client
signal: schema.maybe(schema.any()),
anthropicVersion: schema.maybe(schema.string()),
timeout: schema.maybe(schema.number()),
tools: schema.maybe(
schema.arrayOf(
schema.object({
name: schema.string(),
description: schema.string(),
input_schema: schema.object({}, { unknowns: 'allow' }),
})
)
),
});
export const InvokeAIRawActionResponseSchema = schema.object({}, { unknowns: 'allow' });
export const RunApiLatestResponseSchema = schema.object(
{
stop_reason: schema.maybe(schema.string()),
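The new `tools` arrays above follow Anthropic's tool-definition shape: a `name`, a `description`, and a JSON-schema `input_schema` with unknown keys allowed. A minimal payload that `InvokeAIRawActionParamsSchema` would accept (the tool and its fields are made up for illustration):

const exampleParams = {
  messages: [{ role: 'user', content: 'What is the weather in Berlin?' }],
  anthropicVersion: 'bedrock-2023-05-31',
  tools: [
    {
      name: 'get_weather', // hypothetical tool
      description: 'Returns the current weather for a city',
      input_schema: {
        type: 'object',
        properties: { city: { type: 'string' } },
        required: ['city'],
      },
    },
  ],
};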


@ -15,6 +15,8 @@ import {
RunActionResponseSchema,
InvokeAIActionParamsSchema,
InvokeAIActionResponseSchema,
InvokeAIRawActionParamsSchema,
InvokeAIRawActionResponseSchema,
StreamingResponseSchema,
RunApiLatestResponseSchema,
} from './schema';
@ -24,6 +26,8 @@ export type Secrets = TypeOf<typeof SecretsSchema>;
export type RunActionParams = TypeOf<typeof RunActionParamsSchema>;
export type InvokeAIActionParams = TypeOf<typeof InvokeAIActionParamsSchema>;
export type InvokeAIActionResponse = TypeOf<typeof InvokeAIActionResponseSchema>;
export type InvokeAIRawActionParams = TypeOf<typeof InvokeAIRawActionParamsSchema>;
export type InvokeAIRawActionResponse = TypeOf<typeof InvokeAIRawActionResponseSchema>;
export type RunApiLatestResponse = TypeOf<typeof RunApiLatestResponseSchema>;
export type RunActionResponse = TypeOf<typeof RunActionResponseSchema>;
export type StreamingResponse = TypeOf<typeof StreamingResponseSchema>;


@ -19,6 +19,7 @@ export enum SUB_ACTION {
DASHBOARD = 'getDashboard',
TEST = 'test',
INVOKE_AI = 'invokeAI',
INVOKE_AI_RAW = 'invokeAIRaw',
INVOKE_STREAM = 'invokeStream',
}


@ -20,12 +20,13 @@ export const SecretsSchema = schema.object({
});
export const RunActionParamsSchema = schema.object({
body: schema.string(),
body: schema.any(),
model: schema.maybe(schema.string()),
signal: schema.maybe(schema.any()),
timeout: schema.maybe(schema.number()),
temperature: schema.maybe(schema.number()),
stopSequences: schema.maybe(schema.arrayOf(schema.string())),
raw: schema.maybe(schema.boolean()),
});
export const RunApiResponseSchema = schema.object({
@ -52,6 +53,8 @@ export const RunActionResponseSchema = schema.object(
{ unknowns: 'ignore' }
);
export const RunActionRawResponseSchema = schema.any();
export const InvokeAIActionParamsSchema = schema.object({
messages: schema.any(),
model: schema.maybe(schema.string()),
@ -59,6 +62,16 @@ export const InvokeAIActionParamsSchema = schema.object({
stopSequences: schema.maybe(schema.arrayOf(schema.string())),
signal: schema.maybe(schema.any()),
timeout: schema.maybe(schema.number()),
tools: schema.maybe(schema.arrayOf(schema.any())),
});
export const InvokeAIRawActionParamsSchema = schema.object({
messages: schema.any(),
model: schema.maybe(schema.string()),
temperature: schema.maybe(schema.number()),
stopSequences: schema.maybe(schema.arrayOf(schema.string())),
signal: schema.maybe(schema.any()),
timeout: schema.maybe(schema.number()),
});
export const InvokeAIActionResponseSchema = schema.object({
@ -72,6 +85,8 @@ export const InvokeAIActionResponseSchema = schema.object({
),
});
export const InvokeAIRawActionResponseSchema = schema.any();
export const StreamingResponseSchema = schema.any();
export const DashboardActionParamsSchema = schema.object({


@ -13,9 +13,12 @@ import {
SecretsSchema,
RunActionParamsSchema,
RunActionResponseSchema,
RunActionRawResponseSchema,
RunApiResponseSchema,
InvokeAIActionParamsSchema,
InvokeAIActionResponseSchema,
InvokeAIRawActionParamsSchema,
InvokeAIRawActionResponseSchema,
StreamingResponseSchema,
} from './schema';
@ -24,8 +27,11 @@ export type Secrets = TypeOf<typeof SecretsSchema>;
export type RunActionParams = TypeOf<typeof RunActionParamsSchema>;
export type RunApiResponse = TypeOf<typeof RunApiResponseSchema>;
export type RunActionResponse = TypeOf<typeof RunActionResponseSchema>;
export type RunActionRawResponse = TypeOf<typeof RunActionRawResponseSchema>;
export type DashboardActionParams = TypeOf<typeof DashboardActionParamsSchema>;
export type DashboardActionResponse = TypeOf<typeof DashboardActionResponseSchema>;
export type InvokeAIActionParams = TypeOf<typeof InvokeAIActionParamsSchema>;
export type InvokeAIActionResponse = TypeOf<typeof InvokeAIActionResponseSchema>;
export type InvokeAIRawActionParams = TypeOf<typeof InvokeAIRawActionParamsSchema>;
export type InvokeAIRawActionResponse = TypeOf<typeof InvokeAIRawActionResponseSchema>;
export type StreamingResponse = TypeOf<typeof StreamingResponseSchema>;


@ -15,6 +15,8 @@ import { initDashboard } from '../lib/gen_ai/create_gen_ai_dashboard';
import {
RunActionParamsSchema,
InvokeAIActionParamsSchema,
InvokeAIRawActionParamsSchema,
InvokeAIRawActionResponseSchema,
StreamingResponseSchema,
RunActionResponseSchema,
RunApiLatestResponseSchema,
@ -26,6 +28,8 @@ import {
RunActionResponse,
InvokeAIActionParams,
InvokeAIActionResponse,
InvokeAIRawActionParams,
InvokeAIRawActionResponse,
RunApiLatestResponse,
} from '../../../common/bedrock/types';
import {
@ -90,6 +94,12 @@ export class BedrockConnector extends SubActionConnector<Config, Secrets> {
method: 'invokeStream',
schema: InvokeAIActionParamsSchema,
});
this.registerSubAction({
name: SUB_ACTION.INVOKE_AI_RAW,
method: 'invokeAIRaw',
schema: InvokeAIRawActionParamsSchema,
});
}
protected getResponseErrorMessage(error: AxiosError<{ message?: string }>): string {
@ -183,15 +193,15 @@ The Kibana Connector in use may need to be reconfigured with an updated Amazon B
return { available: response.success };
}
private async runApiDeprecated(
params: SubActionRequestParams<RunActionResponse> // : SubActionRequestParams<RunApiLatestResponseSchema>
): Promise<RunActionResponse> {
private async runApiRaw(
params: SubActionRequestParams<RunActionResponse | InvokeAIRawActionResponse>
): Promise<RunActionResponse | InvokeAIRawActionResponse> {
const response = await this.request(params);
return response.data;
}
private async runApiLatest(
params: SubActionRequestParams<RunApiLatestResponse> // : SubActionRequestParams<RunApiLatestResponseSchema>
params: SubActionRequestParams<RunApiLatestResponse>
): Promise<RunActionResponse> {
const response = await this.request(params);
// keeping the response the same as claude 2 for our APIs
@ -213,7 +223,8 @@ The Kibana Connector in use may need to be reconfigured with an updated Amazon B
model: reqModel,
signal,
timeout,
}: RunActionParams): Promise<RunActionResponse> {
raw,
}: RunActionParams): Promise<RunActionResponse | InvokeAIRawActionResponse> {
// set model on per request basis
const currentModel = reqModel ?? this.model;
const path = `/model/${currentModel}/invoke`;
@ -227,9 +238,13 @@ The Kibana Connector in use may need to be reconfigured with an updated Amazon B
// give up to 2 minutes for response
timeout: timeout ?? DEFAULT_TIMEOUT_MS,
};
if (raw) {
return this.runApiRaw({ ...requestArgs, responseSchema: InvokeAIRawActionResponseSchema });
}
// possible api received deprecated arguments, which will still work with the deprecated Claude 2 models
if (usesDeprecatedArguments(body)) {
return this.runApiDeprecated({ ...requestArgs, responseSchema: RunActionResponseSchema });
return this.runApiRaw({ ...requestArgs, responseSchema: RunActionResponseSchema });
}
return this.runApiLatest({ ...requestArgs, responseSchema: RunApiLatestResponseSchema });
}
@ -282,9 +297,12 @@ The Kibana Connector in use may need to be reconfigured with an updated Amazon B
temperature,
signal,
timeout,
}: InvokeAIActionParams): Promise<IncomingMessage> {
tools,
}: InvokeAIActionParams | InvokeAIRawActionParams): Promise<IncomingMessage> {
const res = (await this.streamApi({
body: JSON.stringify(formatBedrockBody({ messages, stopSequences, system, temperature })),
body: JSON.stringify(
formatBedrockBody({ messages, stopSequences, system, temperature, tools })
),
model,
signal,
timeout,
@ -310,16 +328,46 @@ The Kibana Connector in use may need to be reconfigured with an updated Amazon B
signal,
timeout,
}: InvokeAIActionParams): Promise<InvokeAIActionResponse> {
const res = await this.runApi({
const res = (await this.runApi({
body: JSON.stringify(
formatBedrockBody({ messages, stopSequences, system, temperature, maxTokens })
),
model,
signal,
timeout,
});
})) as RunActionResponse;
return { message: res.completion.trim() };
}
public async invokeAIRaw({
messages,
model,
stopSequences,
system,
temperature,
maxTokens = DEFAULT_TOKEN_LIMIT,
signal,
timeout,
tools,
anthropicVersion,
}: InvokeAIRawActionParams): Promise<InvokeAIRawActionResponse> {
const res = await this.runApi({
body: JSON.stringify({
messages,
stop_sequences: stopSequences,
system,
temperature,
max_tokens: maxTokens,
tools,
anthropic_version: anthropicVersion,
}),
model,
signal,
timeout,
raw: true,
});
return res;
}
}
const formatBedrockBody = ({
@ -328,19 +376,22 @@ const formatBedrockBody = ({
temperature = 0,
system,
maxTokens = DEFAULT_TOKEN_LIMIT,
tools,
}: {
messages: Array<{ role: string; content: string }>;
messages: Array<{ role: string; content?: string }>;
stopSequences?: string[];
temperature?: number;
maxTokens?: number;
// optional system message to be sent to the API
system?: string;
tools?: Array<{ name: string; description: string }>;
}) => ({
anthropic_version: 'bedrock-2023-05-31',
...ensureMessageFormat(messages, system),
max_tokens: maxTokens,
stop_sequences: stopSequences,
temperature,
tools,
});
/**
@ -350,12 +401,12 @@ const formatBedrockBody = ({
* @param messages
*/
const ensureMessageFormat = (
messages: Array<{ role: string; content: string }>,
messages: Array<{ role: string; content?: string }>,
systemPrompt?: string
): { messages: Array<{ role: string; content: string }>; system?: string } => {
): { messages: Array<{ role: string; content?: string }>; system?: string } => {
let system = systemPrompt ? systemPrompt : '';
const newMessages = messages.reduce((acc: Array<{ role: string; content: string }>, m) => {
const newMessages = messages.reduce((acc: Array<{ role: string; content?: string }>, m) => {
const lastMessage = acc[acc.length - 1];
if (m.role === 'system') {
system = `${system.length ? `${system}\n` : ''}${m.content}`;

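Taken together, the connector changes above let callers bypass the Claude-specific response mapping: `invokeAIRaw` posts the Anthropic-shaped body as-is and returns the unvalidated provider response, which is what lets a tool-calling client read tool_use content blocks directly. A hedged sketch of invoking it through the actions client (assumes an ActionsClient instance is in scope; the connector id is a placeholder):

const response = await actionsClient.execute({
  actionId: bedrockConnectorId, // placeholder
  params: {
    subAction: SUB_ACTION.INVOKE_AI_RAW,
    subActionParams: {
      messages: [{ role: 'user', content: 'Hello' }],
      anthropicVersion: 'bedrock-2023-05-31',
    },
  },
});
// response.data is validated only by InvokeAIRawActionResponseSchema,
// i.e. schema.object({}, { unknowns: 'allow' }) - effectively untyped.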

@ -161,6 +161,9 @@ describe('GeminiConnector', () => {
temperature: 0,
maxOutputTokens: 8192,
},
safety_settings: [
{ category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_ONLY_HIGH' },
],
}),
headers: {
Authorization: 'Bearer mock_access_token',
@ -190,6 +193,9 @@ describe('GeminiConnector', () => {
temperature: 0,
maxOutputTokens: 8192,
},
safety_settings: [
{ category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_ONLY_HIGH' },
],
}),
headers: {
Authorization: 'Bearer mock_access_token',
@ -237,6 +243,9 @@ describe('GeminiConnector', () => {
temperature: 0,
maxOutputTokens: 8192,
},
safety_settings: [
{ category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_ONLY_HIGH' },
],
}),
responseType: 'stream',
headers: {
@ -267,6 +276,9 @@ describe('GeminiConnector', () => {
temperature: 0,
maxOutputTokens: 8192,
},
safety_settings: [
{ category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_ONLY_HIGH' },
],
}),
responseType: 'stream',
headers: {


@ -13,10 +13,13 @@ import { SubActionRequestParams } from '@kbn/actions-plugin/server/sub_action_fr
import { getGoogleOAuthJwtAccessToken } from '@kbn/actions-plugin/server/lib/get_gcp_oauth_access_token';
import { ConnectorTokenClientContract } from '@kbn/actions-plugin/server/types';
import { HarmBlockThreshold, HarmCategory } from '@google/generative-ai';
import {
RunActionParamsSchema,
RunApiResponseSchema,
RunActionRawResponseSchema,
InvokeAIActionParamsSchema,
InvokeAIRawActionParamsSchema,
StreamingResponseSchema,
} from '../../../common/gemini/schema';
import { initDashboard } from '../lib/gen_ai/create_gen_ai_dashboard';
@ -25,12 +28,15 @@ import {
Secrets,
RunActionParams,
RunActionResponse,
RunActionRawResponse,
RunApiResponse,
DashboardActionParams,
DashboardActionResponse,
StreamingResponse,
InvokeAIActionParams,
InvokeAIActionResponse,
InvokeAIRawActionParams,
InvokeAIRawActionResponse,
} from '../../../common/gemini/types';
import {
SUB_ACTION,
@ -55,6 +61,7 @@ interface Payload {
temperature: number;
maxOutputTokens: number;
};
safety_settings: Array<{ category: string; threshold: string }>;
}
export class GeminiConnector extends SubActionConnector<Config, Secrets> {
@ -103,6 +110,12 @@ export class GeminiConnector extends SubActionConnector<Config, Secrets> {
schema: InvokeAIActionParamsSchema,
});
this.registerSubAction({
name: SUB_ACTION.INVOKE_AI_RAW,
method: 'invokeAIRaw',
schema: InvokeAIRawActionParamsSchema,
});
this.registerSubAction({
name: SUB_ACTION.INVOKE_STREAM,
method: 'invokeStream',
@ -110,10 +123,20 @@ export class GeminiConnector extends SubActionConnector<Config, Secrets> {
});
}
protected getResponseErrorMessage(error: AxiosError<{ message?: string }>): string {
protected getResponseErrorMessage(
error: AxiosError<{
error?: { code?: number; message?: string; status?: string };
message?: string;
}>
): string {
if (!error.response?.status) {
return `Unexpected API Error: ${error.code ?? ''} - ${error.message ?? 'Unknown error'}`;
}
if (error.response?.data?.error) {
return `API Error: ${
error.response?.data?.error.status ? `${error.response.data.error.status}: ` : ''
}${error.response?.data?.error.message ? `${error.response.data.error.message}` : ''}`;
}
if (
error.response.status === 400 &&
error.response?.data?.message === 'The requested operation is not recognized by the service.'
@ -193,7 +216,8 @@ export class GeminiConnector extends SubActionConnector<Config, Secrets> {
model: reqModel,
signal,
timeout,
}: RunActionParams): Promise<RunActionResponse> {
raw,
}: RunActionParams): Promise<RunActionResponse | RunActionRawResponse> {
// set model on per request basis
const currentModel = reqModel ?? this.model;
const path = `/v1/projects/${this.gcpProjectID}/locations/${this.gcpRegion}/publishers/google/models/${currentModel}:generateContent`;
@ -209,10 +233,15 @@ export class GeminiConnector extends SubActionConnector<Config, Secrets> {
},
signal,
timeout: timeout ?? DEFAULT_TIMEOUT_MS,
responseSchema: RunApiResponseSchema,
responseSchema: raw ? RunActionRawResponseSchema : RunApiResponseSchema,
} as SubActionRequestParams<RunApiResponse>;
const response = await this.request(requestArgs);
if (raw) {
return response.data;
}
const candidate = response.data.candidates[0];
const usageMetadata = response.data.usageMetadata;
const completionText = candidate.content.parts[0].text;
@ -264,6 +293,24 @@ export class GeminiConnector extends SubActionConnector<Config, Secrets> {
return { message: res.completion, usageMetadata: res.usageMetadata };
}
public async invokeAIRaw({
messages,
model,
temperature = 0,
signal,
timeout,
}: InvokeAIRawActionParams): Promise<InvokeAIRawActionResponse> {
const res = await this.runApi({
body: JSON.stringify(formatGeminiPayload(messages, temperature)),
model,
signal,
timeout,
raw: true,
});
return res;
}
/**
* takes in an array of messages and a model as inputs. It calls the streamApi method to make a
* request to the Gemini API with the formatted messages and model. It then returns a Transform stream
@ -279,21 +326,21 @@ export class GeminiConnector extends SubActionConnector<Config, Secrets> {
temperature = 0,
signal,
timeout,
tools,
}: InvokeAIActionParams): Promise<IncomingMessage> {
const res = (await this.streamAPI({
body: JSON.stringify(formatGeminiPayload(messages, temperature)),
return (await this.streamAPI({
body: JSON.stringify({ ...formatGeminiPayload(messages, temperature), tools }),
model,
stopSequences,
signal,
timeout,
})) as unknown as IncomingMessage;
return res;
}
}
/** Format the json body to meet Gemini payload requirements */
const formatGeminiPayload = (
data: Array<{ role: string; content: string }>,
data: Array<{ role: string; content: string; parts: MessagePart[] }>,
temperature: number
): Payload => {
const payload: Payload = {
@ -302,26 +349,38 @@ const formatGeminiPayload = (
temperature,
maxOutputTokens: DEFAULT_TOKEN_LIMIT,
},
safety_settings: [
{
category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
// without setting threshold, the model will block responses about suspicious alerts
threshold: HarmBlockThreshold.BLOCK_ONLY_HIGH,
},
],
};
let previousRole: string | null = null;
for (const row of data) {
const correctRole = row.role === 'assistant' ? 'model' : 'user';
if (correctRole === 'user' && previousRole === 'user') {
/** Append to the previous 'user' content
* This is to ensure that multiturn requests alternate between user and model
*/
payload.contents[payload.contents.length - 1].parts[0].text += ` ${row.content}`;
// if data is already preformatted by ActionsClientGeminiChatModel
if (row.parts) {
payload.contents.push(row);
} else {
// Add a new entry
payload.contents.push({
role: correctRole,
parts: [
{
text: row.content,
},
],
});
if (correctRole === 'user' && previousRole === 'user') {
/** Append to the previous 'user' content
* This is to ensure that multiturn requests alternate between user and model
*/
payload.contents[payload.contents.length - 1].parts[0].text += ` ${row.content}`;
} else {
// Add a new entry
payload.contents.push({
role: correctRole,
parts: [
{
text: row.content,
},
],
});
}
}
previousRole = correctRole;
}
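To make the dual-format handling above concrete, here are the two input shapes `formatGeminiPayload` now accepts (editor's illustration; the part contents are made up): plain `role`/`content` turns are wrapped into `parts`, while turns that already carry `parts` (preformatted by `ActionsClientGeminiChatModel`, e.g. function calls) are pushed through unchanged.

// Wrapped into { role: 'user', parts: [{ text: '...' }] }:
const plainTurn = { role: 'user', content: 'Summarize the latest alert' };

// Already has `parts`, so it is appended as-is:
const preformattedTurn = {
  role: 'model',
  parts: [{ functionCall: { name: 'attack_discovery', args: {} } }],
};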

yarn.lock

@ -77,23 +77,32 @@
resolved "https://registry.yarnpkg.com/@assemblyscript/loader/-/loader-0.10.1.tgz#70e45678f06c72fa2e350e8553ec4a4d72b92e06"
integrity sha512-H71nDOOL8Y7kWRLqf6Sums+01Q5msqBW2KhDUTemh1tvY04eSkSXrK0uj/4mmY0Xr16/3zyZmsrxN7CKuRbNRg==
"@aws-crypto/crc32@3.0.0":
version "3.0.0"
resolved "https://registry.yarnpkg.com/@aws-crypto/crc32/-/crc32-3.0.0.tgz#07300eca214409c33e3ff769cd5697b57fdd38fa"
integrity sha512-IzSgsrxUcsrejQbPVilIKy16kAT52EwB6zSaI+M3xxIhKh5+aldEyvI+z6erM7TCLB2BJsFrtHjp6/4/sr+3dA==
"@aws-crypto/crc32@5.2.0":
version "5.2.0"
resolved "https://registry.yarnpkg.com/@aws-crypto/crc32/-/crc32-5.2.0.tgz#cfcc22570949c98c6689cfcbd2d693d36cdae2e1"
integrity sha512-nLbCWqQNgUiwwtFsen1AdzAtvuLRsQS8rYgMuxCrdKf9kOssamGLuPwyTY9wyYblNr9+1XM8v6zoDTPPSIeANg==
dependencies:
"@aws-crypto/util" "^3.0.0"
"@aws-crypto/util" "^5.2.0"
"@aws-sdk/types" "^3.222.0"
tslib "^1.11.1"
tslib "^2.6.2"
"@aws-crypto/util@^3.0.0":
version "3.0.0"
resolved "https://registry.yarnpkg.com/@aws-crypto/util/-/util-3.0.0.tgz#1c7ca90c29293f0883468ad48117937f0fe5bfb0"
integrity sha512-2OJlpeJpCR48CC8r+uKVChzs9Iungj9wkZrl8Z041DWEWvyIHILYKCPNzJghKsivj+S3mLo6BVc7mBNzdxA46w==
"@aws-crypto/sha256-js@^5.2.0":
version "5.2.0"
resolved "https://registry.yarnpkg.com/@aws-crypto/sha256-js/-/sha256-js-5.2.0.tgz#c4fdb773fdbed9a664fc1a95724e206cf3860042"
integrity sha512-FFQQyu7edu4ufvIZ+OadFpHHOt+eSTBaYaki44c+akjg7qZg9oOQeLlk77F6tSYqjDAFClrHJk9tMf0HdVyOvA==
dependencies:
"@aws-crypto/util" "^5.2.0"
"@aws-sdk/types" "^3.222.0"
tslib "^2.6.2"
"@aws-crypto/util@^5.2.0":
version "5.2.0"
resolved "https://registry.yarnpkg.com/@aws-crypto/util/-/util-5.2.0.tgz#71284c9cffe7927ddadac793c14f14886d3876da"
integrity sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ==
dependencies:
"@aws-sdk/types" "^3.222.0"
"@aws-sdk/util-utf8-browser" "^3.0.0"
tslib "^1.11.1"
"@smithy/util-utf8" "^2.0.0"
tslib "^2.6.2"
"@aws-sdk/types@^3.222.0":
version "3.577.0"
@ -103,13 +112,6 @@
"@smithy/types" "^3.0.0"
tslib "^2.6.2"
"@aws-sdk/util-utf8-browser@^3.0.0":
version "3.259.0"
resolved "https://registry.yarnpkg.com/@aws-sdk/util-utf8-browser/-/util-utf8-browser-3.259.0.tgz#3275a6f5eb334f96ca76635b961d3c50259fd9ff"
integrity sha512-UvFa/vR+e19XookZF8RzFZBrw2EUkQWxiBW0yYQAhvk3C+QVGl0H3ouca8LDBlBfQKXwmW3huo/59H8rwb1wJw==
dependencies:
tslib "^2.3.1"
"@babel/cli@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/cli/-/cli-7.24.7.tgz#eb2868c1fa384b17ea88d60107577d3e6fd05c4e"
@ -2550,6 +2552,11 @@
resolved "https://registry.yarnpkg.com/@gar/promisify/-/promisify-1.1.3.tgz#555193ab2e3bb3b6adc3d551c9c030d9e860daf6"
integrity sha512-k2Ty1JcVojjJFwrg/ThKi2ujJ7XNLYaFGNB/bWT9wGR+oSMJHMa5w+CUq6p/pVrKeNNgA7pCqEcjSnHVoqJQFw==
"@google/generative-ai@^0.7.0":
version "0.7.1"
resolved "https://registry.yarnpkg.com/@google/generative-ai/-/generative-ai-0.7.1.tgz#eb187c75080c0706245699dbc06816c830d8c6a7"
integrity sha512-WTjMLLYL/xfA5BW6xAycRPiAX7FNHKAxrid/ayqC1QMam0KAK0NbMeS9Lubw80gVg5xFMLE+H7pw4wdNzTOlxw==
"@grpc/grpc-js@^1.7.1", "@grpc/grpc-js@^1.8.22":
version "1.8.22"
resolved "https://registry.yarnpkg.com/@grpc/grpc-js/-/grpc-js-1.8.22.tgz#847930c9af46e14df05b57fc12325db140ceff1d"
@ -7038,57 +7045,67 @@
resolved "https://registry.yarnpkg.com/@kwsites/promise-deferred/-/promise-deferred-1.1.1.tgz#8ace5259254426ccef57f3175bc64ed7095ed919"
integrity sha512-GaHYm+c0O9MjZRu0ongGBRbinu8gVAMd2UZjji6jVmqKtZluZnptXGWhz1E8j8D2HJ3f/yMxKAUC0b+57wncIw==
"@langchain/community@^0.2.4":
version "0.2.4"
resolved "https://registry.yarnpkg.com/@langchain/community/-/community-0.2.4.tgz#fb5feb4f4a01a1b33adfd28ce7126d0dedb3e6d1"
integrity sha512-rwrPNQLyIe84TPqPYbYOfDA4G/ba1rdj7OtZg63dQmxIvNDOmUCh4xIQac2iuRUnM3o4Ben0Faa9qz+V5oPgIA==
"@langchain/community@0.2.18":
version "0.2.18"
resolved "https://registry.yarnpkg.com/@langchain/community/-/community-0.2.18.tgz#127a7ac53a30dd6dedede887811fdd992061e2d2"
integrity sha512-UsCB97dMG87giQLniKx4bjv7OnMw2vQeavSt9gqOnGCnfb5IQBAgdjX4SjwFPbVGMz1HQoQKVlNqQ64ozCdgNg==
dependencies:
"@langchain/core" "~0.2.0"
"@langchain/openai" "~0.0.28"
"@langchain/core" "~0.2.11"
"@langchain/openai" "~0.1.0"
binary-extensions "^2.2.0"
expr-eval "^2.0.2"
flat "^5.0.2"
js-yaml "^4.1.0"
langchain "0.2.3"
langsmith "~0.1.1"
uuid "^9.0.0"
langsmith "~0.1.30"
uuid "^10.0.0"
zod "^3.22.3"
zod-to-json-schema "^3.22.5"
"@langchain/core@0.2.3", "@langchain/core@>0.1.0 <0.3.0", "@langchain/core@>0.1.56 <0.3.0", "@langchain/core@>0.1.61 <0.3.0", "@langchain/core@~0.2.0":
version "0.2.3"
resolved "https://registry.yarnpkg.com/@langchain/core/-/core-0.2.3.tgz#7faa82f92b0c7843506e827a38bfcbb60f009d13"
integrity sha512-mVuFHSLpPQ4yOHNXeoSA3LnmIMuFmUiit5rvbYcPZqM6SrB2zCNN2nD4Ty5+3H5X4tYItDoSqsTuUNUQySXRQw==
"@langchain/core@>0.1.0 <0.3.0", "@langchain/core@>=0.2.11 <0.3.0", "@langchain/core@>=0.2.16 <0.3.0", "@langchain/core@>=0.2.5 <0.3.0", "@langchain/core@^0.2.17", "@langchain/core@~0.2.11":
version "0.2.17"
resolved "https://registry.yarnpkg.com/@langchain/core/-/core-0.2.17.tgz#dfd44a2ccf79cef88ba765741a1c277bc22e483f"
integrity sha512-WnFiZ7R/ZUVeHO2IgcSL7Tu+CjApa26Iy99THJP5fax/NF8UQCc/ZRcw2Sb/RUuRPVm6ALDass0fSQE1L9YNJg==
dependencies:
ansi-styles "^5.0.0"
camelcase "6"
decamelize "1.2.0"
js-tiktoken "^1.0.12"
langsmith "~0.1.7"
langsmith "~0.1.30"
ml-distance "^4.0.0"
mustache "^4.2.0"
p-queue "^6.6.2"
p-retry "4"
uuid "^9.0.0"
uuid "^10.0.0"
zod "^3.22.4"
zod-to-json-schema "^3.22.3"
"@langchain/langgraph@^0.0.23":
"@langchain/google-genai@^0.0.23":
version "0.0.23"
resolved "https://registry.yarnpkg.com/@langchain/langgraph/-/langgraph-0.0.23.tgz#34b5ad5dc9fe644ee96bcfcf11197ec1d7f9e0e2"
integrity sha512-pXlcsBOseT5xdf9enUqbLQ/59LaZxgMI2dL2vFJ+EpcoK7bQnlzzhRtRPp+vubMyMeEKRoAXlaA9ObwpVi93CA==
resolved "https://registry.yarnpkg.com/@langchain/google-genai/-/google-genai-0.0.23.tgz#e73af501bc1df4c7642b531759b82dc3eb7ae459"
integrity sha512-MTSCJEoKsfU1inz0PWvAjITdNFM4s41uvBCwLpcgx3jWJIEisczFD82x86ahYqJlb2fD6tohYSaCH/4tKAdkXA==
dependencies:
"@langchain/core" ">0.1.61 <0.3.0"
uuid "^9.0.1"
"@google/generative-ai" "^0.7.0"
"@langchain/core" ">=0.2.16 <0.3.0"
zod-to-json-schema "^3.22.4"
"@langchain/openai@^0.0.34", "@langchain/openai@~0.0.28":
version "0.0.34"
resolved "https://registry.yarnpkg.com/@langchain/openai/-/openai-0.0.34.tgz#36c9bca0721ab9f7e5d40927e7c0429cacbd5b56"
integrity sha512-M+CW4oXle5fdoz2T2SwdOef8pl3/1XmUx1vjn2mXUVM/128aO0l23FMF0SNBsAbRV6P+p/TuzjodchJbi0Ht/A==
"@langchain/langgraph@^0.0.29":
version "0.0.29"
resolved "https://registry.yarnpkg.com/@langchain/langgraph/-/langgraph-0.0.29.tgz#eda31d101e7a75981e0929661c41ab2461ff8640"
integrity sha512-BSFFJarkXqrMdH9yH6AIiBCw4ww0VsXXpBwqaw+9/7iulW0pBFRSkWXHjEYnmsdCRgyIxoP8vYQAQ8Jtu3qzZA==
dependencies:
"@langchain/core" ">0.1.56 <0.3.0"
"@langchain/core" ">=0.2.16 <0.3.0"
uuid "^10.0.0"
zod "^3.23.8"
"@langchain/openai@>=0.1.0 <0.3.0", "@langchain/openai@^0.1.3", "@langchain/openai@~0.1.0":
version "0.1.3"
resolved "https://registry.yarnpkg.com/@langchain/openai/-/openai-0.1.3.tgz#6eb0994e970d85ffa9aaeafb94449024ccf6ca63"
integrity sha512-riv/JC9x2A8b7GcHu8sx+mlZJ8KAwSSi231IPTlcciYnKozmrQ5H0vrtiD31fxiDbaRsk7tyCpkSBIOQEo7CyQ==
dependencies:
"@langchain/core" ">=0.2.5 <0.3.0"
js-tiktoken "^1.0.12"
openai "^4.41.1"
openai "^4.49.1"
zod "^3.22.4"
zod-to-json-schema "^3.22.3"
@ -8354,34 +8371,41 @@
"@types/node" ">=18.0.0"
axios "^1.6.0"
"@smithy/eventstream-codec@^3.0.0":
version "3.0.0"
resolved "https://registry.yarnpkg.com/@smithy/eventstream-codec/-/eventstream-codec-3.0.0.tgz#81d30391220f73d41f432f65384b606d67673e46"
integrity sha512-PUtyEA0Oik50SaEFCZ0WPVtF9tz/teze2fDptW6WRXl+RrEenH8UbEjudOz8iakiMl3lE3lCVqYf2Y+znL8QFQ==
"@smithy/eventstream-codec@^3.1.1":
version "3.1.1"
resolved "https://registry.yarnpkg.com/@smithy/eventstream-codec/-/eventstream-codec-3.1.1.tgz#b47f30bf4ad791ac7981b9fff58e599d18269cf9"
integrity sha512-s29NxV/ng1KXn6wPQ4qzJuQDjEtxLdS0+g5PQFirIeIZrp66FXVJ5IpZRowbt/42zB5dY8TqJ0G0L9KkgtsEZg==
dependencies:
"@aws-crypto/crc32" "3.0.0"
"@smithy/types" "^3.0.0"
"@aws-crypto/crc32" "5.2.0"
"@smithy/types" "^3.2.0"
"@smithy/util-hex-encoding" "^3.0.0"
tslib "^2.6.2"
"@smithy/eventstream-serde-node@^3.0.0":
version "3.0.0"
resolved "https://registry.yarnpkg.com/@smithy/eventstream-serde-node/-/eventstream-serde-node-3.0.0.tgz#6519523fbb429307be29b151b8ba35bcca2b6e64"
integrity sha512-baRPdMBDMBExZXIUAoPGm/hntixjt/VFpU6+VmCyiYJYzRHRxoaI1MN+5XE+hIS8AJ2GCHLMFEIOLzq9xx1EgQ==
"@smithy/eventstream-serde-node@^3.0.3":
version "3.0.3"
resolved "https://registry.yarnpkg.com/@smithy/eventstream-serde-node/-/eventstream-serde-node-3.0.3.tgz#51df0ca39f453d78a3d6607c1ac2e96cf900c824"
integrity sha512-v61Ftn7x/ubWFqH7GHFAL/RaU7QZImTbuV95DYugYYItzpO7KaHYEuO8EskCaBpZEfzOxhUGKm4teS9YUSt69Q==
dependencies:
"@smithy/eventstream-serde-universal" "^3.0.0"
"@smithy/types" "^3.0.0"
"@smithy/eventstream-serde-universal" "^3.0.3"
"@smithy/types" "^3.2.0"
tslib "^2.6.2"
"@smithy/eventstream-serde-universal@^3.0.0":
version "3.0.0"
resolved "https://registry.yarnpkg.com/@smithy/eventstream-serde-universal/-/eventstream-serde-universal-3.0.0.tgz#cb8441a73fbde4cbaa68e4a21236f658d914a073"
integrity sha512-HNFfShmotWGeAoW4ujP8meV9BZavcpmerDbPIjkJbxKbN8RsUcpRQ/2OyIxWNxXNH2GWCAxuSB7ynmIGJlQ3Dw==
"@smithy/eventstream-serde-universal@^3.0.3":
version "3.0.3"
resolved "https://registry.yarnpkg.com/@smithy/eventstream-serde-universal/-/eventstream-serde-universal-3.0.3.tgz#2ecac479ba84e10221b4b70545f3d7a223b5345e"
integrity sha512-YXYt3Cjhu9tRrahbTec2uOjwOSeCNfQurcWPGNEUspBhqHoA3KrDrVj+jGbCLWvwkwhzqDnnaeHAxm+IxAjOAQ==
dependencies:
"@smithy/eventstream-codec" "^3.0.0"
"@smithy/types" "^3.0.0"
"@smithy/eventstream-codec" "^3.1.1"
"@smithy/types" "^3.2.0"
tslib "^2.6.2"
"@smithy/is-array-buffer@^2.0.0":
version "2.0.0"
resolved "https://registry.yarnpkg.com/@smithy/is-array-buffer/-/is-array-buffer-2.0.0.tgz#8fa9b8040651e7ba0b2f6106e636a91354ff7d34"
integrity sha512-z3PjFjMyZNI98JFRJi/U0nGoLWMSJlDjAW4QUX2WNZLas5C0CmVV6LJ01JI0k90l7FvpmixjWxPFmENSClQ7ug==
dependencies:
tslib "^2.5.0"
"@smithy/is-array-buffer@^3.0.0":
version "3.0.0"
resolved "https://registry.yarnpkg.com/@smithy/is-array-buffer/-/is-array-buffer-3.0.0.tgz#9a95c2d46b8768946a9eec7f935feaddcffa5e7a"
@ -8389,13 +8413,42 @@
dependencies:
tslib "^2.6.2"
"@smithy/types@^3.0.0":
version "3.0.0"
resolved "https://registry.yarnpkg.com/@smithy/types/-/types-3.0.0.tgz#00231052945159c64ffd8b91e8909d8d3006cb7e"
integrity sha512-VvWuQk2RKFuOr98gFhjca7fkBS+xLLURT8bUjk5XQoV0ZLm7WPwWPPY3/AwzTLuUBDeoKDCthfe1AsTUWaSEhw==
"@smithy/protocol-http@^4.0.2":
version "4.0.2"
resolved "https://registry.yarnpkg.com/@smithy/protocol-http/-/protocol-http-4.0.2.tgz#502ed3116cb0f1e3f207881df965bac620ccb2da"
integrity sha512-X/90xNWIOqSR2tLUyWxVIBdatpm35DrL44rI/xoeBWUuanE0iyCXJpTcnqlOpnEzgcu0xCKE06+g70TTu2j7RQ==
dependencies:
"@smithy/types" "^3.2.0"
tslib "^2.6.2"
"@smithy/signature-v4@^3.1.1":
version "3.1.1"
resolved "https://registry.yarnpkg.com/@smithy/signature-v4/-/signature-v4-3.1.1.tgz#4882aacb3260a47b8279b2ffc6a135e03e225260"
integrity sha512-2/vlG86Sr489XX8TA/F+VDA+P04ESef04pSz0wRtlQBExcSPjqO08rvrkcas2zLnJ51i+7ukOURCkgqixBYjSQ==
dependencies:
"@smithy/is-array-buffer" "^3.0.0"
"@smithy/types" "^3.2.0"
"@smithy/util-hex-encoding" "^3.0.0"
"@smithy/util-middleware" "^3.0.2"
"@smithy/util-uri-escape" "^3.0.0"
"@smithy/util-utf8" "^3.0.0"
tslib "^2.6.2"
"@smithy/types@^3.0.0", "@smithy/types@^3.2.0":
version "3.2.0"
resolved "https://registry.yarnpkg.com/@smithy/types/-/types-3.2.0.tgz#1350fe8a50d5e35e12ffb34be46d946860b2b5ab"
integrity sha512-cKyeKAPazZRVqm7QPvcPD2jEIt2wqDPAL1KJKb0f/5I7uhollvsWZuZKLclmyP6a+Jwmr3OV3t+X0pZUUHS9BA==
dependencies:
tslib "^2.6.2"
"@smithy/util-buffer-from@^2.0.0":
version "2.0.0"
resolved "https://registry.yarnpkg.com/@smithy/util-buffer-from/-/util-buffer-from-2.0.0.tgz#7eb75d72288b6b3001bc5f75b48b711513091deb"
integrity sha512-/YNnLoHsR+4W4Vf2wL5lGv0ksg8Bmk3GEGxn2vEQt52AQaPSCuaO5PM5VM7lP1K9qHRKHwrPGktqVoAHKWHxzw==
dependencies:
"@smithy/is-array-buffer" "^2.0.0"
tslib "^2.5.0"
"@smithy/util-buffer-from@^3.0.0":
version "3.0.0"
resolved "https://registry.yarnpkg.com/@smithy/util-buffer-from/-/util-buffer-from-3.0.0.tgz#559fc1c86138a89b2edaefc1e6677780c24594e3"
@ -8411,6 +8464,29 @@
dependencies:
tslib "^2.6.2"
"@smithy/util-middleware@^3.0.2":
version "3.0.2"
resolved "https://registry.yarnpkg.com/@smithy/util-middleware/-/util-middleware-3.0.2.tgz#6daeb9db060552d851801cd7a0afd68769e2f98b"
integrity sha512-7WW5SD0XVrpfqljBYzS5rLR+EiDzl7wCVJZ9Lo6ChNFV4VYDk37Z1QI5w/LnYtU/QKnSawYoHRd7VjSyC8QRQQ==
dependencies:
"@smithy/types" "^3.2.0"
tslib "^2.6.2"
"@smithy/util-uri-escape@^3.0.0":
version "3.0.0"
resolved "https://registry.yarnpkg.com/@smithy/util-uri-escape/-/util-uri-escape-3.0.0.tgz#e43358a78bf45d50bb736770077f0f09195b6f54"
integrity sha512-LqR7qYLgZTD7nWLBecUi4aqolw8Mhza9ArpNEQ881MJJIU2sE5iHCK6TdyqqzcDLy0OPe10IY4T8ctVdtynubg==
dependencies:
tslib "^2.6.2"
"@smithy/util-utf8@^2.0.0":
version "2.0.0"
resolved "https://registry.yarnpkg.com/@smithy/util-utf8/-/util-utf8-2.0.0.tgz#b4da87566ea7757435e153799df9da717262ad42"
integrity sha512-rctU1VkziY84n5OXe3bPNpKR001ZCME2JCaBBFgtiM2hfKbHFudc/BkMuPab8hRbLd0j3vbnBTTZ1igBf0wgiQ==
dependencies:
"@smithy/util-buffer-from" "^2.0.0"
tslib "^2.5.0"
"@smithy/util-utf8@^3.0.0":
version "3.0.0"
resolved "https://registry.yarnpkg.com/@smithy/util-utf8/-/util-utf8-3.0.0.tgz#1a6a823d47cbec1fd6933e5fc87df975286d9d6a"
@ -21804,24 +21880,24 @@ kuler@^2.0.0:
resolved "https://registry.yarnpkg.com/kuler/-/kuler-2.0.0.tgz#e2c570a3800388fb44407e851531c1d670b061b3"
integrity sha512-Xq9nH7KlWZmXAtodXDDRE7vs6DU1gTU8zYDHDiWLSip45Egwq3plLHzPn27NgvzL2r1LMPC1vdqh98sQxtqj4A==
langchain@0.2.3:
version "0.2.3"
resolved "https://registry.yarnpkg.com/langchain/-/langchain-0.2.3.tgz#c14bb05cf871b21bd63b84b3ab89580b1d62539f"
integrity sha512-T9xR7zd+Nj0oXy6WoYKmZLy0DlQiDLFPGYWdOXDxy+AvqlujoPdVQgDSpdqiOHvAjezrByAoKxoHCz5XMwTP/Q==
langchain@0.2.3, langchain@^0.2.10:
version "0.2.10"
resolved "https://registry.yarnpkg.com/langchain/-/langchain-0.2.10.tgz#35b74038e54650efbd9fe7d9d59765fe2790bb47"
integrity sha512-i0fC+RlX/6w6HKPWL3N5zrhrkijvpe2Xu4t/qbWzq4uFf8WBfPwmNFom3RtO2RatuPnHLm8mViU6nw8YBDiVwA==
dependencies:
"@langchain/core" "~0.2.0"
"@langchain/openai" "~0.0.28"
"@langchain/core" ">=0.2.11 <0.3.0"
"@langchain/openai" ">=0.1.0 <0.3.0"
"@langchain/textsplitters" "~0.0.0"
binary-extensions "^2.2.0"
js-tiktoken "^1.0.12"
js-yaml "^4.1.0"
jsonpointer "^5.0.1"
langchainhub "~0.0.8"
langsmith "~0.1.7"
langsmith "~0.1.30"
ml-distance "^4.0.0"
openapi-types "^12.1.3"
p-retry "4"
uuid "^9.0.0"
uuid "^10.0.0"
yaml "^2.2.1"
zod "^3.22.4"
zod-to-json-schema "^3.22.3"
@ -21831,10 +21907,10 @@ langchainhub@~0.0.8:
resolved "https://registry.yarnpkg.com/langchainhub/-/langchainhub-0.0.8.tgz#fd4b96dc795e22e36c1a20bad31b61b0c33d3110"
integrity sha512-Woyb8YDHgqqTOZvWIbm2CaFDGfZ4NTSyXV687AG4vXEfoNo7cGQp7nhl7wL3ehenKWmNEmcxCLgOZzW8jE6lOQ==
langsmith@^0.1.30, langsmith@~0.1.1, langsmith@~0.1.7:
version "0.1.30"
resolved "https://registry.yarnpkg.com/langsmith/-/langsmith-0.1.30.tgz#3000e441605b26e15a87fb991a3929c944edbc0a"
integrity sha512-g8f10H1iiRjCweXJjgM3Y9xl6ApCa1OThDvc0BlSDLVrGVPy1on9wT39vAzYkeadC7oG48p7gfpGlYH3kLkJ9Q==
langsmith@^0.1.37, langsmith@~0.1.30:
version "0.1.37"
resolved "https://registry.yarnpkg.com/langsmith/-/langsmith-0.1.37.tgz#7e7bf6fce3eab2a9e95221d5879820ec29d0aa60"
integrity sha512-8JgWykdJywdKWs+WeefOEf4Gkz3YdNkvG5u5JPbgXuodTUwuHPwjmblsldt1OGKkPp7iCWfdtCdnc9z9MYC/Dw==
dependencies:
"@types/uuid" "^9.0.1"
commander "^10.0.1"
@ -24396,10 +24472,10 @@ open@^8.0.9, open@^8.4.0, open@~8.4.0:
is-docker "^2.1.1"
is-wsl "^2.2.0"
openai@^4.24.1, openai@^4.41.1:
version "4.47.1"
resolved "https://registry.yarnpkg.com/openai/-/openai-4.47.1.tgz#1d23c7a8eb3d7bcdc69709cd905f4c9af0181dba"
integrity sha512-WWSxhC/69ZhYWxH/OBsLEirIjUcfpQ5+ihkXKp06hmeYXgBBIUCa9IptMzYx6NdkiOCsSGYCnTIsxaic3AjRCQ==
openai@^4.24.1, openai@^4.49.1:
version "4.51.0"
resolved "https://registry.yarnpkg.com/openai/-/openai-4.51.0.tgz#8ab08bba2441375e8e4ce6161f9ac987d2b2c157"
integrity sha512-UKuWc3/qQyklqhHM8CbdXCv0Z0obap6T0ECdcO5oATQxAbKE5Ky3YCXFQY207z+eGG6ez4U9wvAcuMygxhmStg==
dependencies:
"@types/node" "^18.11.18"
"@types/node-fetch" "^2.6.4"
@ -30454,7 +30530,7 @@ tslib@2.6.2, tslib@^2.0.0, tslib@^2.0.1, tslib@^2.0.3, tslib@^2.1.0, tslib@^2.3.
resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae"
integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==
tslib@^1.10.0, tslib@^1.11.1, tslib@^1.8.1, tslib@^1.9.0, tslib@^1.9.3:
tslib@^1.10.0, tslib@^1.8.1, tslib@^1.9.0, tslib@^1.9.3:
version "1.14.1"
resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00"
integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==
@ -31177,6 +31253,11 @@ uuid@9.0.0:
resolved "https://registry.yarnpkg.com/uuid/-/uuid-9.0.0.tgz#592f550650024a38ceb0c562f2f6aa435761efb5"
integrity sha512-MXcSTerfPa4uqyzStbRoTgt5XIe3x5+42+q1sDuy3R5MDk66URdLMOZe5aPX/SQd+kuYAh0FdP/pO28IkQyTeg==
uuid@^10.0.0:
version "10.0.0"
resolved "https://registry.yarnpkg.com/uuid/-/uuid-10.0.0.tgz#5a95aa454e6e002725c79055fd42aaba30ca6294"
integrity sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==
uuid@^3.3.2, uuid@^3.3.3:
version "3.4.0"
resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.4.0.tgz#b23e4358afa8a202fe7a100af1f5f883f02007ee"
@ -32684,16 +32765,21 @@ zip-stream@^4.1.0:
compress-commons "^4.1.0"
readable-stream "^3.6.0"
zod-to-json-schema@^3.22.3, zod-to-json-schema@^3.22.5, zod-to-json-schema@^3.23.0:
zod-to-json-schema@^3.22.3, zod-to-json-schema@^3.22.4, zod-to-json-schema@^3.22.5, zod-to-json-schema@^3.23.0:
version "3.23.0"
resolved "https://registry.yarnpkg.com/zod-to-json-schema/-/zod-to-json-schema-3.23.0.tgz#4fc60e88d3c709eedbfaae3f92f8a7bf786469f2"
integrity sha512-az0uJ243PxsRIa2x1WmNE/pnuA05gUq/JB8Lwe1EDCCL/Fz9MgjYQ0fPlyc2Tcv6aF2ZA7WM5TWaRZVEFaAIag==
zod@3.22.4, zod@^3.22.3, zod@^3.22.4:
zod@3.22.4:
version "3.22.4"
resolved "https://registry.yarnpkg.com/zod/-/zod-3.22.4.tgz#f31c3a9386f61b1f228af56faa9255e845cf3fff"
integrity sha512-iC+8Io04lddc+mVqQ9AZ7OQ2MrUKGN+oIQyq1vemgt46jwCwLfhq7/pwnBnNXXXZb8VTVLKwp9EDkx+ryxIWmg==
zod@^3.22.3, zod@^3.22.4, zod@^3.23.8:
version "3.23.8"
resolved "https://registry.yarnpkg.com/zod/-/zod-3.23.8.tgz#e37b957b5d52079769fb8097099b592f0ef4067d"
integrity sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g==
zwitch@^1.0.0:
version "1.0.5"
resolved "https://registry.yarnpkg.com/zwitch/-/zwitch-1.0.5.tgz#d11d7381ffed16b742f6af7b3f223d5cd9fe9920"