Mirror of https://github.com/elastic/kibana.git
[Security Assistant] Vertex chat model (#193032)
parent ef3bc96e52 · commit aae8c50f40
19 changed files with 941 additions and 320 deletions
@@ -82,6 +82,7 @@
     "**/@bazel/typescript/protobufjs": "6.11.4",
     "**/@hello-pangea/dnd": "16.6.0",
     "**/@langchain/core": "^0.2.18",
+    "**/@langchain/google-common": "^0.1.1",
     "**/@types/node": "20.10.5",
     "**/@typescript-eslint/utils": "5.62.0",
     "**/chokidar": "^3.5.3",
@@ -999,7 +1000,9 @@
     "@kbn/zod-helpers": "link:packages/kbn-zod-helpers",
     "@langchain/community": "0.2.18",
     "@langchain/core": "^0.2.18",
-    "@langchain/google-genai": "^0.0.23",
+    "@langchain/google-common": "^0.1.1",
+    "@langchain/google-genai": "^0.1.0",
+    "@langchain/google-vertexai": "^0.1.0",
     "@langchain/langgraph": "0.0.34",
     "@langchain/openai": "^0.1.3",
     "@langtrase/trace-attributes": "^3.0.8",
@@ -1148,7 +1151,7 @@
     "jsts": "^1.6.2",
     "kea": "^2.6.0",
     "langchain": "^0.2.11",
-    "langsmith": "^0.1.39",
+    "langsmith": "^0.1.55",
     "launchdarkly-js-client-sdk": "^3.4.0",
     "load-json-file": "^6.2.0",
     "lodash": "^4.17.21",
@@ -10,6 +10,7 @@ import { ActionsClientChatOpenAI } from './language_models/chat_openai';
 import { ActionsClientLlm } from './language_models/llm';
 import { ActionsClientSimpleChatModel } from './language_models/simple_chat_model';
 import { ActionsClientGeminiChatModel } from './language_models/gemini_chat';
+import { ActionsClientChatVertexAI } from './language_models/chat_vertex';
 import { parseBedrockStream } from './utils/bedrock';
 import { parseGeminiResponse } from './utils/gemini';
 import { getDefaultArguments } from './language_models/constants';

@@ -20,6 +21,7 @@ export {
   getDefaultArguments,
   ActionsClientBedrockChatModel,
   ActionsClientChatOpenAI,
+  ActionsClientChatVertexAI,
   ActionsClientGeminiChatModel,
   ActionsClientLlm,
   ActionsClientSimpleChatModel,
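For context, a minimal consumer-side sketch of the newly exported model. The wiring below is illustrative, not from the diff: in real code, actionsClient, connectorId, and logger come from the calling plugin's request context.

import { ActionsClientChatVertexAI } from '@kbn/langchain/server';
import type { ActionsClient } from '@kbn/actions-plugin/server';
import type { PublicMethodsOf } from '@kbn/utility-types';
import type { Logger } from '@kbn/logging';

// Hypothetical wiring; these come from the request context in real code.
declare const actionsClient: PublicMethodsOf<ActionsClient>;
declare const connectorId: string;
declare const logger: Logger;

const model = new ActionsClientChatVertexAI({
  actionsClient,
  connectorId,
  logger,
  streaming: false, // set true to consume _streamResponseChunks instead
  maxRetries: 0,
});

// Standard LangChain invocation; the request is proxied through the
// Kibana actions client rather than calling Google directly.
const answer = await model.invoke('Do you know my name?');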
@@ -0,0 +1,199 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0; you may not use this file except in compliance with the Elastic License
 * 2.0.
 */

import { PassThrough } from 'stream';
import { loggerMock } from '@kbn/logging-mocks';
import { actionsClientMock } from '@kbn/actions-plugin/server/actions_client/actions_client.mock';

import { BaseMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
import { ActionsClientChatVertexAI } from './chat_vertex';
import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';

const connectorId = 'mock-connector-id';

const mockExecute = jest.fn();
const actionsClient = actionsClientMock.create();

const mockLogger = loggerMock.create();

const mockStreamExecute = jest.fn().mockImplementation(() => {
  const passThrough = new PassThrough();

  // Write the data chunks to the stream
  setTimeout(() => {
    passThrough.write(
      Buffer.from(
        `data: {"candidates": [{"content": {"role": "model","parts": [{"text": "token1"}]}}],"modelVersion": "gemini-1.5-pro-001"}`
      )
    );
  });
  setTimeout(() => {
    passThrough.write(
      Buffer.from(
        `data: {"candidates": [{"content": {"role": "model","parts": [{"text": "token2"}]}}],"modelVersion": "gemini-1.5-pro-001"}`
      )
    );
  });
  setTimeout(() => {
    passThrough.write(
      Buffer.from(
        `data: {"candidates": [{"content": {"role": "model","parts": [{"text": "token3"}]}}],"modelVersion": "gemini-1.5-pro-001"}`
      )
    );
    // End the stream
    passThrough.end();
  });

  return {
    data: passThrough, // PassThrough stream will act as the async iterator
    status: 'ok',
  };
});

const callMessages = [
  new SystemMessage('Answer the following questions truthfully and as best you can.'),
  new HumanMessage('Question: Do you know my name?\n\n'),
] as unknown as BaseMessage[];

const callOptions = {
  stop: ['\n'],
  recursionLimit: 0,
  /** Maximum number of parallel calls to make. */
  maxConcurrency: 0,
};
const handleLLMNewToken = jest.fn();
const callRunManager = {
  handleLLMNewToken,
} as unknown as CallbackManagerForLLMRun;
const onFailedAttempt = jest.fn();
const defaultArgs = {
  actionsClient,
  connectorId,
  logger: mockLogger,
  streaming: false,
  maxRetries: 0,
  onFailedAttempt,
};

const testMessage = 'Yes, your name is Andrew. How can I assist you further, Andrew?';

export const mockActionResponse = {
  candidates: [
    {
      content: {
        role: 'model',
        parts: [
          {
            text: testMessage,
          },
        ],
      },
      finishReason: 'STOP',
    },
  ],
  usageMetadata: { input_tokens: 4, output_tokens: 10, total_tokens: 14 },
};

describe('ActionsClientChatVertexAI', () => {
  beforeEach(() => {
    jest.clearAllMocks();
    actionsClient.execute.mockImplementation(
      jest.fn().mockImplementation(() => ({
        data: mockActionResponse,
        status: 'ok',
      }))
    );
    mockExecute.mockImplementation(() => ({
      data: mockActionResponse,
      status: 'ok',
    }));
  });

  describe('_generate streaming: false', () => {
    it('returns the expected content when _generate is invoked', async () => {
      const actionsClientChatVertexAI = new ActionsClientChatVertexAI(defaultArgs);

      const result = await actionsClientChatVertexAI._generate(
        callMessages,
        callOptions,
        callRunManager
      );
      const subAction = actionsClient.execute.mock.calls[0][0].params.subAction;
      expect(subAction).toEqual('invokeAIRaw');

      expect(result.generations[0].text).toEqual(testMessage);
    });

    it('rejects with the expected error when the action result status is error', async () => {
      const hasErrorStatus = jest.fn().mockImplementation(() => {
        throw new Error(
          'ActionsClientChatVertexAI: action result status is error: action-result-message - action-result-service-message'
        );
      });

      actionsClient.execute.mockRejectedValueOnce(hasErrorStatus);

      const actionsClientChatVertexAI = new ActionsClientChatVertexAI({
        ...defaultArgs,
        actionsClient,
      });

      await expect(
        actionsClientChatVertexAI._generate(callMessages, callOptions, callRunManager)
      ).rejects.toThrowError();
      expect(onFailedAttempt).toHaveBeenCalled();
    });

    it('rejects with the expected error when the message has invalid content', async () => {
      actionsClient.execute.mockImplementation(
        jest.fn().mockResolvedValue({
          data: {
            Bad: true,
            finishReason: 'badness',
          },
          status: 'ok',
        })
      );

      const actionsClientChatVertexAI = new ActionsClientChatVertexAI(defaultArgs);

      await expect(
        actionsClientChatVertexAI._generate(callMessages, callOptions, callRunManager)
      ).rejects.toThrowError("Cannot read properties of undefined (reading 'text')");
    });
  });

  describe('*_streamResponseChunks', () => {
    it('iterates over gemini chunks', async () => {
      actionsClient.execute.mockImplementationOnce(mockStreamExecute);

      const actionsClientChatVertexAI = new ActionsClientChatVertexAI({
        ...defaultArgs,
        actionsClient,
        streaming: true,
      });

      const gen = actionsClientChatVertexAI._streamResponseChunks(
        callMessages,
        callOptions,
        callRunManager
      );

      const chunks = [];

      for await (const chunk of gen) {
        chunks.push(chunk);
      }

      expect(chunks.map((c) => c.text)).toEqual(['token1', 'token2', 'token3']);
      expect(handleLLMNewToken).toHaveBeenCalledTimes(3);
      expect(handleLLMNewToken).toHaveBeenCalledWith('token1');
      expect(handleLLMNewToken).toHaveBeenCalledWith('token2');
      expect(handleLLMNewToken).toHaveBeenCalledWith('token3');
    });
  });
});
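The streaming mock above works because Node's PassThrough is async-iterable: whatever the timers write before end() is observed by a for await loop over the same object. A standalone sketch of that pattern (hypothetical, outside Jest):

import { PassThrough } from 'stream';

const stream = new PassThrough();
setTimeout(() => {
  stream.write(Buffer.from('data: {"text":"token1"}'));
  stream.end(); // without end() the for await loop below would never finish
});

for await (const chunk of stream) {
  console.log(chunk.toString()); // data: {"text":"token1"}
}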
@@ -0,0 +1,187 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0; you may not use this file except in compliance with the Elastic License
 * 2.0.
 */

import { EnhancedGenerateContentResponse } from '@google/generative-ai';
import { ActionsClient } from '@kbn/actions-plugin/server';
import { PublicMethodsOf } from '@kbn/utility-types';
import { BaseMessage, UsageMetadata } from '@langchain/core/messages';
import { ChatGenerationChunk } from '@langchain/core/outputs';
import { ChatVertexAI } from '@langchain/google-vertexai';
import { get } from 'lodash/fp';
import { Readable } from 'stream';

import { Logger } from '@kbn/logging';
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models';
import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
import { GeminiPartText } from '@langchain/google-common/dist/types';
import {
  convertResponseBadFinishReasonToErrorMsg,
  convertResponseContentToChatGenerationChunk,
} from '../../utils/gemini';
import { ActionsClientChatConnection } from './connection';

const DEFAULT_GEMINI_TEMPERATURE = 0;
export interface CustomChatModelInput extends BaseChatModelParams {
  actionsClient: PublicMethodsOf<ActionsClient>;
  connectorId: string;
  logger: Logger;
  streaming: boolean;
  temperature?: number;
  signal?: AbortSignal;
  model?: string;
  maxTokens?: number;
}

export class ActionsClientChatVertexAI extends ChatVertexAI {
  #actionsClient: PublicMethodsOf<ActionsClient>;
  #connectorId: string;
  #model?: string;
  constructor({ actionsClient, connectorId, ...props }: CustomChatModelInput) {
    super({
      ...props,
      maxOutputTokens: props.maxTokens ?? 2048,
      temperature: props.temperature ?? DEFAULT_GEMINI_TEMPERATURE,
    });
    // LangChain needs model to be defined for logging purposes
    this.model = props.model ?? this.model;
    // If model is not specified by the consumer, the connector will define it, so do not
    // pass a LangChain default to the actionsClient
    this.#model = props.model;
    this.#actionsClient = actionsClient;
    this.#connectorId = connectorId;
    const client = this.buildClient(props);
    this.connection = new ActionsClientChatConnection(
      {
        ...this,
      },
      this.caller,
      client,
      false,
      actionsClient,
      connectorId
    );
  }

  buildConnection() {
    // prevent ChatVertexAI from overwriting our this.connection defined in super
  }

  async *_streamResponseChunks(
    messages: BaseMessage[],
    options: this['ParsedCallOptions'],
    runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<ChatGenerationChunk> {
    const parameters = this.invocationParams(options);
    const data = await this.connection.formatData(messages, parameters);
    const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
      const systemPart: GeminiPartText | undefined = data?.systemInstruction
        ?.parts?.[0] as unknown as GeminiPartText;
      const systemInstruction = systemPart?.text.length
        ? { systemInstruction: systemPart?.text }
        : {};
      const requestBody = {
        actionId: this.#connectorId,
        params: {
          subAction: 'invokeStream',
          subActionParams: {
            model: this.#model,
            messages: data?.contents,
            tools: data?.tools,
            temperature: this.temperature,
            ...systemInstruction,
          },
        },
      };

      const actionResult = await this.#actionsClient.execute(requestBody);

      if (actionResult.status === 'error') {
        throw new Error(
          `ActionsClientChatVertexAI: action result status is error: ${actionResult?.message} - ${actionResult?.serviceMessage}`
        );
      }

      const readable = get('data', actionResult) as Readable;

      if (typeof readable?.read !== 'function') {
        throw new Error('Action result status is error: result is not streamable');
      }
      return readable;
    });
    let usageMetadata: UsageMetadata | undefined;
    let index = 0;
    let partialStreamChunk = '';
    for await (const rawStreamChunk of stream) {
      const streamChunk = rawStreamChunk.toString();
      const nextChunk = `${partialStreamChunk + streamChunk}`;

      let parsedStreamChunk: EnhancedGenerateContentResponse | null = null;
      try {
        parsedStreamChunk = JSON.parse(nextChunk.replaceAll('data: ', '').replaceAll('\r\n', ''));
        partialStreamChunk = '';
      } catch (_) {
        partialStreamChunk += nextChunk;
      }

      if (parsedStreamChunk !== null && !parsedStreamChunk.candidates?.[0]?.finishReason) {
        const response = {
          ...parsedStreamChunk,
          functionCalls: () =>
            parsedStreamChunk?.candidates?.[0]?.content.parts[0].functionCall
              ? [parsedStreamChunk.candidates?.[0]?.content.parts[0].functionCall]
              : [],
        };

        if (
          'usageMetadata' in response &&
          this.streamUsage !== false &&
          options.streamUsage !== false
        ) {
          const genAIUsageMetadata = response.usageMetadata as {
            promptTokenCount: number;
            candidatesTokenCount: number;
            totalTokenCount: number;
          };
          if (!usageMetadata) {
            usageMetadata = {
              input_tokens: genAIUsageMetadata.promptTokenCount,
              output_tokens: genAIUsageMetadata.candidatesTokenCount,
              total_tokens: genAIUsageMetadata.totalTokenCount,
            };
          } else {
            // Under the hood, LangChain combines the prompt tokens. Google returns the updated
            // total each time, so we need to find the difference between the tokens.
            const outputTokenDiff =
              genAIUsageMetadata.candidatesTokenCount - usageMetadata.output_tokens;
            usageMetadata = {
              input_tokens: 0,
              output_tokens: outputTokenDiff,
              total_tokens: outputTokenDiff,
            };
          }
        }

        const chunk = convertResponseContentToChatGenerationChunk(response, {
          usageMetadata,
          index,
        });
        index += 1;

        if (chunk) {
          yield chunk;
          await runManager?.handleLLMNewToken(chunk.text ?? '');
        }
      } else if (parsedStreamChunk) {
        // handle bad finish reason
        const errorMessage = convertResponseBadFinishReasonToErrorMsg(parsedStreamChunk);
        if (errorMessage != null) {
          throw new Error(errorMessage);
        }
      }
    }
  }
}
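The loop above buffers partial chunks because the connector's Readable can split one `data: {...}` event across reads, so JSON.parse is retried on the accumulated text. A simplified standalone sketch of that accumulate-then-parse strategy (a hypothetical helper, not part of the diff):

function makeChunkParser() {
  let partial = '';
  return (raw: Buffer): unknown | null => {
    const next = partial + raw.toString();
    try {
      // Strip the SSE-style framing before parsing, as above.
      const parsed = JSON.parse(next.replaceAll('data: ', '').replaceAll('\r\n', ''));
      partial = ''; // a full JSON document arrived; reset the buffer
      return parsed;
    } catch {
      partial = next; // incomplete JSON; keep accumulating
      return null;
    }
  };
}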
@@ -0,0 +1,102 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0; you may not use this file except in compliance with the Elastic License
 * 2.0.
 */

import {
  ChatConnection,
  GoogleAbstractedClient,
  GoogleAIBaseLLMInput,
  GoogleLLMResponse,
} from '@langchain/google-common';
import { ActionsClient } from '@kbn/actions-plugin/server';
import { PublicMethodsOf } from '@kbn/utility-types';
import { EnhancedGenerateContentResponse } from '@google/generative-ai';
import { AsyncCaller } from '@langchain/core/utils/async_caller';
import { convertResponseBadFinishReasonToErrorMsg } from '../../utils/gemini';

// only implements non-streaming requests
// stream is handled by ActionsClientChatVertexAI.*_streamResponseChunks
export class ActionsClientChatConnection<Auth> extends ChatConnection<Auth> {
  actionsClient: PublicMethodsOf<ActionsClient>;
  connectorId: string;
  #model?: string;
  temperature: number;
  caller: AsyncCaller;
  constructor(
    fields: GoogleAIBaseLLMInput<Auth>,
    caller: AsyncCaller,
    client: GoogleAbstractedClient,
    _streaming: boolean, // defaulting to false in the super
    actionsClient: PublicMethodsOf<ActionsClient>,
    connectorId: string
  ) {
    super(fields, caller, client, false);
    this.actionsClient = actionsClient;
    this.connectorId = connectorId;
    this.caller = caller;
    this.#model = fields.model;
    this.temperature = fields.temperature ?? 0;
  }

  async _request(
    // TODO better types here
    data: {
      contents: unknown;
      tools: unknown[];
      systemInstruction?: { parts: [{ text: string }] };
    },
    options: { signal?: AbortSignal }
  ) {
    const systemInstruction = data?.systemInstruction?.parts?.[0]?.text.length
      ? { systemInstruction: data?.systemInstruction?.parts?.[0]?.text }
      : {};
    return this.caller.callWithOptions({ signal: options?.signal }, async () => {
      try {
        const requestBody = {
          actionId: this.connectorId,
          params: {
            subAction: 'invokeAIRaw',
            subActionParams: {
              model: this.#model,
              messages: data?.contents,
              tools: data?.tools,
              temperature: this.temperature,
              ...systemInstruction,
            },
          },
        };
        const actionResult = (await this.actionsClient.execute(requestBody)) as {
          status: string;
          data: EnhancedGenerateContentResponse;
          message?: string;
          serviceMessage?: string;
        };

        if (actionResult.status === 'error') {
          throw new Error(
            `ActionsClientChatVertexAI: action result status is error: ${actionResult?.message} - ${actionResult?.serviceMessage}`
          );
        }

        if (actionResult.data.candidates && actionResult.data.candidates.length > 0) {
          // handle bad finish reason
          const errorMessage = convertResponseBadFinishReasonToErrorMsg(actionResult.data);
          if (errorMessage != null) {
            throw new Error(errorMessage);
          }
        }
        return actionResult as unknown as GoogleLLMResponse;
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
      } catch (e: any) {
        // TODO: Improve error handling
        if (e.message?.includes('400 Bad Request')) {
          e.status = 400;
        }
        throw e;
      }
    });
  }
}
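For reference, the request body handed to actionsClient.execute on this non-streaming path looks roughly like the following. The literal values are hypothetical; the real shapes of contents and tools come from formatData in @langchain/google-common:

const exampleRequestBody = {
  actionId: 'my-gemini-connector-id', // hypothetical connector id
  params: {
    subAction: 'invokeAIRaw',
    subActionParams: {
      model: undefined, // left unset so the connector's configured default applies
      messages: [{ role: 'user', parts: [{ text: 'Hello' }] }],
      tools: [],
      temperature: 0,
      systemInstruction: 'Be terse.', // only spread in when non-empty
    },
  },
};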
@@ -0,0 +1,8 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0; you may not use this file except in compliance with the Elastic License
 * 2.0.
 */

export * from './chat_vertex';
@@ -8,34 +8,24 @@
import {
  Content,
  EnhancedGenerateContentResponse,
  FunctionCallPart,
  FunctionResponsePart,
  GenerateContentRequest,
  GenerateContentResult,
  InlineDataPart,
  POSSIBLE_ROLES,
  Part,
  TextPart,
  FinishReason,
  SafetyRating,
} from '@google/generative-ai';
import { ActionsClient } from '@kbn/actions-plugin/server';
import { PublicMethodsOf } from '@kbn/utility-types';
import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
import { ToolCallChunk } from '@langchain/core/dist/messages/tool';
import {
  AIMessageChunk,
  BaseMessage,
  ChatMessage,
  isBaseMessage,
  UsageMetadata,
} from '@langchain/core/messages';
import { BaseMessage, UsageMetadata } from '@langchain/core/messages';
import { ChatGenerationChunk } from '@langchain/core/outputs';
import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
import { Logger } from '@kbn/logging';
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models';
import { get } from 'lodash/fp';
import { Readable } from 'stream';
import {
  convertBaseMessagesToContent,
  convertResponseBadFinishReasonToErrorMsg,
  convertResponseContentToChatGenerationChunk,
} from '../utils/gemini';
const DEFAULT_GEMINI_TEMPERATURE = 0;

export interface CustomChatModelInput extends BaseChatModelParams {

@@ -48,12 +38,6 @@ export interface CustomChatModelInput extends BaseChatModelParams {
   maxTokens?: number;
 }

-// not sure why these properties are not on the type, as they are on the data
-interface SafetyReason extends SafetyRating {
-  blocked: boolean;
-  severity: string;
-}

 export class ActionsClientGeminiChatModel extends ChatGoogleGenerativeAI {
   #actionsClient: PublicMethodsOf<ActionsClient>;
   #connectorId: string;
@@ -265,257 +249,3 @@ export class ActionsClientGeminiChatModel extends ChatGoogleGenerativeAI {
     }
   }
 }
}

[The remaining ~250 removed lines of this hunk delete the helpers that previously lived alongside the class: convertResponseContentToChatGenerationChunk, convertAuthorToRole, convertBaseMessagesToContent, convertMessageContentToParts, getMessageAuthor, messageContentMedia, hadBadFinishReason, convertResponseBadFinishReasonToErrorMsg, the SafetyReason interface, and getSafetyReasons. They reappear near-verbatim in the utils/gemini hunk below, which also widens badFinishReasons and changes the error-message prefix, so the duplicated listing is not repeated here.]
@@ -8,5 +8,6 @@
 export { ActionsClientBedrockChatModel } from './bedrock_chat';
 export { ActionsClientChatOpenAI } from './chat_openai';
 export { ActionsClientGeminiChatModel } from './gemini_chat';
+export { ActionsClientChatVertexAI } from './chat_vertex';
 export { ActionsClientLlm } from './llm';
 export { ActionsClientSimpleChatModel } from './simple_chat_model';
@@ -6,9 +6,306 @@
 */

import { Logger } from '@kbn/core/server';
import {
  Content,
  EnhancedGenerateContentResponse,
  FinishReason,
  FunctionCallPart,
  FunctionResponsePart,
  InlineDataPart,
  Part,
  POSSIBLE_ROLES,
  SafetyRating,
  TextPart,
} from '@google/generative-ai';
import {
  AIMessageChunk,
  BaseMessage,
  ChatMessage,
  isBaseMessage,
  UsageMetadata,
} from '@langchain/core/messages';
import { ChatGenerationChunk } from '@langchain/core/outputs';
import { ToolCallChunk } from '@langchain/core/dist/messages/tool';
import { Readable } from 'stream';
import { StreamParser } from './types';

export function convertResponseContentToChatGenerationChunk(
  response: EnhancedGenerateContentResponse,
  extra: {
    usageMetadata?: UsageMetadata | undefined;
    index: number;
  }
): ChatGenerationChunk | null {
  if (!response.candidates || response.candidates.length === 0) {
    return null;
  }
  const functionCalls = response.functionCalls();
  const [candidate] = response.candidates;
  const { content, ...generationInfo } = candidate;
  const text = content?.parts[0]?.text ?? '';

  const toolCallChunks: ToolCallChunk[] = [];
  if (functionCalls) {
    toolCallChunks.push(
      ...functionCalls.map((fc) => ({
        ...fc,
        args: JSON.stringify(fc.args),
        index: extra.index,
        type: 'tool_call_chunk' as const,
      }))
    );
  }
  return new ChatGenerationChunk({
    text,
    message: new AIMessageChunk({
      content: text,
      name: !content ? undefined : content.role,
      tool_call_chunks: toolCallChunks,
      // Each chunk can have unique "generationInfo", and merging strategy is unclear,
      // so leave blank for now.
      additional_kwargs: {},
      usage_metadata: extra.usageMetadata,
    }),
    generationInfo,
  });
}

export function convertAuthorToRole(author: string): (typeof POSSIBLE_ROLES)[number] {
  switch (author) {
    /**
     * Note: Gemini does not currently support system messages;
     * we convert them to human messages and merge them with the following message
     * */
    case 'ai':
    case 'model': // getMessageAuthor returns message.name. code ex.: return message.name ?? type;
      return 'model';
    case 'system':
    case 'human':
      return 'user';
    case 'tool':
    case 'function':
      return 'function';
    default:
      throw new Error(`Unknown / unsupported author: ${author}`);
  }
}
export function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel: boolean) {
  return messages.reduce<{
    content: Content[];
    mergeWithPreviousContent: boolean;
  }>(
    (acc, message, index) => {
      if (!isBaseMessage(message)) {
        throw new Error('Unsupported message input');
      }
      const author = getMessageAuthor(message);
      if (author === 'system' && index !== 0) {
        throw new Error('System message should be the first one');
      }
      const role = convertAuthorToRole(author);
      const parts = convertMessageContentToParts(message, isMultimodalModel);

      if (acc.mergeWithPreviousContent) {
        const prevContent = acc.content[acc.content.length - 1];
        if (!prevContent) {
          throw new Error(
            'There was a problem parsing your system message. Please try a prompt without one.'
          );
        }
        prevContent.parts.push(...parts);

        return {
          mergeWithPreviousContent: false,
          content: acc.content,
        };
      }
      let actualRole = role;
      if (actualRole === 'function') {
        // GenerativeAI API will throw an error if the role is not "user" or "model."
        actualRole = 'user';
      }
      const content: Content = {
        role: actualRole,
        parts,
      };
      return {
        mergeWithPreviousContent: author === 'system',
        content: [...acc.content, content],
      };
    },
    { content: [], mergeWithPreviousContent: false }
  ).content;
}

export function convertMessageContentToParts(
  message: BaseMessage,
  isMultimodalModel: boolean
): Part[] {
  if (typeof message.content === 'string' && message.content !== '') {
    return [{ text: message.content }];
  }

  let functionCalls: FunctionCallPart[] = [];
  let functionResponses: FunctionResponsePart[] = [];
  let messageParts: Part[] = [];

  if (
    'tool_calls' in message &&
    Array.isArray(message.tool_calls) &&
    message.tool_calls.length > 0
  ) {
    functionCalls = message.tool_calls.map((tc) => ({
      functionCall: {
        name: tc.name,
        args: tc.args,
      },
    }));
  } else if (message._getType() === 'tool' && message.name && message.content) {
    functionResponses = [
      {
        functionResponse: {
          name: message.name,
          response: message.content,
        },
      },
    ];
  } else if (Array.isArray(message.content)) {
    messageParts = message.content.map((c) => {
      if (c.type === 'text') {
        return {
          text: c.text,
        } as TextPart;
      }

      if (c.type === 'image_url') {
        if (!isMultimodalModel) {
          throw new Error(`This model does not support images`);
        }
        let source;
        if (typeof c.image_url === 'string') {
          source = c.image_url;
        } else if (typeof c.image_url === 'object' && 'url' in c.image_url) {
          source = c.image_url.url;
        } else {
          throw new Error('Please provide image as base64 encoded data URL');
        }
        const [dm, data] = source.split(',');
        if (!dm.startsWith('data:')) {
          throw new Error('Please provide image as base64 encoded data URL');
        }

        const [mimeType, encoding] = dm.replace(/^data:/, '').split(';');
        if (encoding !== 'base64') {
          throw new Error('Please provide image as base64 encoded data URL');
        }

        return {
          inlineData: {
            data,
            mimeType,
          },
        } as InlineDataPart;
      } else if (c.type === 'media') {
        return messageContentMedia(c);
      } else if (c.type === 'tool_use') {
        return {
          functionCall: {
            name: c.name,
            args: c.input,
          },
        } as FunctionCallPart;
      }
      throw new Error(`Unknown content type ${(c as { type: string }).type}`);
    });
  }

  return [...messageParts, ...functionCalls, ...functionResponses];
}

export function getMessageAuthor(message: BaseMessage) {
  const type = message._getType();
  if (ChatMessage.isInstance(message)) {
    return message.role;
  }
  if (type === 'tool') {
    return type;
  }
  return message.name ?? type;
}

// will be removed once FileDataPart is supported in @langchain/google-genai
function messageContentMedia(content: Record<string, unknown>): InlineDataPart {
  if ('mimeType' in content && 'data' in content) {
    return {
      inlineData: {
        mimeType: content.mimeType,
        data: content.data,
      },
    } as InlineDataPart;
  }
  throw new Error('Invalid media content');
}

// TODO Google's TS library is behind the API
// remove this enum once the library is updated
// https://github.com/google-gemini/generative-ai-js/pull/270
enum FinishReasonMore {
  BLOCKLIST = 'BLOCKLIST',
  PROHIBITED_CONTENT = 'PROHIBITED_CONTENT',
  SPII = 'SPII',
  MALFORMED_FUNCTION_CALL = 'MALFORMED_FUNCTION_CALL',
}
const badFinishReasons = [
  FinishReason.RECITATION,
  FinishReason.SAFETY,
  FinishReasonMore.BLOCKLIST,
  FinishReasonMore.PROHIBITED_CONTENT,
  FinishReasonMore.SPII,
  FinishReasonMore.MALFORMED_FUNCTION_CALL,
];
function hadBadFinishReason(candidate: { finishReason?: FinishReason }) {
  return !!candidate.finishReason && badFinishReasons.includes(candidate.finishReason);
}

export function convertResponseBadFinishReasonToErrorMsg(
  response: EnhancedGenerateContentResponse
): string | null {
  if (response.candidates && response.candidates.length > 0) {
    const candidate = response.candidates[0];
    if (hadBadFinishReason(candidate)) {
      if (
        candidate.finishReason === FinishReason.SAFETY &&
        candidate.safetyRatings &&
        (candidate.safetyRatings?.length ?? 0) > 0
      ) {
        const safetyReasons = getSafetyReasons(candidate.safetyRatings as SafetyReason[]);
        return `Gemini Utils: action result status is error. Candidate was blocked due to ${candidate.finishReason} - ${safetyReasons}`;
      } else {
        return `Gemini Utils: action result status is error. Candidate was blocked due to ${candidate.finishReason}`;
      }
    }
  }
  return null;
}

// not sure why these properties are not on the type, as they are on the data
interface SafetyReason extends SafetyRating {
  blocked: boolean;
  severity: string;
}

const getSafetyReasons = (safetyRatings: SafetyReason[]) => {
  const reasons = safetyRatings.filter((t: SafetyReason) => t.blocked);
  return reasons.reduce(
    (acc: string, t: SafetyReason, i: number) =>
      `${acc.length ? `${acc} ` : ''}${t.category}: ${t.severity}${
        i < reasons.length - 1 ? ',' : ''
      }`,
    ''
  );
};

export const parseGeminiStreamAsAsyncIterator = async function* (
  stream: Readable,
  logger: Logger,
[remainder of this hunk not shown in the diff view]
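A worked example of the system-message handling in convertBaseMessagesToContent: since Gemini has no system role, the system message becomes a 'user' Content entry and the following human message's parts are merged into it. Hypothetical input, assuming the message classes from @langchain/core/messages:

import { HumanMessage, SystemMessage } from '@langchain/core/messages';

const contents = convertBaseMessagesToContent(
  [new SystemMessage('Be terse.'), new HumanMessage('What is ES|QL?')],
  false // not a multimodal model
);
// mergeWithPreviousContent is true after the system message, so both parts
// land in a single 'user' entry:
// [{ role: 'user', parts: [{ text: 'Be terse.' }, { text: 'What is ES|QL?' }] }]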
@@ -28,7 +28,7 @@ export const GENERATE_CHAT_TITLE_PROMPT = (responseLanguage: string, llmType?: s
     ? ChatPromptTemplate.fromMessages([
         [
           'system',
-          `You are a title generator for a helpful assistant for Elastic Security. Assume the following human message is the start of a conversation between you and a human; Do not respond to the human message, instead respond with conversation title relevant to the human's message. DO NOT UNDER ANY CIRCUMSTANCES use quotes or markdown in your response. This title is shown in a list of conversations to the human, so title it for the user, not for you. Please create the title in ${responseLanguage}. Respond with the title only with no other text explaining your response. As an example, for the given MESSAGE, this is the TITLE:
+          `You are a title generator for a helpful assistant for Elastic Security. Assume the following human message is the start of a conversation between you and a human. Generate a relevant conversation title for the human's message in plain text. Make sure the title is formatted for the user, without using quotes or markdown. The title should clearly reflect the content of the message and be appropriate for a list of conversations. Please create the title in ${responseLanguage}. Respond only with the title. As an example, for the given MESSAGE, this is the TITLE:

 MESSAGE: I am having trouble with the Elastic Security app.
 TITLE: Troubleshooting Elastic Security app issues
@@ -7,6 +7,7 @@
 import { RunnableConfig } from '@langchain/core/runnables';
 import { AgentRunnableSequence } from 'langchain/dist/agents/agent';
+import { formatLatestUserMessage } from '../prompts';
 import { AgentState, NodeParamsBase } from '../types';
 import { NodeType } from '../constants';

@@ -37,6 +38,8 @@ export async function runAgent({
   const agentOutcome = await agentRunnable.withConfig({ tags: [AGENT_NODE_TAG] }).invoke(
     {
       ...state,
+      // prepend any user prompt (gemini)
+      input: formatLatestUserMessage(state.input, state.llmType),
       chat_history: state.messages, // TODO: Message de-dupe with ...state spread
     },
     config
@@ -10,8 +10,11 @@ const YOU_ARE_A_HELPFUL_EXPERT_ASSISTANT =
 const IF_YOU_DONT_KNOW_THE_ANSWER = 'Do not answer questions unrelated to Elastic Security.';

 export const DEFAULT_SYSTEM_PROMPT = `${YOU_ARE_A_HELPFUL_EXPERT_ASSISTANT} ${IF_YOU_DONT_KNOW_THE_ANSWER}`;

-export const GEMINI_SYSTEM_PROMPT =
-  `ALWAYS use the provided tools, as they have access to the latest data and syntax.` +
-  "The final response is the only output the user sees and should be a complete answer to the user's question. Do not leave out important tool output. The final response should never be empty. Don't forget to use tools.";
+// system prompt from @afirstenberg
+const BASE_GEMINI_PROMPT =
+  'You are an assistant that is an expert at using tools and Elastic Security, doing your best to use these tools to answer questions or follow instructions. It is very important to use tools to answer the question or follow the instructions rather than coming up with your own answer. Tool calls are good. Sometimes you may need to make several tool calls to accomplish the task or get an answer to the question that was asked. Use as many tool calls as necessary.';
+const KB_CATCH =
+  'If the knowledge base tool gives empty results, do your best to answer the question from the perspective of an expert security analyst.';
+export const GEMINI_SYSTEM_PROMPT = `${BASE_GEMINI_PROMPT} ${KB_CATCH}`;
 export const BEDROCK_SYSTEM_PROMPT = `Use tools as often as possible, as they have access to the latest data and syntax. Always return value from ESQLKnowledgeBaseTool as is. Never return <thinking> tags in the response, but make sure to include <result> tags content in the response. Do not reflect on the quality of the returned search results in your response.`;
+export const GEMINI_USER_PROMPT = `Now, always using the tools at your disposal, step by step, come up with a response to this request:\n\n`;
@@ -10,6 +10,7 @@ import {
   BEDROCK_SYSTEM_PROMPT,
   DEFAULT_SYSTEM_PROMPT,
   GEMINI_SYSTEM_PROMPT,
+  GEMINI_USER_PROMPT,
 } from './nodes/translations';

 export const formatPrompt = (prompt: string, additionalPrompt?: string) =>

@@ -23,7 +24,8 @@ export const formatPrompt = (prompt: string, additionalPrompt?: string) =>
 export const systemPrompts = {
   openai: DEFAULT_SYSTEM_PROMPT,
   bedrock: `${DEFAULT_SYSTEM_PROMPT} ${BEDROCK_SYSTEM_PROMPT}`,
-  gemini: `${DEFAULT_SYSTEM_PROMPT} ${GEMINI_SYSTEM_PROMPT}`,
+  // The default prompt overwhelms gemini, do not prepend
+  gemini: GEMINI_SYSTEM_PROMPT,
   structuredChat: `Respond to the human as helpfully and accurately as possible. You have access to the following tools:

 {tools}

@@ -98,3 +100,16 @@ export const formatPromptStructured = (prompt: string, additionalPrompt?: string
 ]);

 export const structuredChatAgentPrompt = formatPromptStructured(systemPrompts.structuredChat);

+/**
+ * If Gemini is the llmType,
+ * adds a user prompt for the latest message in a conversation
+ * @param prompt
+ * @param llmType
+ */
+export const formatLatestUserMessage = (prompt: string, llmType?: string): string => {
+  if (llmType === 'gemini') {
+    return `${GEMINI_USER_PROMPT}${prompt}`;
+  }
+  return prompt;
+};
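A quick illustration of the new helper's behavior (the input strings are illustrative):

formatLatestUserMessage('Show failed logins', 'gemini');
// => `${GEMINI_USER_PROMPT}Show failed logins` (the step-by-step tool nudge is prepended)
formatLatestUserMessage('Show failed logins', 'bedrock');
// => 'Show failed logins' (non-Gemini llmTypes pass through unchanged)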
@@ -17,7 +17,7 @@ import {
   ActionsClientChatOpenAI,
   ActionsClientBedrockChatModel,
   ActionsClientSimpleChatModel,
-  ActionsClientGeminiChatModel,
+  ActionsClientChatVertexAI,
 } from '@kbn/langchain/server';
 import { CustomHttpRequestError } from './custom_http_request_error';

@@ -187,5 +187,5 @@ export const getLlmClass = (llmType?: string, bedrockChatEnabled?: boolean) =>
     : llmType === 'bedrock' && bedrockChatEnabled
     ? ActionsClientBedrockChatModel
     : llmType === 'gemini' && bedrockChatEnabled
-    ? ActionsClientGeminiChatModel
+    ? ActionsClientChatVertexAI
     : ActionsClientSimpleChatModel;
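With this change, routing for Gemini connectors becomes (a sketch, assuming the ternary above is the whole chain):

getLlmClass('gemini', true);  // ActionsClientChatVertexAI (was ActionsClientGeminiChatModel)
getLlmClass('gemini', false); // ActionsClientSimpleChatModel, the feature-flag-off fallback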
@@ -39,6 +39,7 @@ import {
 import {
   ActionsClientBedrockChatModel,
   ActionsClientChatOpenAI,
+  ActionsClientChatVertexAI,
   ActionsClientGeminiChatModel,
   ActionsClientLlm,
   ActionsClientSimpleChatModel,

@@ -230,6 +231,7 @@ export type AssistantToolLlm =
   | ActionsClientBedrockChatModel
   | ActionsClientChatOpenAI
   | ActionsClientGeminiChatModel
+  | ActionsClientChatVertexAI
   | ActionsClientSimpleChatModel;

 export interface AssistantToolParams {
@@ -17,7 +17,7 @@ import { APP_UI_ID } from '../../../../common';

 const toolDetails = {
   description:
-    'Call this for knowledge on how to build an ESQL query, or answer questions about the ES|QL query language. Input must always be the query on a single line, with no other text. Your answer will be parsed as JSON, so never use quotes within the output and instead use backticks. Do not add any additional text to describe your output.',
+    'Call this for knowledge on how to build an ESQL query, or answer questions about the ES|QL query language. Input must always be the user query on a single line, with no other text. Your answer will be parsed as JSON, so never use quotes within the output and instead use backticks. Do not add any additional text to describe your output.',
   id: 'esql-knowledge-base-tool',
   name: 'ESQLKnowledgeBaseTool',
 };
@@ -20,16 +20,11 @@ const toolDetails = {
   id: 'nl-to-esql-tool',
   name: TOOL_NAME,
   description: `You MUST use the "${TOOL_NAME}" function when the user wants to:
   - run any arbitrary query
   - breakdown or filter ES|QL queries that are displayed on the current page
   - convert queries from another language to ES|QL
   - asks general questions about ES|QL

-  DO NOT UNDER ANY CIRCUMSTANCES generate ES|QL queries or explain anything about the ES|QL query language yourself.
-  DO NOT UNDER ANY CIRCUMSTANCES try to correct an ES|QL query yourself - always use the "${TOOL_NAME}" function for this.
-
-  Even if the "${TOOL_NAME}" function was used before that, follow it up with the "${TOOL_NAME}" function. If a query fails, do not attempt to correct it yourself. Again you should call the "${TOOL_NAME}" function,
-  even if it has been called before.`,
+  ALWAYS use this tool to generate ES|QL queries or explain anything about the ES|QL query language rather than coming up with your own answer.`,
 };

 export const NL_TO_ESQL_TOOL: AssistantTool = {
@@ -395,9 +395,7 @@ const formatGeminiPayload = ({
     temperature,
     maxOutputTokens: DEFAULT_TOKEN_LIMIT,
   },
-  ...(systemInstruction
-    ? { system_instruction: { role: 'user', parts: [{ text: systemInstruction }] } }
-    : {}),
+  ...(systemInstruction ? { system_instruction: { parts: [{ text: systemInstruction }] } } : {}),
   ...(toolConfig
     ? {
         tool_config: {
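After this change the system instruction is sent without a role wrapper. A hypothetical fragment of the formatted payload (the text is illustrative; the enclosing config object is elided in the hunk above):

const payloadFragment = {
  system_instruction: { parts: [{ text: 'Be terse.' }] }, // previously also carried role: 'user'
};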
yarn.lock (124 changed lines)
@@ -7312,17 +7312,16 @@
     zod "^3.22.3"
     zod-to-json-schema "^3.22.5"

-"@langchain/core@>0.1.0 <0.3.0", "@langchain/core@>=0.2.11 <0.3.0", "@langchain/core@>=0.2.16 <0.3.0", "@langchain/core@>=0.2.20 <0.3.0", "@langchain/core@>=0.2.5 <0.3.0", "@langchain/core@^0.2.18", "@langchain/core@~0.2.11":
-  version "0.2.18"
-  resolved "https://registry.yarnpkg.com/@langchain/core/-/core-0.2.18.tgz#1ac4f307fa217ab3555c9634147a6c4ad9826092"
-  integrity sha512-ru542BwNcsnDfjTeDbIkFIchwa54ctHZR+kVrC8U9NPS9/36iM8p8ruprOV7Zccj/oxtLE5UpEhV+9MZhVcFlA==
+"@langchain/core@>0.1.0 <0.3.0", "@langchain/core@>=0.2.11 <0.3.0", "@langchain/core@>=0.2.20 <0.3.0", "@langchain/core@>=0.2.5 <0.3.0", "@langchain/core@^0.2.18", "@langchain/core@~0.2.11":
+  version "0.2.32"
+  resolved "https://registry.yarnpkg.com/@langchain/core/-/core-0.2.32.tgz#a5dfbc49f8b6c15c8082763b93aeae8f9f4ca1a0"
+  integrity sha512-S27M+9Qou2qtcLfFGEvANkJ/zHq5XApeQsR6Q4I7C6v9x07eoYr558h6vVy6WQmKcksgbCIJ854ikwp173wBjA==
   dependencies:
     ansi-styles "^5.0.0"
     camelcase "6"
     decamelize "1.2.0"
     js-tiktoken "^1.0.12"
-    langsmith "~0.1.39"
-    ml-distance "^4.0.0"
+    langsmith "^0.1.43"
     mustache "^4.2.0"
     p-queue "^6.6.2"
     p-retry "4"

@@ -7330,15 +7329,37 @@
     zod "^3.22.4"
     zod-to-json-schema "^3.22.3"

-"@langchain/google-genai@^0.0.23":
-  version "0.0.23"
-  resolved "https://registry.yarnpkg.com/@langchain/google-genai/-/google-genai-0.0.23.tgz#e73af501bc1df4c7642b531759b82dc3eb7ae459"
-  integrity sha512-MTSCJEoKsfU1inz0PWvAjITdNFM4s41uvBCwLpcgx3jWJIEisczFD82x86ahYqJlb2fD6tohYSaCH/4tKAdkXA==
+"@langchain/google-common@^0.1.1", "@langchain/google-common@~0.1.0":
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/@langchain/google-common/-/google-common-0.1.1.tgz#7f8730e3dfb2920487dece5cbe738c13266b5e7b"
+  integrity sha512-oT/6lBev/Ufkp1dJbOTJ2S7xD9c+w9CqnqKqFOSxuZJbM4G8hzJtt7PDBOGfamIwtQP8dR7ORKXs1sCl+f5Tig==
+  dependencies:
+    uuid "^10.0.0"
+    zod-to-json-schema "^3.22.4"
+
+"@langchain/google-gauth@~0.1.0":
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/@langchain/google-gauth/-/google-gauth-0.1.0.tgz#ea44941afede32a1d14fd18a6c6b500c7c7a53eb"
+  integrity sha512-0kps1NmaNiSl4n3lRw+7xsyhrEfIxNqBjih0kNYWPjLg55f9I9+QAlz7F1Sz/628HF1WQLFLQcBQA4geGzvenQ==
+  dependencies:
+    "@langchain/google-common" "~0.1.0"
+    google-auth-library "^8.9.0"
+
+"@langchain/google-genai@^0.1.0":
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/@langchain/google-genai/-/google-genai-0.1.0.tgz#89552873210d72a5834de20fcbef3e6753283344"
+  integrity sha512-6rIba77zJVMj+048tLfkCBrkFbfAMiT+AfLEsu5s+CFoFmXMiI/dbKeDL4vhUWrJVb9uL4ZZyrnl0nKxyEKYgA==
+  dependencies:
+    "@google/generative-ai" "^0.7.0"
+    "@langchain/core" ">=0.2.16 <0.3.0"
+    zod-to-json-schema "^3.22.4"
+
+"@langchain/google-vertexai@^0.1.0":
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/@langchain/google-vertexai/-/google-vertexai-0.1.0.tgz#e8ac6ae0bbdb6364d579def171ccfc1a26fd0cf2"
+  integrity sha512-xTi5NvNGSLQl/7OTsj4QTT0DkNbZ7cYDrEB0HqpZOwo6I5dulh/h2payGVQ6hdXj7Yyv78dRc5FdQSbyHui/WQ==
+  dependencies:
+    "@langchain/google-gauth" "~0.1.0"

 "@langchain/langgraph@0.0.34":
   version "0.0.34"
   resolved "https://registry.yarnpkg.com/@langchain/langgraph/-/langgraph-0.0.34.tgz#1504c29ce524d08d6f076c34e0623c6de1f1246c"
@@ -11509,16 +11530,16 @@
     resolved "https://registry.yarnpkg.com/@types/use-sync-external-store/-/use-sync-external-store-0.0.3.tgz#b6725d5f4af24ace33b36fafd295136e75509f43"
     integrity sha512-EwmlvuaxPNej9+T4v5AuBPJa2x2UOJVdjCtDHgcDqitUeOtjnJKJ+apYjVcAoBEMjKW1VVFGZLUb5+qqa09XFA==

+"@types/uuid@^10.0.0":
+  version "10.0.0"
+  resolved "https://registry.yarnpkg.com/@types/uuid/-/uuid-10.0.0.tgz#e9c07fe50da0f53dc24970cca94d619ff03f6f6d"
+  integrity sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==
+
-"@types/uuid@^9.0.0":
-  version "9.0.0"
-  resolved "https://registry.yarnpkg.com/@types/uuid/-/uuid-9.0.0.tgz#53ef263e5239728b56096b0a869595135b7952d2"
-  integrity sha512-kr90f+ERiQtKWMz5rP32ltJ/BtULDI5RVO0uavn1HQUOwjx0R1h0rnDYNL0CepF1zL5bSY6FISAfd9tOdDhU5Q==
-
 "@types/uuid@^9.0.1":
   version "9.0.2"
   resolved "https://registry.yarnpkg.com/@types/uuid/-/uuid-9.0.2.tgz#ede1d1b1e451548d44919dc226253e32a6952c4b"
   integrity sha512-kNnC1GFBLuhImSnV7w4njQkUiJi0ZXUycu1rUaouPqiKlXkh77JKgdRnTAp1x5eBwcIwbtI+3otwzuIDEuDoxQ==

 "@types/vinyl-fs@*", "@types/vinyl-fs@^3.0.2":
   version "3.0.2"
   resolved "https://registry.yarnpkg.com/@types/vinyl-fs/-/vinyl-fs-3.0.2.tgz#cbaef5160ad7695483af0aa1b4fe67f166c18feb"
@@ -12760,7 +12781,7 @@ arrify@^1.0.1:
     resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d"
     integrity sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0=

-arrify@^2.0.1:
+arrify@^2.0.0, arrify@^2.0.1:
   version "2.0.1"
   resolved "https://registry.yarnpkg.com/arrify/-/arrify-2.0.1.tgz#c9655e9331e0abcd588d2a7cad7e9956f66701fa"
   integrity sha512-3duEwti880xqi4eAMN8AyR4a0ByT90zoYdLlevfrvU43vb0YZwZVfxOgxWrLXXXpyugL0hNZc9G6BiB5B3nUug==
@@ -18019,6 +18040,11 @@ fast-stream-to-buffer@^1.0.0:
   dependencies:
     end-of-stream "^1.4.1"

+fast-text-encoding@^1.0.0:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/fast-text-encoding/-/fast-text-encoding-1.0.6.tgz#0aa25f7f638222e3396d72bf936afcf1d42d6867"
+  integrity sha512-VhXlQgj9ioXCqGstD37E/HBeqEGV/qOD/kmbVG8h5xKBYvM1L3lR1Zn4555cQ8GkYbJa8aJSipLPndE1k6zK2w==
+
 fastest-levenshtein@^1.0.12:
   version "1.0.12"
   resolved "https://registry.yarnpkg.com/fastest-levenshtein/-/fastest-levenshtein-1.0.12.tgz#9990f7d3a88cc5a9ffd1f1745745251700d497e2"
@@ -18691,6 +18717,16 @@ gauge@^3.0.0:
     strip-ansi "^6.0.1"
     wide-align "^1.1.2"

+gaxios@^5.0.0, gaxios@^5.0.1:
+  version "5.1.3"
+  resolved "https://registry.yarnpkg.com/gaxios/-/gaxios-5.1.3.tgz#f7fa92da0fe197c846441e5ead2573d4979e9013"
+  integrity sha512-95hVgBRgEIRQQQHIbnxBXeHbW4TqFk4ZDJW7wmVtvYar72FdhRIo1UGOLS2eRAKCPEdPBWu+M7+A33D9CdX9rA==
+  dependencies:
+    extend "^3.0.2"
+    https-proxy-agent "^5.0.0"
+    is-stream "^2.0.0"
+    node-fetch "^2.6.9"
+
 gaxios@^6.0.0, gaxios@^6.1.1:
   version "6.6.0"
   resolved "https://registry.yarnpkg.com/gaxios/-/gaxios-6.6.0.tgz#af8242fff0bbb82a682840d5feaa91b6a1c58be4"
@@ -18702,6 +18738,14 @@ gaxios@^6.0.0, gaxios@^6.1.1:
     node-fetch "^2.6.9"
     uuid "^9.0.1"

+gcp-metadata@^5.3.0:
+  version "5.3.0"
+  resolved "https://registry.yarnpkg.com/gcp-metadata/-/gcp-metadata-5.3.0.tgz#6f45eb473d0cb47d15001476b48b663744d25408"
+  integrity sha512-FNTkdNEnBdlqF2oatizolQqNANMrcqJt6AAYt99B3y1aLLC8Hc5IOBb+ZnnzllodEEf6xMBp6wRcBbc16fa65w==
+  dependencies:
+    gaxios "^5.0.0"
+    json-bigint "^1.0.0"
+
 gcp-metadata@^6.1.0:
   version "6.1.0"
   resolved "https://registry.yarnpkg.com/gcp-metadata/-/gcp-metadata-6.1.0.tgz#9b0dd2b2445258e7597f2024332d20611cbd6b8c"
@@ -19149,6 +19193,21 @@ gonzales-pe@^4.3.0:
   dependencies:
     minimist "^1.2.5"

+google-auth-library@^8.9.0:
+  version "8.9.0"
+  resolved "https://registry.yarnpkg.com/google-auth-library/-/google-auth-library-8.9.0.tgz#15a271eb2ec35d43b81deb72211bd61b1ef14dd0"
+  integrity sha512-f7aQCJODJFmYWN6PeNKzgvy9LI2tYmXnzpNDHEjG5sDNPgGb2FXQyTBnXeSH+PAtpKESFD+LmHw3Ox3mN7e1Fg==
+  dependencies:
+    arrify "^2.0.0"
+    base64-js "^1.3.0"
+    ecdsa-sig-formatter "^1.0.11"
+    fast-text-encoding "^1.0.0"
+    gaxios "^5.0.0"
+    gcp-metadata "^5.3.0"
+    gtoken "^6.1.0"
+    jws "^4.0.0"
+    lru-cache "^6.0.0"
+
 google-auth-library@^9.10.0:
   version "9.10.0"
   resolved "https://registry.yarnpkg.com/google-auth-library/-/google-auth-library-9.10.0.tgz#c9fb940923f7ff2569d61982ee1748578c0bbfd4"
@@ -19161,6 +19220,13 @@ google-auth-library@^9.10.0:
     gtoken "^7.0.0"
     jws "^4.0.0"

+google-p12-pem@^4.0.0:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/google-p12-pem/-/google-p12-pem-4.0.1.tgz#82841798253c65b7dc2a4e5fe9df141db670172a"
+  integrity sha512-WPkN4yGtz05WZ5EhtlxNDWPhC4JIic6G8ePitwUWy4l+XPVYec+a0j0Ts47PDtW59y3RwAhUd9/h9ZZ63px6RQ==
+  dependencies:
+    node-forge "^1.3.1"
+
 google-protobuf@^3.6.1:
   version "3.19.4"
   resolved "https://registry.yarnpkg.com/google-protobuf/-/google-protobuf-3.19.4.tgz#8d32c3e34be9250956f28c0fb90955d13f311888"
@@ -19226,6 +19292,15 @@ graphql@^16.6.0, graphql@^16.8.1:
     resolved "https://registry.yarnpkg.com/graphql/-/graphql-16.8.1.tgz#1930a965bef1170603702acdb68aedd3f3cf6f07"
     integrity sha512-59LZHPdGZVh695Ud9lRzPBVTtlX9ZCV150Er2W43ro37wVof0ctenSaskPPjN7lVTIN8mSZt8PHUNKZuNQUuxw==

+gtoken@^6.1.0:
+  version "6.1.2"
+  resolved "https://registry.yarnpkg.com/gtoken/-/gtoken-6.1.2.tgz#aeb7bdb019ff4c3ba3ac100bbe7b6e74dce0e8bc"
+  integrity sha512-4ccGpzz7YAr7lxrT2neugmXQ3hP9ho2gcaityLVkiUecAiwiy60Ii8gRbZeOsXV19fYaRjgBSshs8kXw+NKCPQ==
+  dependencies:
+    gaxios "^5.0.1"
+    google-p12-pem "^4.0.0"
+    jws "^4.0.0"
+
 gtoken@^7.0.0:
   version "7.1.0"
   resolved "https://registry.yarnpkg.com/gtoken/-/gtoken-7.1.0.tgz#d61b4ebd10132222817f7222b1e6064bd463fc26"
@@ -19915,7 +19990,7 @@ https-browserify@^1.0.0:
     resolved "https://registry.yarnpkg.com/https-browserify/-/https-browserify-1.0.0.tgz#ec06c10e0a34c0f2faf199f7fd7fc78fffd03c73"
     integrity sha1-7AbBDgo0wPL68Zn3/X/Hj//QPHM=

-https-proxy-agent@^5.0.1:
+https-proxy-agent@^5.0.0, https-proxy-agent@^5.0.1:
   version "5.0.1"
   resolved "https://registry.yarnpkg.com/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz#c59ef224a04fe8b754f3db0063a25ea30d0005d6"
   integrity sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==
@@ -22093,16 +22168,17 @@ langchainhub@~0.0.8:
     resolved "https://registry.yarnpkg.com/langchainhub/-/langchainhub-0.0.8.tgz#fd4b96dc795e22e36c1a20bad31b61b0c33d3110"
     integrity sha512-Woyb8YDHgqqTOZvWIbm2CaFDGfZ4NTSyXV687AG4vXEfoNo7cGQp7nhl7wL3ehenKWmNEmcxCLgOZzW8jE6lOQ==

-langsmith@^0.1.39, langsmith@~0.1.30, langsmith@~0.1.39:
-  version "0.1.39"
-  resolved "https://registry.yarnpkg.com/langsmith/-/langsmith-0.1.39.tgz#cc99f1828a9c0f5ba24bec6b0121edc44e8d282d"
-  integrity sha512-K2/qbc96JhrZbSL74RbZ0DBOpTB9Mxicu8RQrZ88Xsp1bH2O3+y5EdcvC0g/1YzQWQhcQ4peknCA24c3VTNiNA==
+langsmith@^0.1.43, langsmith@^0.1.55, langsmith@~0.1.30:
+  version "0.1.55"
+  resolved "https://registry.yarnpkg.com/langsmith/-/langsmith-0.1.55.tgz#bdbb8015a28093f4a248c0ee9b8937731c5baa93"
+  integrity sha512-6NVtI04UUnIY59I/imOX02FG/QMGfqStu8tiJtyyreKMv2GAN0EE9Z5Ap1wzOe6v8ukEcV3NwEO2LYOPwup1PQ==
   dependencies:
-    "@types/uuid" "^9.0.1"
+    "@types/uuid" "^10.0.0"
     commander "^10.0.1"
     p-queue "^6.6.2"
     p-retry "4"
-    uuid "^9.0.0"
+    semver "^7.6.3"
+    uuid "^10.0.0"

 language-subtag-registry@~0.3.2:
   version "0.3.21"