[Security solution] AI Assistant, replace LLM with SimpleChatModel + Bedrock streaming (#182041)

Steph Milovic 2024-05-22 16:29:14 -06:00 committed by GitHub
parent 9cb7940601
commit e2e1fb3504
59 changed files with 1467 additions and 414 deletions


@ -1058,6 +1058,7 @@ module.exports = {
'x-pack/plugins/elastic_assistant/**/*.{ts,tsx}',
'x-pack/packages/kbn-elastic-assistant/**/*.{ts,tsx}',
'x-pack/packages/kbn-elastic-assistant-common/**/*.{ts,tsx}',
'x-pack/packages/kbn-langchain/**/*.{ts,tsx}',
'x-pack/packages/security-solution/**/*.{ts,tsx}',
'x-pack/plugins/security_solution/**/*.{ts,tsx}',
'x-pack/plugins/security_solution_ess/**/*.{ts,tsx}',
@ -1071,6 +1072,7 @@ module.exports = {
'x-pack/plugins/elastic_assistant/**/*.{test,mock,test_helper}.{ts,tsx}',
'x-pack/packages/kbn-elastic-assistant/**/*.{test,mock,test_helper}.{ts,tsx}',
'x-pack/packages/kbn-elastic-assistant-common/**/*.{test,mock,test_helper}.{ts,tsx}',
'x-pack/packages/kbn-langchain/**/*.{test,mock,test_helper}.{ts,tsx}',
'x-pack/packages/security-solution/**/*.{test,mock,test_helper}.{ts,tsx}',
'x-pack/plugins/security_solution/**/*.{test,mock,test_helper}.{ts,tsx}',
'x-pack/plugins/security_solution_ess/**/*.{test,mock,test_helper}.{ts,tsx}',
@ -1090,6 +1092,7 @@ module.exports = {
'x-pack/plugins/elastic_assistant/**/*.{ts,tsx}',
'x-pack/packages/kbn-elastic-assistant/**/*.{ts,tsx}',
'x-pack/packages/kbn-elastic-assistant-common/**/*.{ts,tsx}',
'x-pack/packages/kbn-langchain/**/*.{ts,tsx}',
'x-pack/packages/security-solution/**/*.{ts,tsx}',
'x-pack/plugins/security_solution/**/*.{ts,tsx}',
'x-pack/plugins/security_solution_ess/**/*.{ts,tsx}',
@ -1128,6 +1131,7 @@ module.exports = {
'x-pack/plugins/elastic_assistant/**/*.{js,mjs,ts,tsx}',
'x-pack/packages/kbn-elastic-assistant/**/*.{js,mjs,ts,tsx}',
'x-pack/packages/kbn-elastic-assistant-common/**/*.{js,mjs,ts,tsx}',
'x-pack/packages/kbn-langchain/**/*.{js,mjs,ts,tsx}',
'x-pack/packages/security-solution/**/*.{js,mjs,ts,tsx}',
'x-pack/plugins/security_solution/**/*.{js,mjs,ts,tsx}',
'x-pack/plugins/security_solution_ess/**/*.{js,mjs,ts,tsx}',

.github/CODEOWNERS vendored

@ -517,6 +517,7 @@ src/plugins/kibana_react @elastic/appex-sharedux
src/plugins/kibana_usage_collection @elastic/kibana-core
src/plugins/kibana_utils @elastic/appex-sharedux
x-pack/plugins/kubernetes_security @elastic/kibana-cloud-security-posture
x-pack/packages/kbn-langchain @elastic/security-generative-ai
packages/kbn-language-documentation-popover @elastic/kibana-esql
x-pack/examples/lens_config_builder_example @elastic/kibana-visualizations
packages/kbn-lens-embeddable-utils @elastic/obs-ux-infra_services-team @elastic/kibana-visualizations


@ -544,6 +544,7 @@
"@kbn/kibana-usage-collection-plugin": "link:src/plugins/kibana_usage_collection",
"@kbn/kibana-utils-plugin": "link:src/plugins/kibana_utils",
"@kbn/kubernetes-security-plugin": "link:x-pack/plugins/kubernetes_security",
"@kbn/langchain": "link:x-pack/packages/kbn-langchain",
"@kbn/language-documentation-popover": "link:packages/kbn-language-documentation-popover",
"@kbn/lens-config-builder-example-plugin": "link:x-pack/examples/lens_config_builder_example",
"@kbn/lens-embeddable-utils": "link:packages/kbn-lens-embeddable-utils",


@ -1028,6 +1028,8 @@
"@kbn/kibana-utils-plugin/*": ["src/plugins/kibana_utils/*"],
"@kbn/kubernetes-security-plugin": ["x-pack/plugins/kubernetes_security"],
"@kbn/kubernetes-security-plugin/*": ["x-pack/plugins/kubernetes_security/*"],
"@kbn/langchain": ["x-pack/packages/kbn-langchain"],
"@kbn/langchain/*": ["x-pack/packages/kbn-langchain/*"],
"@kbn/language-documentation-popover": ["packages/kbn-language-documentation-popover"],
"@kbn/language-documentation-popover/*": ["packages/kbn-language-documentation-popover/*"],
"@kbn/lens-config-builder-example-plugin": ["x-pack/examples/lens_config_builder_example"],


@ -1,11 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
export const mockActionResponse = {
message: 'Yes, your name is Andrew. How can I assist you further, Andrew?',
usage: { prompt_tokens: 4, completion_tokens: 10, total_tokens: 14 },
};


@ -5,10 +5,30 @@
* 2.0.
*/
import { Logger } from '@kbn/core/server';
import { Logger } from '@kbn/logging';
import { EventStreamCodec } from '@smithy/eventstream-codec';
import { fromUtf8, toUtf8 } from '@smithy/util-utf8';
/**
* Parses a Bedrock buffer from an array of chunks.
*
* @param {Uint8Array[]} chunks - Array of Uint8Array chunks to be parsed.
* @returns {string} - Parsed string from the Bedrock buffer.
*/
export const parseBedrockBuffer = (chunks: Uint8Array[], logger: Logger): string => {
// Initialize an empty Uint8Array to store the concatenated buffer.
let bedrockBuffer: Uint8Array = new Uint8Array(0);
// Map through each chunk to process the Bedrock buffer.
return chunks
.map((chunk) => {
const processedChunk = handleBedrockChunk({ chunk, bedrockBuffer, logger });
bedrockBuffer = processedChunk.bedrockBuffer;
return processedChunk.decodedChunk;
})
.join('');
};
/**
* Handle a chunk of data from the bedrock API.
* @param chunk - The chunk of data to process.
@ -55,7 +75,9 @@ export const handleBedrockChunk = ({
Buffer.from(JSON.parse(new TextDecoder().decode(event.body)).bytes, 'base64').toString()
);
const decodedContent = prepareBedrockOutput(body, logger);
if (chunkHandler) chunkHandler(decodedContent);
if (chunkHandler) {
chunkHandler(decodedContent);
}
return decodedContent;
})
.join('');


@ -23,6 +23,5 @@ export {
} from './impl/data_anonymization/helpers';
export { transformRawData } from './impl/data_anonymization/transform_raw_data';
export { handleBedrockChunk } from './impl/utils/bedrock';
export { parseBedrockBuffer, handleBedrockChunk } from './impl/utils/bedrock';
export * from './constants';


@ -19,7 +19,6 @@
"@kbn/zod-helpers",
"@kbn/securitysolution-io-ts-utils",
"@kbn/core",
"@kbn/actions-plugin",
"@kbn/logging-mocks",
"@kbn/logging",
]
}


@ -24,7 +24,7 @@ const mockHttp = {
fetch: jest.fn(),
} as unknown as HttpSetup;
const apiConfig: Record<'openai' | 'bedrock', ApiConfig> = {
const apiConfig: Record<'openai' | 'bedrock' | 'gemini', ApiConfig> = {
openai: {
connectorId: 'foo',
actionTypeId: '.gen-ai',
@ -35,6 +35,10 @@ const apiConfig: Record<'openai' | 'bedrock', ApiConfig> = {
connectorId: 'foo',
actionTypeId: '.bedrock',
},
gemini: {
connectorId: 'foo',
actionTypeId: '.gemini',
},
};
const fetchConnectorArgs: FetchConnectorExecuteAction = {
@ -94,7 +98,7 @@ describe('API tests', () => {
);
});
it('calls the non-stream API when assistantStreamingEnabled is true and actionTypeId is bedrock and isEnabledKnowledgeBase is true', async () => {
it('calls the stream API when assistantStreamingEnabled is true and actionTypeId is bedrock and isEnabledKnowledgeBase is true', async () => {
const testProps: FetchConnectorExecuteAction = {
...fetchConnectorArgs,
apiConfig: apiConfig.bedrock,
@ -105,13 +109,13 @@ describe('API tests', () => {
expect(mockHttp.fetch).toHaveBeenCalledWith(
'/internal/elastic_assistant/actions/connector/foo/_execute',
{
...staticDefaults,
body: '{"message":"This is a test","subAction":"invokeAI","conversationId":"test","actionTypeId":".bedrock","replacements":{},"isEnabledKnowledgeBase":true,"isEnabledRAGAlerts":false}',
...streamingDefaults,
body: '{"message":"This is a test","subAction":"invokeStream","conversationId":"test","actionTypeId":".bedrock","replacements":{},"isEnabledKnowledgeBase":true,"isEnabledRAGAlerts":false}',
}
);
});
it('calls the non-stream API when assistantStreamingEnabled is true and actionTypeId is bedrock and isEnabledKnowledgeBase is false and isEnabledRAGAlerts is true', async () => {
it('calls the stream API when assistantStreamingEnabled is true and actionTypeId is bedrock and isEnabledKnowledgeBase is false and isEnabledRAGAlerts is true', async () => {
const testProps: FetchConnectorExecuteAction = {
...fetchConnectorArgs,
apiConfig: apiConfig.bedrock,
@ -121,11 +125,47 @@ describe('API tests', () => {
await fetchConnectorExecuteAction(testProps);
expect(mockHttp.fetch).toHaveBeenCalledWith(
'/internal/elastic_assistant/actions/connector/foo/_execute',
{
...streamingDefaults,
body: '{"message":"This is a test","subAction":"invokeStream","conversationId":"test","actionTypeId":".bedrock","replacements":{},"isEnabledKnowledgeBase":false,"isEnabledRAGAlerts":true}',
}
);
});
it('calls the non-stream API when assistantStreamingEnabled is true and actionTypeId is gemini and isEnabledKnowledgeBase is true', async () => {
const testProps: FetchConnectorExecuteAction = {
...fetchConnectorArgs,
apiConfig: apiConfig.gemini,
};
await fetchConnectorExecuteAction(testProps);
expect(mockHttp.fetch).toHaveBeenCalledWith(
'/internal/elastic_assistant/actions/connector/foo/_execute',
{
...staticDefaults,
body: '{"message":"This is a test","subAction":"invokeAI","conversationId":"test","actionTypeId":".bedrock","replacements":{},"isEnabledKnowledgeBase":false,"isEnabledRAGAlerts":true}',
body: '{"message":"This is a test","subAction":"invokeAI","conversationId":"test","actionTypeId":".gemini","replacements":{},"isEnabledKnowledgeBase":true,"isEnabledRAGAlerts":false}',
}
);
});
it('calls the non-stream API when assistantStreamingEnabled is true and actionTypeId is gemini and isEnabledKnowledgeBase is false and isEnabledRAGAlerts is true', async () => {
const testProps: FetchConnectorExecuteAction = {
...fetchConnectorArgs,
apiConfig: apiConfig.gemini,
isEnabledKnowledgeBase: false,
isEnabledRAGAlerts: true,
};
await fetchConnectorExecuteAction(testProps);
expect(mockHttp.fetch).toHaveBeenCalledWith(
'/internal/elastic_assistant/actions/connector/foo/_execute',
{
...staticDefaults,
body: '{"message":"This is a test","subAction":"invokeAI","conversationId":"test","actionTypeId":".gemini","replacements":{},"isEnabledKnowledgeBase":false,"isEnabledRAGAlerts":true}',
}
);
});


@ -63,12 +63,14 @@ export const fetchConnectorExecuteAction = async ({
size,
traceOptions,
}: FetchConnectorExecuteAction): Promise<FetchConnectorExecuteResponse> => {
// TODO add streaming support for gemini with langchain on
const isStream =
assistantStreamingEnabled &&
(apiConfig.actionTypeId === '.gen-ai' ||
// TODO add streaming support for bedrock with langchain on
apiConfig.actionTypeId === '.bedrock' ||
// TODO add streaming support for gemini with langchain on
// tracked here: https://github.com/elastic/security-team/issues/7363
(apiConfig.actionTypeId === '.bedrock' && !isEnabledRAGAlerts && !isEnabledKnowledgeBase));
(apiConfig.actionTypeId === '.gemini' && !isEnabledRAGAlerts && !isEnabledKnowledgeBase));
const optionalRequestParams = getOptionalRequestParams({
isEnabledRAGAlerts,


@ -15,13 +15,6 @@ export const YOU_ARE_A_HELPFUL_EXPERT_ASSISTANT = i18n.translate(
}
);
export const USE_THE_FOLLOWING_CONTEXT_TO_ANSWER = i18n.translate(
'xpack.elasticAssistant.assistant.content.prompts.system.useTheFollowingContextToAnswer',
{
defaultMessage: 'Use the following context to answer questions:',
}
);
export const IF_YOU_DONT_KNOW_THE_ANSWER = i18n.translate(
'xpack.elasticAssistant.assistant.content.prompts.system.ifYouDontKnowTheAnswer',
{
@ -37,8 +30,7 @@ export const SUPERHERO_PERSONALITY = i18n.translate(
}
);
export const DEFAULT_SYSTEM_PROMPT_NON_I18N = `${YOU_ARE_A_HELPFUL_EXPERT_ASSISTANT} ${IF_YOU_DONT_KNOW_THE_ANSWER}
${USE_THE_FOLLOWING_CONTEXT_TO_ANSWER}`;
export const DEFAULT_SYSTEM_PROMPT_NON_I18N = `${YOU_ARE_A_HELPFUL_EXPERT_ASSISTANT} ${IF_YOU_DONT_KNOW_THE_ANSWER}`;
export const DEFAULT_SYSTEM_PROMPT_NAME = i18n.translate(
'xpack.elasticAssistant.assistant.content.prompts.system.defaultSystemPromptName',
@ -48,8 +40,7 @@ export const DEFAULT_SYSTEM_PROMPT_NAME = i18n.translate(
);
export const SUPERHERO_SYSTEM_PROMPT_NON_I18N = `${YOU_ARE_A_HELPFUL_EXPERT_ASSISTANT} ${IF_YOU_DONT_KNOW_THE_ANSWER}
${SUPERHERO_PERSONALITY}
${USE_THE_FOLLOWING_CONTEXT_TO_ANSWER}`;
${SUPERHERO_PERSONALITY}`;
export const SUPERHERO_SYSTEM_PROMPT_NAME = i18n.translate(
'xpack.elasticAssistant.assistant.content.prompts.system.superheroSystemPromptName',


@ -0,0 +1,5 @@
# @kbn/langchain
Contains LangChain language models to be used with Kibana connectors.
The package does not expose an `index.ts` at its root; instead, deep-import from its `server` directory.
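A minimal usage sketch (the import path matches this PR's `server/index.ts`; `actions`, `logger`, and `request` come from your plugin's server context, and the connector id is a placeholder):
import { ActionsClientSimpleChatModel } from '@kbn/langchain/server';
const model = new ActionsClientSimpleChatModel({
  actions, // ActionsPluginStart from your plugin's start contract
  connectorId: 'my-bedrock-connector-id', // hypothetical connector id
  llmType: 'bedrock',
  logger,
  request, // the authenticated KibanaRequest
  streaming: true, // streaming is only enabled for bedrock; other types use invokeAI
});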


@ -0,0 +1,22 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
module.exports = {
coverageDirectory: '<rootDir>/target/kibana-coverage/jest/x-pack/packages/kbn_langchain',
coverageReporters: ['text', 'html'],
collectCoverageFrom: [
'<rootDir>/x-pack/packages/kbn-langchain/server/**/*.{ts}',
'!<rootDir>/x-pack/packages/kbn-langchain/server/{__test__,__snapshots__,__examples__,*mock*,tests,test_helpers,integration_tests,types}/**/*',
'!<rootDir>/x-pack/packages/kbn-langchain/server/*mock*.{ts}',
'!<rootDir>/x-pack/packages/kbn-langchain/server/*.test.{ts}',
'!<rootDir>/x-pack/packages/kbn-langchain/server/*.d.ts',
'!<rootDir>/x-pack/packages/kbn-langchain/server/*.config.ts',
],
preset: '@kbn/test',
rootDir: '../../..',
roots: ['<rootDir>/x-pack/packages/kbn-langchain'],
};


@ -0,0 +1,5 @@
{
"type": "shared-server",
"id": "@kbn/langchain",
"owner": "@elastic/security-generative-ai"
}


@ -0,0 +1,7 @@
{
"name": "@kbn/langchain",
"private": true,
"version": "1.0.0",
"license": "Elastic License 2.0",
"sideEffects": false
}


@ -0,0 +1,20 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { ActionsClientChatOpenAI } from './language_models/chat_openai';
import { ActionsClientLlm } from './language_models/llm';
import { ActionsClientSimpleChatModel } from './language_models/simple_chat_model';
import { parseBedrockStream } from './utils/bedrock';
import { getDefaultArguments } from './language_models/constants';
export {
parseBedrockStream,
getDefaultArguments,
ActionsClientChatOpenAI,
ActionsClientLlm,
ActionsClientSimpleChatModel,
};


@ -0,0 +1,204 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import type OpenAI from 'openai';
import { Stream } from 'openai/streaming';
import type { PluginStartContract as ActionsPluginStart } from '@kbn/actions-plugin/server';
import { loggerMock } from '@kbn/logging-mocks';
import { ActionsClientChatOpenAI, ActionsClientChatOpenAIParams } from './chat_openai';
import { mockActionResponse, mockChatCompletion } from './mocks';
const connectorId = 'mock-connector-id';
const mockExecute = jest.fn();
const mockLogger = loggerMock.create();
const mockActions = {
getActionsClientWithRequest: jest.fn().mockImplementation(() => ({
execute: mockExecute,
})),
} as unknown as ActionsPluginStart;
const chunk = {
object: 'chat.completion.chunk',
choices: [
{
delta: {
content: 'Single.',
},
},
],
};
export async function* asyncGenerator() {
// Mock implementation
yield chunk;
}
const mockStreamExecute = jest.fn();
const mockStreamActions = {
getActionsClientWithRequest: jest.fn().mockImplementation(() => ({
execute: mockStreamExecute,
})),
} as unknown as ActionsPluginStart;
const prompt = 'Do you know my name?';
const { signal } = new AbortController();
const mockRequest = {
params: { connectorId },
body: {
message: prompt,
subAction: 'invokeAI',
isEnabledKnowledgeBase: true,
},
} as ActionsClientChatOpenAIParams['request'];
const defaultArgs = {
actions: mockActions,
connectorId,
logger: mockLogger,
request: mockRequest,
streaming: false,
signal,
timeout: 999999,
temperature: 0.2,
};
describe('ActionsClientChatOpenAI', () => {
beforeEach(() => {
jest.clearAllMocks();
mockExecute.mockImplementation(() => ({
data: mockChatCompletion,
status: 'ok',
}));
});
describe('_llmType', () => {
it('returns the expected LLM type', () => {
const actionsClientChatOpenAI = new ActionsClientChatOpenAI(defaultArgs);
expect(actionsClientChatOpenAI._llmType()).toEqual('ActionsClientChatOpenAI');
});
it('returns the expected LLM type when overridden', () => {
const actionsClientChatOpenAI = new ActionsClientChatOpenAI({
...defaultArgs,
llmType: 'special-llm-type',
});
expect(actionsClientChatOpenAI._llmType()).toEqual('special-llm-type');
});
});
describe('completionWithRetry streaming: true', () => {
beforeEach(() => {
jest.clearAllMocks();
mockStreamExecute.mockImplementation(() => ({
data: {
consumerStream: asyncGenerator() as unknown as Stream<OpenAI.ChatCompletionChunk>,
tokenCountStream: asyncGenerator() as unknown as Stream<OpenAI.ChatCompletionChunk>,
},
status: 'ok',
}));
});
const defaultStreamingArgs: OpenAI.ChatCompletionCreateParamsStreaming = {
messages: [{ content: prompt, role: 'user' }],
stream: true,
model: 'gpt-4',
n: 99,
stop: ['a stop sequence'],
functions: [jest.fn()],
};
it('returns the expected data', async () => {
const actionsClientChatOpenAI = new ActionsClientChatOpenAI({
...defaultArgs,
streaming: true,
actions: mockStreamActions,
});
const result: AsyncIterable<OpenAI.ChatCompletionChunk> =
await actionsClientChatOpenAI.completionWithRetry(defaultStreamingArgs);
expect(mockStreamExecute).toHaveBeenCalledWith({
actionId: connectorId,
params: {
subActionParams: {
model: 'gpt-4',
messages: [{ role: 'user', content: 'Do you know my name?' }],
signal,
timeout: 999999,
n: defaultStreamingArgs.n,
stop: defaultStreamingArgs.stop,
functions: defaultStreamingArgs.functions,
temperature: 0.2,
},
subAction: 'invokeAsyncIterator',
},
signal,
});
expect(result).toEqual(asyncGenerator());
});
});
describe('completionWithRetry streaming: false', () => {
const defaultNonStreamingArgs: OpenAI.ChatCompletionCreateParamsNonStreaming = {
messages: [{ content: prompt, role: 'user' }],
stream: false,
model: 'gpt-4',
};
it('returns the expected data', async () => {
const actionsClientChatOpenAI = new ActionsClientChatOpenAI(defaultArgs);
const result: OpenAI.ChatCompletion = await actionsClientChatOpenAI.completionWithRetry(
defaultNonStreamingArgs
);
expect(mockExecute).toHaveBeenCalledWith({
actionId: connectorId,
params: {
subActionParams: {
body: '{"temperature":0.2,"model":"gpt-4","messages":[{"role":"user","content":"Do you know my name?"}]}',
signal,
timeout: 999999,
},
subAction: 'run',
},
signal,
});
expect(result.choices[0].message.content).toEqual(mockActionResponse.message);
});
it('rejects with the expected error when the action result status is error', async () => {
const hasErrorStatus = jest.fn().mockImplementation(() => ({
message: 'action-result-message',
serviceMessage: 'action-result-service-message',
status: 'error', // <-- error status
}));
const badActions = {
getActionsClientWithRequest: jest.fn().mockImplementation(() => ({
execute: hasErrorStatus,
})),
} as unknown as ActionsPluginStart;
const actionsClientChatOpenAI = new ActionsClientChatOpenAI({
...defaultArgs,
actions: badActions,
});
await expect(
actionsClientChatOpenAI.completionWithRetry(defaultNonStreamingArgs)
).rejects.toThrowError(
'ActionsClientChatOpenAI: action result status is error: action-result-message - action-result-service-message'
);
});
});
});


@ -12,18 +12,13 @@ import { get } from 'lodash/fp';
import { ChatOpenAI } from '@langchain/openai';
import { Stream } from 'openai/streaming';
import {
ChatCompletion,
ChatCompletionChunk,
ChatCompletionCreateParamsStreaming,
ChatCompletionCreateParamsNonStreaming,
} from 'openai/resources/chat/completions';
import type OpenAI from 'openai';
import { DEFAULT_OPEN_AI_MODEL, DEFAULT_TIMEOUT } from './constants';
import { InvokeAIActionParamsSchema } from './types';
import { InvokeAIActionParamsSchema, RunActionParamsSchema } from './types';
const LLM_TYPE = 'ActionsClientChatOpenAI';
interface ActionsClientChatOpenAIParams {
export interface ActionsClientChatOpenAIParams {
actions: ActionsPluginStart;
connectorId: string;
llmType?: string;
@ -47,15 +42,14 @@ interface ActionsClientChatOpenAIParams {
* and iterates over the chunks to form the response.
*/
export class ActionsClientChatOpenAI extends ChatOpenAI {
// set streaming to true always
streaming = true;
streaming: boolean;
// Local `llmType` as it can change and needs to be accessed by abstract `_llmType()` method
// Not using getter as `this._llmType()` is called in the constructor via `super({})`
protected llmType: string;
// ChatOpenAI class needs these, but they do not matter as we override the openai client with the actions client
azureOpenAIApiKey = '';
openAIApiKey = '';
model?: string;
model: string;
#temperature?: number;
// Kibana variables
@ -78,12 +72,13 @@ export class ActionsClientChatOpenAI extends ChatOpenAI {
maxRetries,
model,
signal,
streaming = true,
temperature,
timeout,
}: ActionsClientChatOpenAIParams) {
super({
maxRetries,
streaming: true,
streaming,
// matters only for the LangSmith logs (Metadata > Invocation Params), which are misleading if this is not set
modelName: model ?? DEFAULT_OPEN_AI_MODEL,
// these have to be initialized, but are not actually used since we override the openai client with the actions client
@ -102,9 +97,9 @@ export class ActionsClientChatOpenAI extends ChatOpenAI {
this.#request = request;
this.#timeout = timeout;
this.#actionResultData = '';
this.streaming = true;
this.streaming = streaming;
this.#signal = signal;
this.model = model;
this.model = model ?? DEFAULT_OPEN_AI_MODEL;
// to be passed to the actions client
this.#temperature = temperature;
// matters only for LangSmith logs (Metadata > Invocation Params)
@ -128,20 +123,18 @@ export class ActionsClientChatOpenAI extends ChatOpenAI {
}
async completionWithRetry(
request: ChatCompletionCreateParamsStreaming
): Promise<AsyncIterable<ChatCompletionChunk>>;
request: OpenAI.ChatCompletionCreateParamsStreaming
): Promise<AsyncIterable<OpenAI.ChatCompletionChunk>>;
async completionWithRetry(
request: ChatCompletionCreateParamsNonStreaming
): Promise<ChatCompletion>;
request: OpenAI.ChatCompletionCreateParamsNonStreaming
): Promise<OpenAI.ChatCompletion>;
async completionWithRetry(
completionRequest: ChatCompletionCreateParamsStreaming | ChatCompletionCreateParamsNonStreaming
): Promise<AsyncIterable<ChatCompletionChunk> | ChatCompletion> {
if (!completionRequest.stream) {
// fallback for typescript, should never be hit
return super.completionWithRetry(completionRequest);
}
completionRequest:
| OpenAI.ChatCompletionCreateParamsStreaming
| OpenAI.ChatCompletionCreateParamsNonStreaming
): Promise<AsyncIterable<OpenAI.ChatCompletionChunk> | OpenAI.ChatCompletion> {
return this.caller.call(async () => {
const requestBody = this.formatRequestForActionsClient(completionRequest);
this.#logger.debug(
@ -159,10 +152,17 @@ export class ActionsClientChatOpenAI extends ChatOpenAI {
throw new Error(`${LLM_TYPE}: ${actionResult?.message} - ${actionResult?.serviceMessage}`);
}
if (!this.streaming) {
// typecasting as the `run` subaction returns the OpenAI.ChatCompletion directly from OpenAI
const chatCompletion = get('data', actionResult) as OpenAI.ChatCompletion;
return chatCompletion;
}
// cast typing as this is the contract of the actions client
const result = get('data', actionResult) as {
consumerStream: Stream<ChatCompletionChunk>;
tokenCountStream: Stream<ChatCompletionChunk>;
consumerStream: Stream<OpenAI.ChatCompletionChunk>;
tokenCountStream: Stream<OpenAI.ChatCompletionChunk>;
};
if (result.consumerStream == null) {
@ -172,39 +172,47 @@ export class ActionsClientChatOpenAI extends ChatOpenAI {
return result.consumerStream;
});
}
formatRequestForActionsClient(completionRequest: ChatCompletionCreateParamsStreaming): {
formatRequestForActionsClient(
completionRequest:
| OpenAI.ChatCompletionCreateParamsNonStreaming
| OpenAI.ChatCompletionCreateParamsStreaming
): {
actionId: string;
params: {
subActionParams: InvokeAIActionParamsSchema;
subActionParams: InvokeAIActionParamsSchema | RunActionParamsSchema;
subAction: string;
};
signal?: AbortSignal;
} {
const body = {
temperature: this.#temperature,
// possible client model override
// security sends this from connectors; it is only missing from preconfigured connectors
// this should be undefined otherwise so the connector handles the model (stack_connector has access to preconfigured connector model values)
model: this.model,
// ensure we take the messages from the completion request, not the client request
n: completionRequest.n,
stop: completionRequest.stop,
functions: completionRequest.functions,
messages: completionRequest.messages.map((message) => ({
role: message.role,
content: message.content ?? '',
...('name' in message ? { name: message?.name } : {}),
...('function_call' in message ? { function_call: message?.function_call } : {}),
...('tool_calls' in message ? { tool_calls: message?.tool_calls } : {}),
...('tool_call_id' in message ? { tool_call_id: message?.tool_call_id } : {}),
})),
};
// create a new connector request body with the assistant message:
return {
actionId: this.#connectorId,
params: {
// stream must already be true here
// langchain expects stream to be of type AsyncIterator<ChatCompletionChunk>
subAction: 'invokeAsyncIterator',
// langchain expects stream to be of type AsyncIterator<OpenAI.ChatCompletionChunk>
// for non-stream, use `run` instead of `invokeAI` in order to get the entire OpenAI.ChatCompletion response,
// which may contain non-content messages like functions
subAction: completionRequest.stream ? 'invokeAsyncIterator' : 'run',
subActionParams: {
temperature: this.#temperature,
// possible client model override
// security sends this from connectors; it is only missing from preconfigured connectors
// this should be undefined otherwise so the connector handles the model (stack_connector has access to preconfigured connector model values)
model: this.model,
// ensure we take the messages from the completion request, not the client request
n: completionRequest.n,
stop: completionRequest.stop,
functions: completionRequest.functions,
messages: completionRequest.messages.map((message) => ({
role: message.role,
content: message.content ?? '',
...('name' in message ? { name: message?.name } : {}),
...('function_call' in message ? { function_call: message?.function_call } : {}),
...('tool_calls' in message ? { tool_calls: message?.tool_calls } : {}),
...('tool_call_id' in message ? { tool_call_id: message?.tool_call_id } : {}),
})),
...(completionRequest.stream ? body : { body: JSON.stringify(body) }),
signal: this.#signal,
// This timeout is large because LangChain prompts can be complicated and take a long time
timeout: this.#timeout ?? DEFAULT_TIMEOUT,


@ -5,12 +5,11 @@
* 2.0.
*/
import { Message } from '../schemas';
import { getMessageContentAndRole } from './helpers';
describe('helpers', () => {
describe('getMessageContentAndRole', () => {
const testCases: Array<[string, Pick<Message, 'content' | 'role'>]> = [
const testCases: Array<[string, { content: string; role: string }]> = [
['Prompt 1', { content: 'Prompt 1', role: 'user' }],
['Prompt 2', { content: 'Prompt 2', role: 'user' }],
['', { content: '', role: 'user' }],


@ -5,9 +5,7 @@
* 2.0.
*/
import { Message } from '../schemas';
export const getMessageContentAndRole = (prompt: string): Pick<Message, 'content' | 'role'> => ({
export const getMessageContentAndRole = (prompt: string): { content: string; role: string } => ({
content: prompt,
role: 'user',
});


@ -7,3 +7,4 @@
export { ActionsClientChatOpenAI } from './chat_openai';
export { ActionsClientLlm } from './llm';
export { ActionsClientSimpleChatModel } from './simple_chat_model';


@ -10,7 +10,7 @@ import type { PluginStartContract as ActionsPluginStart } from '@kbn/actions-plu
import { loggerMock } from '@kbn/logging-mocks';
import { ActionsClientLlm } from './llm';
import { mockActionResponse } from '../mock/mock_action_response';
import { mockActionResponse } from './mocks';
const connectorId = 'mock-connector-id';
@ -117,7 +117,7 @@ describe('ActionsClientLlm', () => {
request: mockRequest,
});
expect(actionsClientLlm._call(prompt)).rejects.toThrowError(
await expect(actionsClientLlm._call(prompt)).rejects.toThrowError(
'ActionsClientLlm: action result status is error: action-result-message - action-result-service-message'
);
});
@ -137,7 +137,7 @@ describe('ActionsClientLlm', () => {
request: mockRequest,
});
expect(actionsClientLlm._call(prompt)).rejects.toThrowError(
await expect(actionsClientLlm._call(prompt)).rejects.toThrowError(
'ActionsClientLlm: content should be a string, but it had an unexpected type: number'
);
});

View file

@ -0,0 +1,37 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import type OpenAI from 'openai';
export const mockActionResponse = {
message: 'Yes, your name is Andrew. How can I assist you further, Andrew?',
usage: { prompt_tokens: 4, completion_tokens: 10, total_tokens: 14 },
};
export const mockChatCompletion: OpenAI.ChatCompletion = {
id: 'abc123',
choices: [
{
index: 0,
message: {
role: 'assistant',
content: 'Yes, your name is Andrew. How can I assist you further, Andrew?',
},
finish_reason: 'stop',
logprobs: null,
},
],
created: 1684572400, // Unix timestamp example: May 20, 2023
model: 'gpt-4',
object: 'chat.completion',
system_fingerprint: 'fingerprint123',
usage: {
prompt_tokens: 10,
completion_tokens: 15,
total_tokens: 25,
},
};


@ -0,0 +1,299 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { PassThrough } from 'stream';
import type { PluginStartContract as ActionsPluginStart } from '@kbn/actions-plugin/server';
import { loggerMock } from '@kbn/logging-mocks';
import { ActionsClientSimpleChatModel, CustomChatModelInput } from './simple_chat_model';
import { mockActionResponse } from './mocks';
import { BaseMessage } from '@langchain/core/messages';
import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
import { parseBedrockStream } from '../utils/bedrock';
const connectorId = 'mock-connector-id';
const mockExecute = jest.fn();
const mockLogger = loggerMock.create();
const mockActions = {
getActionsClientWithRequest: jest.fn().mockImplementation(() => ({
execute: mockExecute,
})),
} as unknown as ActionsPluginStart;
const mockStreamExecute = jest.fn().mockImplementation(() => ({
data: new PassThrough(),
status: 'ok',
}));
const mockStreamActions = {
getActionsClientWithRequest: jest.fn().mockImplementation(() => ({
execute: mockStreamExecute,
})),
} as unknown as ActionsPluginStart;
const prompt = 'Do you know my name?';
const callMessages = [
{
lc_serializable: true,
lc_kwargs: {
content: 'Answer the following questions truthfully and as best you can.',
additional_kwargs: {},
response_metadata: {},
},
lc_namespace: ['langchain_core', 'messages'],
content: 'Answer the following questions truthfully and as best you can.',
name: undefined,
additional_kwargs: {},
response_metadata: {},
_getType: () => 'system',
},
{
lc_serializable: true,
lc_kwargs: {
content: 'Question: Do you know my name?\n\n',
additional_kwargs: {},
response_metadata: {},
},
lc_namespace: ['langchain_core', 'messages'],
content: 'Question: Do you know my name?\n\n',
name: undefined,
additional_kwargs: {},
response_metadata: {},
_getType: () => 'human',
},
] as unknown as BaseMessage[];
const callOptions = {
stop: ['\n'],
};
const handleLLMNewToken = jest.fn();
const callRunManager = {
handleLLMNewToken,
} as unknown as CallbackManagerForLLMRun;
const mockRequest: CustomChatModelInput['request'] = {
params: { connectorId },
body: {
message: prompt,
subAction: 'invokeAI',
isEnabledKnowledgeBase: true,
},
} as CustomChatModelInput['request'];
const defaultArgs = {
actions: mockActions,
connectorId,
logger: mockLogger,
request: mockRequest,
streaming: false,
};
jest.mock('../utils/bedrock');
describe('ActionsClientSimpleChatModel', () => {
beforeEach(() => {
jest.clearAllMocks();
mockExecute.mockImplementation(() => ({
data: mockActionResponse,
status: 'ok',
}));
});
describe('getActionResultData', () => {
it('returns the expected data', async () => {
const actionsClientSimpleChatModel = new ActionsClientSimpleChatModel(defaultArgs);
const result = await actionsClientSimpleChatModel._call(
callMessages,
callOptions,
callRunManager
);
expect(result).toEqual(mockActionResponse.message);
});
});
describe('_llmType', () => {
it('returns the expected LLM type', () => {
const actionsClientSimpleChatModel = new ActionsClientSimpleChatModel(defaultArgs);
expect(actionsClientSimpleChatModel._llmType()).toEqual('ActionsClientSimpleChatModel');
});
it('returns the expected LLM type when overridden', () => {
const actionsClientSimpleChatModel = new ActionsClientSimpleChatModel({
...defaultArgs,
llmType: 'special-llm-type',
});
expect(actionsClientSimpleChatModel._llmType()).toEqual('special-llm-type');
});
});
describe('_call streaming: false', () => {
it('returns the expected content when _call is invoked', async () => {
const actionsClientSimpleChatModel = new ActionsClientSimpleChatModel(defaultArgs);
const result = await actionsClientSimpleChatModel._call(
callMessages,
callOptions,
callRunManager
);
const subAction = mockExecute.mock.calls[0][0].params.subAction;
expect(subAction).toEqual('invokeAI');
expect(result).toEqual(mockActionResponse.message);
});
it('rejects with the expected error when the action result status is error', async () => {
const hasErrorStatus = jest.fn().mockImplementation(() => ({
message: 'action-result-message',
serviceMessage: 'action-result-service-message',
status: 'error', // <-- error status
}));
const badActions = {
getActionsClientWithRequest: jest.fn().mockImplementation(() => ({
execute: hasErrorStatus,
})),
} as unknown as ActionsPluginStart;
const actionsClientSimpleChatModel = new ActionsClientSimpleChatModel({
...defaultArgs,
actions: badActions,
});
await expect(
actionsClientSimpleChatModel._call(callMessages, callOptions, callRunManager)
).rejects.toThrowError(
'ActionsClientSimpleChatModel: action result status is error: action-result-message - action-result-service-message'
);
});
it('rejects with the expected error when the message has invalid content', async () => {
const invalidContent = { message: 1234 };
mockExecute.mockImplementation(() => ({
data: invalidContent,
status: 'ok',
}));
const actionsClientSimpleChatModel = new ActionsClientSimpleChatModel(defaultArgs);
await expect(
actionsClientSimpleChatModel._call(callMessages, callOptions, callRunManager)
).rejects.toThrowError(
'ActionsClientSimpleChatModel: content should be a string, but it had an unexpected type: number'
);
});
it('throws multimodal error', async () => {
const invalidContent = { message: 1234 };
mockExecute.mockImplementation(() => ({
data: invalidContent,
status: 'ok',
}));
const actionsClientSimpleChatModel = new ActionsClientSimpleChatModel(defaultArgs);
await expect(
actionsClientSimpleChatModel._call(
// @ts-ignore
[{ ...callMessages[0], content: null }],
callOptions,
callRunManager
)
).rejects.toThrowError('Multimodal messages are not supported');
});
});
describe('_call streaming: true', () => {
beforeEach(() => {
(parseBedrockStream as jest.Mock).mockResolvedValue(mockActionResponse.message);
});
it('returns the expected content when _call is invoked with streaming and llmType is Bedrock', async () => {
const actionsClientSimpleChatModel = new ActionsClientSimpleChatModel({
...defaultArgs,
actions: mockStreamActions,
llmType: 'bedrock',
streaming: true,
});
const result = await actionsClientSimpleChatModel._call(
callMessages,
callOptions,
callRunManager
);
const subAction = mockStreamExecute.mock.calls[0][0].params.subAction;
expect(subAction).toEqual('invokeStream');
expect(result).toEqual(mockActionResponse.message);
});
it('returns the expected content when _call is invoked with streaming and llmType is Gemini', async () => {
const actionsClientSimpleChatModel = new ActionsClientSimpleChatModel({
...defaultArgs,
actions: mockActions,
llmType: 'gemini',
streaming: true,
});
const result = await actionsClientSimpleChatModel._call(
callMessages,
callOptions,
callRunManager
);
const subAction = mockExecute.mock.calls[0][0].params.subAction;
expect(subAction).toEqual('invokeAI');
expect(result).toEqual(mockActionResponse.message);
});
it('does not call handleLLMNewToken until the final answer', async () => {
(parseBedrockStream as jest.Mock).mockImplementation((_1, _2, _3, handleToken) => {
handleToken('token1');
handleToken('token2');
handleToken('token3');
handleToken('token4');
handleToken('token5');
handleToken(`"action":`);
handleToken(`"Final Answer"`);
handleToken(`, "action_input": "`);
handleToken('token6');
});
const actionsClientSimpleChatModel = new ActionsClientSimpleChatModel({
...defaultArgs,
actions: mockStreamActions,
llmType: 'bedrock',
streaming: true,
});
await actionsClientSimpleChatModel._call(callMessages, callOptions, callRunManager);
expect(handleLLMNewToken).toHaveBeenCalledTimes(1);
expect(handleLLMNewToken).toHaveBeenCalledWith('token6');
});
it('does not call handleLLMNewToken after the final output ends', async () => {
(parseBedrockStream as jest.Mock).mockImplementation((_1, _2, _3, handleToken) => {
handleToken('token5');
handleToken(`"action":`);
handleToken(`"Final Answer"`);
handleToken(`, "action_input": "`);
handleToken('token6');
handleToken('"');
handleToken('token7');
});
const actionsClientSimpleChatModel = new ActionsClientSimpleChatModel({
...defaultArgs,
actions: mockStreamActions,
llmType: 'bedrock',
streaming: true,
});
await actionsClientSimpleChatModel._call(callMessages, callOptions, callRunManager);
expect(handleLLMNewToken).toHaveBeenCalledTimes(1);
expect(handleLLMNewToken).toHaveBeenCalledWith('token6');
});
});
});


@ -0,0 +1,190 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { Readable } from 'stream';
import {
SimpleChatModel,
type BaseChatModelParams,
} from '@langchain/core/language_models/chat_models';
import { type BaseMessage } from '@langchain/core/messages';
import type { PluginStartContract as ActionsPluginStart } from '@kbn/actions-plugin/server';
import { Logger } from '@kbn/logging';
import { KibanaRequest } from '@kbn/core-http-server';
import { v4 as uuidv4 } from 'uuid';
import { get } from 'lodash/fp';
import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
import { parseBedrockStream } from '../utils/bedrock';
import { getDefaultArguments } from './constants';
export const getMessageContentAndRole = (prompt: string, role = 'user') => ({
content: prompt,
role: role === 'human' ? 'user' : role,
});
export interface CustomChatModelInput extends BaseChatModelParams {
actions: ActionsPluginStart;
connectorId: string;
logger: Logger;
llmType?: string;
signal?: AbortSignal;
model?: string;
temperature?: number;
request: KibanaRequest;
streaming: boolean;
}
export class ActionsClientSimpleChatModel extends SimpleChatModel {
#actions: ActionsPluginStart;
#connectorId: string;
#logger: Logger;
#request: KibanaRequest;
#traceId: string;
#signal?: AbortSignal;
llmType: string;
streaming: boolean;
model?: string;
temperature?: number;
constructor({
actions,
connectorId,
llmType,
logger,
model,
request,
temperature,
signal,
streaming,
}: CustomChatModelInput) {
super({});
this.#actions = actions;
this.#connectorId = connectorId;
this.#traceId = uuidv4();
this.#logger = logger;
this.#signal = signal;
this.#request = request;
this.llmType = llmType ?? 'ActionsClientSimpleChatModel';
this.model = model;
this.temperature = temperature;
// only enable streaming for bedrock
this.streaming = streaming && llmType === 'bedrock';
}
_llmType() {
return this.llmType;
}
// Model type needs to be `base_chat_model` to work with LangChain OpenAI Tools
// We may want to make this configurable (ala _llmType) if different agents end up requiring different model types
// See: https://github.com/langchain-ai/langchainjs/blob/fb699647a310c620140842776f4a7432c53e02fa/langchain/src/agents/openai/index.ts#L185
_modelType() {
return 'base_chat_model';
}
async _call(
messages: BaseMessage[],
options: this['ParsedCallOptions'],
runManager?: CallbackManagerForLLMRun
): Promise<string> {
if (!messages.length) {
throw new Error('No messages provided.');
}
const formattedMessages = [];
if (messages.length === 2) {
messages.forEach((message, i) => {
if (typeof message.content !== 'string') {
throw new Error('Multimodal messages are not supported.');
}
formattedMessages.push(getMessageContentAndRole(message.content, message._getType()));
});
} else {
if (typeof messages[0].content !== 'string') {
throw new Error('Multimodal messages are not supported.');
}
formattedMessages.push(getMessageContentAndRole(messages[0].content));
}
this.#logger.debug(
`ActionsClientSimpleChatModel#_call\ntraceId: ${
this.#traceId
}\nassistantMessage:\n${JSON.stringify(formattedMessages)} `
);
// create a new connector request body with the assistant message:
const requestBody = {
actionId: this.#connectorId,
params: {
subAction: this.streaming ? 'invokeStream' : 'invokeAI',
subActionParams: {
model: this.model,
messages: formattedMessages,
...getDefaultArguments(this.llmType, this.temperature, options.stop),
},
},
};
// create an actions client from the authenticated request context:
const actionsClient = await this.#actions.getActionsClientWithRequest(this.#request);
const actionResult = await actionsClient.execute(requestBody);
if (actionResult.status === 'error') {
throw new Error(
`ActionsClientSimpleChatModel: action result status is error: ${actionResult?.message} - ${actionResult?.serviceMessage}`
);
}
if (!this.streaming) {
const content = get('data.message', actionResult);
if (typeof content !== 'string') {
throw new Error(
`ActionsClientSimpleChatModel: content should be a string, but it had an unexpected type: ${typeof content}`
);
}
return content; // per the contract of _call, return a string
}
// Bedrock streaming
const readable = get('data', actionResult) as Readable;
if (typeof readable?.read !== 'function') {
throw new Error('Action result status is error: result is not streamable');
}
let currentOutput = '';
let finalOutputIndex = -1;
const finalOutputStartToken = '"action":"FinalAnswer","action_input":"';
let streamingFinished = false;
const finalOutputStopRegex = /(?<!\\)\"/;
const handleLLMNewToken = async (token: string) => {
if (finalOutputIndex === -1) {
// Remove whitespace to simplify parsing
currentOutput += token.replace(/\s/g, '');
if (currentOutput.includes(finalOutputStartToken)) {
finalOutputIndex = currentOutput.indexOf(finalOutputStartToken);
}
} else if (!streamingFinished) {
const finalOutputEndIndex = token.search(finalOutputStopRegex);
if (finalOutputEndIndex !== -1) {
streamingFinished = true;
} else {
await runManager?.handleLLMNewToken(token);
}
}
};
const parsed = await parseBedrockStream(
readable,
this.#logger,
this.#signal,
handleLLMNewToken
);
return parsed; // per the contract of _call, return a string
}
}
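A standalone walk-through of the final-answer gate above, with hypothetical tokens: output is buffered (whitespace-stripped) until the `"action": "Final Answer"` marker appears, subsequent tokens stream to the run manager, and the first unescaped quote ends the stream.
const tokens = ['{"action":', ' "Final Answer",', ' "action_input": "', 'Hello', '!', '"'];
const emitted: string[] = [];
let currentOutput = '';
let finalOutputIndex = -1;
let streamingFinished = false;
const finalOutputStartToken = '"action":"FinalAnswer","action_input":"';
const finalOutputStopRegex = /(?<!\\)\"/;
for (const token of tokens) {
  if (finalOutputIndex === -1) {
    // buffer with whitespace removed until the final-answer marker is seen
    currentOutput += token.replace(/\s/g, '');
    if (currentOutput.includes(finalOutputStartToken)) {
      finalOutputIndex = currentOutput.indexOf(finalOutputStartToken);
    }
  } else if (!streamingFinished) {
    if (token.search(finalOutputStopRegex) !== -1) {
      streamingFinished = true; // unescaped quote closes the final answer
    } else {
      emitted.push(token); // stand-in for runManager.handleLLMNewToken(token)
    }
  }
}
// emitted === ['Hello', '!']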


@ -6,15 +6,12 @@
*/
import { LangChainTracer } from '@langchain/core/tracers/tracer_langchain';
import {
ChatCompletionContentPart,
ChatCompletionCreateParamsNonStreaming,
} from 'openai/resources/chat/completions';
import type OpenAI from 'openai';
export interface InvokeAIActionParamsSchema {
messages: Array<{
role: string;
content: string | ChatCompletionContentPart[];
content: string | OpenAI.ChatCompletionContentPart[];
name?: string;
function_call?: {
arguments: string;
@ -32,11 +29,16 @@ export interface InvokeAIActionParamsSchema {
}>;
tool_call_id?: string;
}>;
model?: ChatCompletionCreateParamsNonStreaming['model'];
n?: ChatCompletionCreateParamsNonStreaming['n'];
stop?: ChatCompletionCreateParamsNonStreaming['stop'];
temperature?: ChatCompletionCreateParamsNonStreaming['temperature'];
functions?: ChatCompletionCreateParamsNonStreaming['functions'];
model?: OpenAI.ChatCompletionCreateParamsNonStreaming['model'];
n?: OpenAI.ChatCompletionCreateParamsNonStreaming['n'];
stop?: OpenAI.ChatCompletionCreateParamsNonStreaming['stop'];
temperature?: OpenAI.ChatCompletionCreateParamsNonStreaming['temperature'];
functions?: OpenAI.ChatCompletionCreateParamsNonStreaming['functions'];
signal?: AbortSignal;
timeout?: number;
}
export interface RunActionParamsSchema {
body: string;
signal?: AbortSignal;
timeout?: number;
}
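A sketch of the two payload shapes (hypothetical values): the streaming `invokeAsyncIterator` path passes `InvokeAIActionParamsSchema` fields directly, while the non-streaming `run` subaction wraps the same fields as a JSON string `body` per `RunActionParamsSchema`.
const invokeAIParams: InvokeAIActionParamsSchema = {
  messages: [{ role: 'user', content: 'Do you know my name?' }],
  model: 'gpt-4',
  temperature: 0.2,
};
const runParams: RunActionParamsSchema = {
  body: JSON.stringify({
    temperature: 0.2,
    model: 'gpt-4',
    messages: [{ role: 'user', content: 'Do you know my name?' }],
  }),
};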


@ -7,7 +7,7 @@
import { EventStreamCodec } from '@smithy/eventstream-codec';
import { fromUtf8, toUtf8 } from '@smithy/util-utf8';
import { handleBedrockChunk } from '../..';
import { handleBedrockChunk } from './bedrock';
const getContentBlockDelta = (completion: string) => ({
type: 'content_block_delta',


@ -0,0 +1,208 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { finished } from 'stream/promises';
import { Readable } from 'stream';
import { Logger } from '@kbn/core/server';
import { EventStreamCodec } from '@smithy/eventstream-codec';
import { fromUtf8, toUtf8 } from '@smithy/util-utf8';
type StreamParser = (
responseStream: Readable,
logger: Logger,
abortSignal?: AbortSignal,
tokenHandler?: (token: string) => void
) => Promise<string>;
export const parseBedrockStream: StreamParser = async (
responseStream,
logger,
abortSignal,
tokenHandler
) => {
const responseBuffer: Uint8Array[] = [];
if (abortSignal) {
abortSignal.addEventListener('abort', () => {
responseStream.destroy(new Error('Aborted'));
return parseBedrockBuffer(responseBuffer, logger);
});
}
responseStream.on('data', (chunk) => {
// special encoding for bedrock, do not attempt to convert to string
responseBuffer.push(chunk);
if (tokenHandler) {
// Initialize an empty Uint8Array to store the concatenated buffer.
const bedrockBuffer: Uint8Array = new Uint8Array(0);
handleBedrockChunk({ chunk, bedrockBuffer, logger, chunkHandler: tokenHandler });
}
});
await finished(responseStream).catch((err) => {
if (abortSignal?.aborted) {
logger.info('Bedrock stream parsing was aborted.');
} else {
throw err;
}
});
return parseBedrockBuffer(responseBuffer, logger);
};
/**
* Parses a Bedrock buffer from an array of chunks.
*
* @param {Uint8Array[]} chunks - Array of Uint8Array chunks to be parsed.
* @returns {string} - Parsed string from the Bedrock buffer.
*/
const parseBedrockBuffer = (chunks: Uint8Array[], logger: Logger): string => {
// Initialize an empty Uint8Array to store the concatenated buffer.
let bedrockBuffer: Uint8Array = new Uint8Array(0);
// Map through each chunk to process the Bedrock buffer.
return chunks
.map((chunk) => {
const processedChunk = handleBedrockChunk({ chunk, bedrockBuffer, logger });
bedrockBuffer = processedChunk.bedrockBuffer;
return processedChunk.decodedChunk;
})
.join('');
};
/**
* Handle a chunk of data from the bedrock API.
* @param chunk - The chunk of data to process.
* @param bedrockBuffer - The buffer containing the current data.
* @param chunkHandler - Optional function to handle the chunk once it has been processed.
* @returns { decodedChunk, bedrockBuffer } - The decoded chunk and the updated buffer.
*/
export const handleBedrockChunk = ({
chunk,
bedrockBuffer,
chunkHandler,
logger,
}: {
chunk: Uint8Array;
bedrockBuffer: Uint8Array;
chunkHandler?: (chunk: string) => void;
logger?: Logger;
}): { decodedChunk: string; bedrockBuffer: Uint8Array } => {
// Concatenate the current chunk to the existing buffer.
let newBuffer = concatChunks(bedrockBuffer, chunk);
// Get the length of the next message in the buffer.
let messageLength = getMessageLength(newBuffer);
// Initialize an array to store fully formed message chunks.
const buildChunks = [];
// Process the buffer until no complete messages are left.
while (newBuffer.byteLength > 0 && newBuffer.byteLength >= messageLength) {
// Extract a chunk of the specified length from the buffer.
const extractedChunk = newBuffer.slice(0, messageLength);
// Add the extracted chunk to the array of fully formed message chunks.
buildChunks.push(extractedChunk);
// Remove the processed chunk from the buffer.
newBuffer = newBuffer.slice(messageLength);
// Get the length of the next message in the updated buffer.
messageLength = getMessageLength(newBuffer);
}
const awsDecoder = new EventStreamCodec(toUtf8, fromUtf8);
// Decode and parse each message chunk, extracting the completion.
const decodedChunk = buildChunks
.map((bChunk) => {
const event = awsDecoder.decode(bChunk);
const body = JSON.parse(
Buffer.from(JSON.parse(new TextDecoder().decode(event.body)).bytes, 'base64').toString()
);
const decodedContent = prepareBedrockOutput(body, logger);
if (chunkHandler) {
chunkHandler(decodedContent);
}
return decodedContent;
})
.join('');
return { decodedChunk, bedrockBuffer: newBuffer };
};
/**
* Gets the length of the next message from the buffer.
*
* @param {Uint8Array} buffer - Buffer containing the message.
* @returns {number} - Length of the next message.
*/
function getMessageLength(buffer: Uint8Array): number {
// If the buffer is empty, return 0.
if (buffer.byteLength === 0) return 0;
// Create a DataView to read the Uint32 value at the beginning of the buffer.
const view = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength);
// Read and return the Uint32 value (message length).
return view.getUint32(0, false);
}
/**
* Concatenates two Uint8Array buffers.
*
* @param {Uint8Array} a - First buffer.
* @param {Uint8Array} b - Second buffer.
* @returns {Uint8Array} - Concatenated buffer.
*/
function concatChunks(a: Uint8Array, b: Uint8Array): Uint8Array {
const newBuffer = new Uint8Array(a.length + b.length);
// Copy the contents of the first buffer to the new buffer.
newBuffer.set(a);
// Copy the contents of the second buffer to the new buffer starting from the end of the first buffer.
newBuffer.set(b, a.length);
return newBuffer;
}
interface CompletionChunk {
type?: string;
delta?: {
type?: string;
text?: string;
stop_reason?: null | string;
stop_sequence?: null | string;
};
message?: { content: Array<{ text?: string; type: string }> };
content_block?: { type: string; text: string };
}
/**
* Prepare the streaming output from the bedrock API
* @param responseBody
* @returns string
*/
const prepareBedrockOutput = (responseBody: CompletionChunk, logger?: Logger): string => {
if (responseBody.type && responseBody.type.length) {
if (responseBody.type === 'message_start' && responseBody.message) {
return parseContent(responseBody.message.content);
} else if (
responseBody.type === 'content_block_delta' &&
responseBody.delta?.type === 'text_delta' &&
typeof responseBody.delta?.text === 'string'
) {
return responseBody.delta.text;
}
}
logger?.warn(`Failed to parse bedrock chunk ${JSON.stringify(responseBody)}`);
return '';
};
/**
* Parse the content from the bedrock API
* @param content
* @returns string
*/
function parseContent(content: Array<{ text?: string; type: string }>): string {
let parsedContent = '';
if (content.length === 1 && content[0].type === 'text' && content[0].text) {
parsedContent = content[0].text;
} else if (content.length > 1) {
// this case should not happen, but here is a fallback
parsedContent = content.reduce((acc, { text }) => (text ? `${acc}\n${text}` : acc), '');
}
return parsedContent;
}
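A small sketch of the length-prefixed framing `handleBedrockChunk` deals with (hypothetical bytes): each AWS event-stream frame begins with a big-endian Uint32 total length, so a partial chunk is buffered rather than decoded.
const partial = new Uint8Array(10);
new DataView(partial.buffer).setUint32(0, 16, false); // frame claims 16 bytes total
const { decodedChunk, bedrockBuffer } = handleBedrockChunk({
  chunk: partial,
  bedrockBuffer: new Uint8Array(0),
});
// only 10 of the 16 bytes have arrived, so nothing is decoded yet:
// decodedChunk === '' and bedrockBuffer.byteLength === 10; the frame is
// decoded once a later chunk completes it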


@ -0,0 +1,23 @@
{
"extends": "../../../tsconfig.base.json",
"compilerOptions": {
"outDir": "target/types",
"types": [
"jest",
"node"
]
},
"include": [
"**/*.ts",
],
"exclude": [
"target/**/*"
],
"kbn_references": [
"@kbn/core",
"@kbn/logging",
"@kbn/actions-plugin",
"@kbn/logging-mocks",
"@kbn/core-http-server"
]
}


@ -17,17 +17,18 @@ import { langChainMessages } from '../../../__mocks__/lang_chain_messages';
import { KNOWLEDGE_BASE_INDEX_PATTERN } from '../../../routes/knowledge_base/constants';
import { callAgentExecutor } from '.';
import { PassThrough, Stream } from 'stream';
import {
ActionsClientChatOpenAI,
ActionsClientLlm,
} from '@kbn/elastic-assistant-common/impl/language_models';
import { ActionsClientChatOpenAI, ActionsClientSimpleChatModel } from '@kbn/langchain/server';
import { AgentExecutorParams } from '../executors/types';
import { ElasticsearchStore } from '../elasticsearch_store/elasticsearch_store';
jest.mock('@kbn/elastic-assistant-common/impl/language_models', () => ({
ActionsClientChatOpenAI: jest.fn(),
ActionsClientLlm: jest.fn(),
}));
jest.mock('@kbn/langchain/server', () => {
const original = jest.requireActual('@kbn/langchain/server');
return {
...original,
ActionsClientChatOpenAI: jest.fn(),
ActionsClientSimpleChatModel: jest.fn(),
};
});
const mockConversationChain = {
call: jest.fn(),
@ -106,6 +107,10 @@ const defaultProps: AgentExecutorParams<true> = {
request: mockRequest,
replacements: {},
};
const bedrockProps = {
...defaultProps,
llmType: 'bedrock',
};
const executorMock = initializeAgentExecutorWithOptions as jest.Mock;
describe('callAgentExecutor', () => {
beforeEach(() => {
@ -139,140 +144,141 @@ describe('callAgentExecutor', () => {
expect(mockCall.mock.calls[0][0].input).toEqual('What is my name?');
});
});
describe('when the agent is not streaming', () => {
it('creates an instance of ActionsClientLlm with the expected context from the request', async () => {
await callAgentExecutor(defaultProps);
expect(ActionsClientLlm).toHaveBeenCalledWith({
actions: mockActions,
connectorId: mockConnectorId,
logger: mockLogger,
maxRetries: 0,
request: mockRequest,
streaming: false,
temperature: 0.2,
llmType: 'openai',
});
});
describe('OpenAI', () => {
describe('when the agent is not streaming', () => {
it('creates an instance of ActionsClientChatOpenAI with the expected context from the request', async () => {
await callAgentExecutor(defaultProps);
it('uses the chat-conversational-react-description agent type', async () => {
await callAgentExecutor(defaultProps);
expect(mockCall.mock.calls[0][0].agentType).toEqual('chat-conversational-react-description');
});
it('uses the DynamicTool version of ESQLKnowledgeBaseTool', async () => {
await callAgentExecutor({
...defaultProps,
assistantTools: [
{
name: 'ESQLKnowledgeBaseTool',
id: 'esql-knowledge-base-tool',
description: '',
sourceRegister: '',
isSupported: jest.fn(),
getTool: jest.fn().mockReturnValue(() => 'ESQLKnowledgeBaseTool'),
},
{
name: 'ESQLKnowledgeBaseStructuredTool',
id: 'esql-knowledge-base-structured-tool',
description: '',
sourceRegister: '',
isSupported: jest.fn(),
getTool: jest.fn().mockReturnValue(() => 'ESQLKnowledgeBaseStructuredTool'),
},
{
name: 'UnrelatedTool',
id: 'unrelated-tool',
description: '',
sourceRegister: '',
isSupported: jest.fn(),
getTool: jest.fn().mockReturnValue(() => 'UnrelatedTool'),
},
],
expect(ActionsClientChatOpenAI).toHaveBeenCalledWith({
actions: mockActions,
connectorId: mockConnectorId,
logger: mockLogger,
maxRetries: 0,
request: mockRequest,
streaming: false,
temperature: 0.2,
llmType: 'openai',
});
});
expect(executorMock.mock.calls[0][0].length).toEqual(2);
expect(executorMock.mock.calls[0][0][0]()).toEqual('ESQLKnowledgeBaseTool');
it('uses the openai-functions agent type', async () => {
await callAgentExecutor(defaultProps);
expect(mockCall.mock.calls[0][0].agentType).toEqual('openai-functions');
});
it('returns the expected response', async () => {
const result = await callAgentExecutor(defaultProps);
expect(result).toEqual({
body: {
connector_id: 'mock-connector-id',
data: mockActionResponse,
status: 'ok',
replacements: {},
trace_data: undefined,
},
headers: {
'content-type': 'application/json',
},
});
});
});
describe('when the agent is streaming', () => {
it('creates an instance of ActionsClientChatOpenAI with the expected context from the request', async () => {
await callAgentExecutor({ ...defaultProps, isStream: true });
it('returns the expected response', async () => {
const result = await callAgentExecutor(defaultProps);
expect(ActionsClientChatOpenAI).toHaveBeenCalledWith({
actions: mockActions,
connectorId: mockConnectorId,
logger: mockLogger,
maxRetries: 0,
request: mockRequest,
streaming: true,
temperature: 0.2,
llmType: 'openai',
});
});
expect(result).toEqual({
body: {
connector_id: 'mock-connector-id',
data: mockActionResponse,
status: 'ok',
replacements: {},
trace_data: undefined,
},
headers: {
'content-type': 'application/json',
},
it('uses the openai-functions agent type', async () => {
await callAgentExecutor({ ...defaultProps, isStream: true });
expect(mockInvoke.mock.calls[0][0].agentType).toEqual('openai-functions');
});
});
});
describe('when the agent is streaming', () => {
it('creates an instance of ActionsClientChatOpenAI with the expected context from the request', async () => {
await callAgentExecutor({ ...defaultProps, isStream: true });
expect(ActionsClientChatOpenAI).toHaveBeenCalledWith({
actions: mockActions,
connectorId: mockConnectorId,
logger: mockLogger,
maxRetries: 0,
request: mockRequest,
streaming: true,
temperature: 0.2,
llmType: 'openai',
});
});
describe('Bedrock', () => {
describe('when the agent is not streaming', () => {
it('creates an instance of ActionsClientSimpleChatModel with the expected context from the request', async () => {
await callAgentExecutor(bedrockProps);
it('uses the openai-functions agent type', async () => {
await callAgentExecutor({ ...defaultProps, isStream: true });
expect(mockInvoke.mock.calls[0][0].agentType).toEqual('openai-functions');
});
it('uses the DynamicStructuredTool version of ESQLKnowledgeBaseTool', async () => {
await callAgentExecutor({
...defaultProps,
isStream: true,
assistantTools: [
{
name: 'ESQLKnowledgeBaseTool',
id: 'esql-knowledge-base-tool',
description: '',
sourceRegister: '',
isSupported: jest.fn(),
getTool: jest.fn().mockReturnValue(() => 'ESQLKnowledgeBaseTool'),
},
{
name: 'ESQLKnowledgeBaseStructuredTool',
id: 'esql-knowledge-base-structured-tool',
description: '',
sourceRegister: '',
isSupported: jest.fn(),
getTool: jest.fn().mockReturnValue(() => 'ESQLKnowledgeBaseStructuredTool'),
},
{
name: 'UnrelatedTool',
id: 'unrelated-tool',
description: '',
sourceRegister: '',
isSupported: jest.fn(),
getTool: jest.fn().mockReturnValue(() => 'UnrelatedTool'),
},
],
expect(ActionsClientSimpleChatModel).toHaveBeenCalledWith({
actions: mockActions,
connectorId: mockConnectorId,
logger: mockLogger,
maxRetries: 0,
request: mockRequest,
streaming: false,
temperature: 0,
llmType: 'bedrock',
});
});
expect(executorMock.mock.calls[0][0].length).toEqual(2);
expect(executorMock.mock.calls[0][0][0]()).toEqual('ESQLKnowledgeBaseStructuredTool');
});
it('uses the structured-chat-zero-shot-react-description agent type', async () => {
await callAgentExecutor(bedrockProps);
expect(mockCall.mock.calls[0][0].agentType).toEqual(
'structured-chat-zero-shot-react-description'
);
});
it('returns the expected response', async () => {
const result = await callAgentExecutor(bedrockProps);
expect(result).toEqual({
body: {
connector_id: 'mock-connector-id',
data: mockActionResponse,
status: 'ok',
replacements: {},
trace_data: undefined,
},
headers: {
'content-type': 'application/json',
},
});
});
});
describe('when the agent is streaming', () => {
it('creates an instance of ActionsClientSimpleChatModel with the expected context from the request', async () => {
await callAgentExecutor({ ...bedrockProps, isStream: true });
expect(ActionsClientSimpleChatModel).toHaveBeenCalledWith({
actions: mockActions,
connectorId: mockConnectorId,
logger: mockLogger,
maxRetries: 0,
request: mockRequest,
streaming: true,
temperature: 0,
llmType: 'bedrock',
});
});
it('uses the structured-chat-zero-shot-react-description agent type', async () => {
await callAgentExecutor({ ...bedrockProps, isStream: true });
expect(mockInvoke.mock.calls[0][0].agentType).toEqual(
'structured-chat-zero-shot-react-description'
);
});
});
});
describe.each([
['OpenAI', defaultProps],
['Bedrock', bedrockProps],
])('Common streaming tests - %s', (_, theProps) => {
it('returns the expected response', async () => {
const result = await callAgentExecutor({ ...defaultProps, isStream: true });
const result = await callAgentExecutor({ ...theProps, isStream: true });
expect(result.body).toBeInstanceOf(Stream.PassThrough);
expect(result.headers).toEqual({
'Cache-Control': 'no-cache',
@@ -299,7 +305,7 @@ describe('callAgentExecutor', () => {
})
);
const onLlmResponse = jest.fn(async () => {}); // We need it to be a promise, or it'll crash because of missing `.catch`
await callAgentExecutor({ ...defaultProps, onLlmResponse, isStream: true });
await callAgentExecutor({ ...theProps, onLlmResponse, isStream: true });
expect(onLlmResponse).toHaveBeenCalledWith(
'hello',
@@ -328,7 +334,7 @@ describe('callAgentExecutor', () => {
})
);
const onLlmResponse = jest.fn(async () => {}); // We need it to be a promise, or it'll crash because of missing `.catch`
await callAgentExecutor({ ...defaultProps, onLlmResponse, isStream: true });
await callAgentExecutor({ ...theProps, onLlmResponse, isStream: true });
expect(mockPush).toHaveBeenCalledWith({ payload: 'hi', type: 'content' });
expect(mockPush).not.toHaveBeenCalledWith({ payload: 'hey', type: 'content' });
@@ -353,7 +359,7 @@ describe('callAgentExecutor', () => {
})
);
const onLlmResponse = jest.fn();
await callAgentExecutor({ ...defaultProps, onLlmResponse, isStream: true });
await callAgentExecutor({ ...theProps, onLlmResponse, isStream: true });
expect(mockPush).toHaveBeenCalledWith({ payload: 'hi', type: 'content' });
expect(mockPush).toHaveBeenCalledWith({ payload: 'hey', type: 'content' });

View file

@@ -13,18 +13,18 @@ import { streamFactory } from '@kbn/ml-response-stream/server';
import { transformError } from '@kbn/securitysolution-es-utils';
import { RetrievalQAChain } from 'langchain/chains';
import {
getDefaultArguments,
ActionsClientChatOpenAI,
ActionsClientLlm,
} from '@kbn/elastic-assistant-common/impl/language_models';
import { getDefaultArguments } from '@kbn/elastic-assistant-common/impl/language_models/constants';
ActionsClientSimpleChatModel,
} from '@kbn/langchain/server';
import { AgentExecutor } from '../executors/types';
import { withAssistantSpan } from '../tracers/with_assistant_span';
import { APMTracer } from '../tracers/apm_tracer';
import { AssistantToolParams } from '../../../types';
import { withAssistantSpan } from '../tracers/with_assistant_span';
export const DEFAULT_AGENT_EXECUTOR_ID = 'Elastic AI Assistant Agent Executor';
/**
* The default agent executor used by the Elastic AI Assistant. Main agent/chain that wraps the ActionsClientLlm,
* The default agent executor used by the Elastic AI Assistant. Main agent/chain that wraps the ActionsClientSimpleChatModel,
* sets up a conversation BufferMemory from chat history, and registers tools like the ESQLKnowledgeBaseTool.
*
*/
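For reference, the conversation memory wiring that this comment describes is set up further down in this file; a minimal sketch of the pattern, assuming LangChain's memory APIs and with illustrative message values:

import { BufferMemory, ChatMessageHistory } from 'langchain/memory';
import { AIMessage, HumanMessage } from '@langchain/core/messages';

// Rebuild conversation memory from the prior chat turns (values illustrative).
const pastMessages = [
  new HumanMessage('What is my name?'),
  new AIMessage('You mentioned earlier that your name is Steph.'),
];
const memory = new BufferMemory({
  chatHistory: new ChatMessageHistory(pastMessages),
  memoryKey: 'chat_history', // the key conversational agents read from
  returnMessages: true, // agents expect message objects, not a flattened string
});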
@@ -49,9 +49,8 @@ export const callAgentExecutor: AgentExecutor<true | false> = async ({
size,
traceOptions,
}) => {
// TODO implement llmClass for bedrock streaming
// tracked here: https://github.com/elastic/security-team/issues/7363
const llmClass = isStream ? ActionsClientChatOpenAI : ActionsClientLlm;
const isOpenAI = llmType === 'openai';
const llmClass = isOpenAI ? ActionsClientChatOpenAI : ActionsClientSimpleChatModel;
const llm = new llmClass({
actions,
@@ -103,13 +102,9 @@ export const callAgentExecutor: AgentExecutor<true | false> = async ({
size,
};
const tools: ToolInterface[] = assistantTools
.filter((tool) =>
isStream
? tool.id !== 'esql-knowledge-base-tool'
: tool.id !== 'esql-knowledge-base-structured-tool'
)
.flatMap((tool) => tool.getTool(assistantToolParams) ?? []);
const tools: ToolInterface[] = assistantTools.flatMap(
(tool) => tool.getTool(assistantToolParams) ?? []
);
logger.debug(`applicable tools: ${JSON.stringify(tools.map((t) => t.name).join(', '), null, 2)}`);
@@ -118,15 +113,20 @@ export const callAgentExecutor: AgentExecutor<true | false> = async ({
verbose: false,
handleParsingErrors: 'Try again, paying close attention to the allowed tool input',
};
// isStream check is not on agentType alone because typescript doesn't like
const executor = isStream
// isOpenAI check is not on agentType alone because typescript doesn't like
const executor = isOpenAI
? await initializeAgentExecutorWithOptions(tools, llm, {
agentType: 'openai-functions',
...executorArgs,
})
: await initializeAgentExecutorWithOptions(tools, llm, {
agentType: 'chat-conversational-react-description',
agentType: 'structured-chat-zero-shot-react-description',
...executorArgs,
returnIntermediateSteps: false,
agentArgs: {
// this is important to help LangChain correctly format tool input
humanMessageTemplate: `Question: {input}\n\n{agent_scratchpad}`,
},
});
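Whichever branch is taken, callers drive the resulting executor through the same interface; a hedged usage sketch (the input value is illustrative):

// Both agent types expose the same call surface to the calling code.
const response = await executor.call({ input: 'What is my name?' });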
// Sets up tracer for tracing executions to APM. See x-pack/plugins/elastic_assistant/server/lib/langchain/tracers/README.mdx

View file

@@ -10,7 +10,7 @@ import { RetrievalQAChain } from 'langchain/chains';
import { BufferMemory, ChatMessageHistory } from 'langchain/memory';
import { ChainTool } from 'langchain/tools/chain';
import { ActionsClientLlm } from '@kbn/elastic-assistant-common/impl/language_models';
import { ActionsClientLlm } from '@kbn/langchain/server';
import { AgentExecutor } from './types';
import { withAssistantSpan } from '../tracers/with_assistant_span';
import { APMTracer } from '../tracers/apm_tracer';

View file

@@ -6,14 +6,14 @@
*/
import { Readable } from 'stream';
import { finished } from 'stream/promises';
import { handleBedrockChunk } from '@kbn/elastic-assistant-common';
import { Logger } from '@kbn/core/server';
import { parseBedrockStream } from '@kbn/langchain/server';
type StreamParser = (
responseStream: Readable,
logger: Logger,
abortSignal?: AbortSignal
abortSignal?: AbortSignal,
tokenHandler?: (token: string) => void
) => Promise<string>;
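The new optional tokenHandler parameter lets a parser forward tokens as they arrive while still resolving with the full response; a minimal sketch against the StreamParser signature above (the push callback is illustrative):

import { Readable } from 'stream';
import { Logger } from '@kbn/core/server';
import { parseBedrockStream } from '@kbn/langchain/server';

// Resolves with the concatenated response while forwarding each decoded
// token to a consumer as it arrives.
const readWithTokens = (stream: Readable, logger: Logger, push: (token: string) => void) =>
  parseBedrockStream(stream, logger, undefined, (token) => push(token));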
export const handleStreamStorage = async ({
@@ -87,47 +87,3 @@ const parseOpenAIResponse = (responseBody: string) =>
const msg = line.choices[0].delta;
return prev + (msg.content || '');
}, '');
const parseBedrockStream: StreamParser = async (responseStream, logger, abortSignal) => {
const responseBuffer: Uint8Array[] = [];
if (abortSignal) {
abortSignal.addEventListener('abort', () => {
responseStream.destroy(new Error('Aborted'));
return parseBedrockBuffer(responseBuffer, logger);
});
}
responseStream.on('data', (chunk) => {
// special encoding for bedrock, do not attempt to convert to string
responseBuffer.push(chunk);
});
await finished(responseStream).catch((err) => {
if (abortSignal?.aborted) {
logger.info('Bedrock stream parsing was aborted.');
} else {
throw err;
}
});
return parseBedrockBuffer(responseBuffer, logger);
};
/**
* Parses a Bedrock buffer from an array of chunks.
*
* @param {Uint8Array[]} chunks - Array of Uint8Array chunks to be parsed.
* @returns {string} - Parsed string from the Bedrock buffer.
*/
const parseBedrockBuffer = (chunks: Uint8Array[], logger: Logger): string => {
// Initialize an empty Uint8Array to store the concatenated buffer.
let bedrockBuffer: Uint8Array = new Uint8Array(0);
// Map through each chunk to process the Bedrock buffer.
return chunks
.map((chunk) => {
const processedChunk = handleBedrockChunk({ chunk, bedrockBuffer, logger });
bedrockBuffer = processedChunk.bedrockBuffer;
return processedChunk.decodedChunk;
})
.join('');
};

View file

@@ -12,9 +12,9 @@ import {
ExecuteConnectorRequestBody,
Replacements,
} from '@kbn/elastic-assistant-common';
import { ActionsClientLlm } from '@kbn/elastic-assistant-common/impl/language_models';
import { AnonymizationFieldResponse } from '@kbn/elastic-assistant-common/impl/schemas/anonymization_fields/bulk_crud_anonymization_fields_route.gen';
import { v4 as uuidv4 } from 'uuid';
import { ActionsClientLlm } from '@kbn/langchain/server';
import { AssistantToolParams } from '../../types';

View file

@@ -6,7 +6,6 @@
*/
import { buildRouteValidationWithZod } from '@kbn/elastic-assistant-common/impl/schemas/common';
import { ActionsClientLlm } from '@kbn/elastic-assistant-common/impl/language_models';
import { type IKibanaResponse, IRouter, Logger } from '@kbn/core/server';
import {
AttackDiscoveryPostRequestBody,
@@ -15,6 +14,7 @@ import {
Replacements,
} from '@kbn/elastic-assistant-common';
import { transformError } from '@kbn/securitysolution-es-utils';
import { ActionsClientLlm } from '@kbn/langchain/server';
import { ATTACK_DISCOVERY } from '../../../common/constants';
import { getAssistantToolParams } from './helpers';

View file

@@ -17,7 +17,7 @@ import {
PostEvaluateResponse,
ExecuteConnectorRequestBody,
} from '@kbn/elastic-assistant-common';
import { ActionsClientLlm } from '@kbn/elastic-assistant-common/impl/language_models';
import { ActionsClientLlm } from '@kbn/langchain/server';
import { buildRouteValidationWithZod } from '@kbn/elastic-assistant-common/impl/schemas/common';
import { ESQL_RESOURCE, KNOWLEDGE_BASE_INDEX_PATTERN } from '../knowledge_base/constants';
import { buildResponse } from '../../lib/build_response';

View file

@@ -654,11 +654,84 @@ describe('postActionsConnectorExecuteRoute', () => {
);
expect(result).toEqual({
body: {
connector_id: 'mock-connector-id',
data: mockActionResponse,
status: 'ok',
body: mockStream,
headers: {
'Cache-Control': 'no-cache',
Connection: 'keep-alive',
'Transfer-Encoding': 'chunked',
'X-Accel-Buffering': 'no',
'X-Content-Type-Options': 'nosniff',
},
});
}),
};
}),
},
};
await postActionsConnectorExecuteRoute(
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
mockGetElser
);
});
it('returns the expected response when subAction=invokeAI and actionTypeId=.gen-ai', async () => {
const mockRouter = {
versioned: {
post: jest.fn().mockImplementation(() => {
return {
addVersion: jest.fn().mockImplementation(async (_, handler) => {
const result = await handler(
mockContext,
{
...mockRequest,
body: {
...mockRequest.body,
subAction: 'invokeAI',
actionTypeId: '.gen-ai',
},
},
mockResponse
);
expect(result).toEqual({
body: { connector_id: 'mock-connector-id', data: mockActionResponse, status: 'ok' },
headers: {
'content-type': 'application/json',
},
});
}),
};
}),
},
};
await postActionsConnectorExecuteRoute(
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
mockGetElser
);
});
it('returns the expected response when subAction=invokeAI and actionTypeId=.bedrock', async () => {
const mockRouter = {
versioned: {
post: jest.fn().mockImplementation(() => {
return {
addVersion: jest.fn().mockImplementation(async (_, handler) => {
const result = await handler(
mockContext,
{
...mockRequest,
body: {
...mockRequest.body,
subAction: 'invokeAI',
actionTypeId: '.bedrock',
},
},
mockResponse
);
expect(result).toEqual({
body: { connector_id: 'mock-connector-id', data: mockActionResponse, status: 'ok' },
headers: {
'content-type': 'application/json',
},

View file

@@ -357,10 +357,7 @@ export const postActionsConnectorExecuteRoute = (
connectorId,
esClient,
esStore,
isStream:
// TODO implement llmClass for bedrock streaming
// tracked here: https://github.com/elastic/security-team/issues/7363
request.body.subAction !== 'invokeAI' && actionTypeId === '.gen-ai',
isStream: request.body.subAction !== 'invokeAI',
llmType: getLlmType(actionTypeId),
langChainMessages,
logger,

View file

@@ -36,7 +36,8 @@ import { LicensingApiRequestHandlerContext } from '@kbn/licensing-plugin/server'
import {
ActionsClientChatOpenAI,
ActionsClientLlm,
} from '@kbn/elastic-assistant-common/impl/language_models';
ActionsClientSimpleChatModel,
} from '@kbn/langchain/server';
import { AIAssistantConversationsDataClient } from './ai_assistant_data_clients/conversations';
import type { GetRegisteredFeatures, GetRegisteredTools } from './services/app_context';
@@ -212,7 +213,7 @@ export interface AssistantToolParams {
chain?: RetrievalQAChain;
esClient: ElasticsearchClient;
langChainTimeout?: number;
llm?: ActionsClientLlm | ActionsClientChatOpenAI;
llm?: ActionsClientLlm | ActionsClientChatOpenAI | ActionsClientSimpleChatModel;
modelExists: boolean;
onNewReplacements?: (newReplacements: Replacements) => void;
replacements?: Replacements;

View file

@@ -46,6 +46,7 @@
"@kbn/i18n",
"@kbn/core-security-common",
"@kbn/core-saved-objects-api-server",
"@kbn/langchain",
],
"exclude": [
"target/**/*",

View file

@@ -6,10 +6,7 @@
*/
import { getChatParams } from './get_chat_params';
import {
ActionsClientChatOpenAI,
ActionsClientLlm,
} from '@kbn/elastic-assistant-common/impl/language_models';
import { ActionsClientChatOpenAI, ActionsClientLlm } from '@kbn/langchain/server';
import {
OPENAI_CONNECTOR_ID,
BEDROCK_CONNECTOR_ID,
@@ -18,10 +15,14 @@ import { Prompt } from '../../common/prompt';
import { KibanaRequest, Logger } from '@kbn/core/server';
import { PluginStartContract as ActionsPluginStartContract } from '@kbn/actions-plugin/server';
jest.mock('@kbn/elastic-assistant-common/impl/language_models', () => ({
ActionsClientChatOpenAI: jest.fn(),
ActionsClientLlm: jest.fn(),
}));
jest.mock('@kbn/langchain/server', () => {
const original = jest.requireActual('@kbn/langchain/server');
return {
...original,
ActionsClientChatOpenAI: jest.fn(),
ActionsClientLlm: jest.fn(),
};
});
jest.mock('../../common/prompt', () => ({
Prompt: jest.fn((instructions) => instructions),

View file

@@ -6,17 +6,17 @@
*/
import { OPENAI_CONNECTOR_ID } from '@kbn/stack-connectors-plugin/common/openai/constants';
import {
ActionsClientChatOpenAI,
ActionsClientLlm,
} from '@kbn/elastic-assistant-common/impl/language_models';
import { v4 as uuidv4 } from 'uuid';
import { BEDROCK_CONNECTOR_ID } from '@kbn/stack-connectors-plugin/common/bedrock/constants';
import type { PluginStartContract as ActionsPluginStartContract } from '@kbn/actions-plugin/server';
import type { KibanaRequest, Logger } from '@kbn/core/server';
import { BaseLanguageModel } from '@langchain/core/language_models/base';
import type { Connector } from '@kbn/actions-plugin/server/application/connector/types';
import { getDefaultArguments } from '@kbn/elastic-assistant-common/impl/language_models/constants';
import {
ActionsClientChatOpenAI,
ActionsClientLlm,
getDefaultArguments,
} from '@kbn/langchain/server';
import { Prompt } from '../../common/prompt';
export const getChatParams = async (

View file

@@ -32,7 +32,7 @@
"@kbn/stack-connectors-plugin",
"@kbn/cases-plugin",
"@kbn/triggers-actions-ui-plugin",
"@kbn/elastic-assistant-common",
"@kbn/langchain",
"@kbn/logging",
"@kbn/react-kibana-context-render",
"@kbn/doc-links",

View file

@@ -15,13 +15,6 @@ export const YOU_ARE_A_HELPFUL_EXPERT_ASSISTANT = i18n.translate(
}
);
export const USE_THE_FOLLOWING_CONTEXT_TO_ANSWER = i18n.translate(
'xpack.securitySolution.assistant.content.prompts.system.useTheFollowingContextToAnswer',
{
defaultMessage: 'Use the following context to answer questions:',
}
);
export const IF_YOU_DONT_KNOW_THE_ANSWER = i18n.translate(
'xpack.securitySolution.assistant.content.prompts.system.ifYouDontKnowTheAnswer',
{
@@ -46,8 +39,7 @@ export const FORMAT_OUTPUT_CORRECTLY = i18n.translate(
);
export const DEFAULT_SYSTEM_PROMPT_NON_I18N = `${YOU_ARE_A_HELPFUL_EXPERT_ASSISTANT} ${IF_YOU_DONT_KNOW_THE_ANSWER}
${FORMAT_OUTPUT_CORRECTLY}
${USE_THE_FOLLOWING_CONTEXT_TO_ANSWER}`;
${FORMAT_OUTPUT_CORRECTLY}`;
export const DEFAULT_SYSTEM_PROMPT_NAME = i18n.translate(
'xpack.securitySolution.assistant.content.prompts.system.defaultSystemPromptName',
@@ -58,8 +50,7 @@ export const DEFAULT_SYSTEM_PROMPT_NAME = i18n.translate(
export const SUPERHERO_SYSTEM_PROMPT_NON_I18N = `${YOU_ARE_A_HELPFUL_EXPERT_ASSISTANT} ${IF_YOU_DONT_KNOW_THE_ANSWER}
${SUPERHERO_PERSONALITY}
${FORMAT_OUTPUT_CORRECTLY}
${USE_THE_FOLLOWING_CONTEXT_TO_ANSWER}`;
${FORMAT_OUTPUT_CORRECTLY}`;
export const SUPERHERO_SYSTEM_PROMPT_NAME = i18n.translate(
'xpack.securitySolution.assistant.content.prompts.system.superheroSystemPromptName',
@@ -67,7 +58,3 @@ export const SUPERHERO_SYSTEM_PROMPT_NAME = i18n.translate(
defaultMessage: 'Enhanced system prompt',
}
);
export const SYSTEM_PROMPT_CONTEXT_NON_I18N = (context: string) => {
return `CONTEXT:\n"""\n${context}\n"""`;
};

View file

@@ -6,8 +6,8 @@
*/
import type { SearchResponse } from '@elastic/elasticsearch/lib/api/types';
import { DynamicTool } from '@langchain/core/tools';
import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';
import { requestHasRequiredAnonymizationParams } from '@kbn/elastic-assistant-plugin/server/lib/langchain/helpers';
import type { AssistantTool, AssistantToolParams } from '@kbn/elastic-assistant-plugin/server';
import { getAlertsCountQuery } from './get_alert_counts_query';
@@ -17,7 +17,7 @@ export interface AlertCountsToolParams extends AssistantToolParams {
alertsIndexPattern: string;
}
export const ALERT_COUNTS_TOOL_DESCRIPTION =
'Call this for the counts of last 24 hours of open and acknowledged alerts in the environment, grouped by their severity and workflow status.';
'Call this for the counts of last 24 hours of open and acknowledged alerts in the environment, grouped by their severity and workflow status. The response will be JSON and from it you can summarize the information to answer the question.';
export const ALERT_COUNTS_TOOL: AssistantTool = {
id: 'alert-counts-tool',
@@ -31,9 +31,10 @@ export const ALERT_COUNTS_TOOL: AssistantTool = {
getTool(params: AssistantToolParams) {
if (!this.isSupported(params)) return null;
const { alertsIndexPattern, esClient } = params as AlertCountsToolParams;
return new DynamicTool({
return new DynamicStructuredTool({
name: 'AlertCountsTool',
description: ALERT_COUNTS_TOOL_DESCRIPTION,
schema: z.object({}),
func: async () => {
const query = getAlertsCountQuery(alertsIndexPattern);
const result = await esClient.search<SearchResponse>(query);
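The empty z.object({}) schema is the general pattern for structured tools that take no input; a standalone sketch with a hypothetical tool name:

import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';

// A no-input structured tool: the agent invokes it with an empty object.
const healthCheckTool = new DynamicStructuredTool({
  name: 'HealthCheckTool',
  description: 'Call this to verify tool wiring. Takes no input.',
  schema: z.object({}),
  func: async () => JSON.stringify({ ok: true }),
});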

View file

@@ -8,7 +8,7 @@
import type { ElasticsearchClient } from '@kbn/core-elasticsearch-server';
import type { KibanaRequest } from '@kbn/core-http-server';
import type { AttackDiscoveryPostRequestBody } from '@kbn/elastic-assistant-common';
import type { ActionsClientLlm } from '@kbn/elastic-assistant-common/impl/language_models';
import type { ActionsClientLlm } from '@kbn/langchain/server';
import type { DynamicTool } from '@langchain/core/tools';
import { ATTACK_DISCOVERY_TOOL } from './attack_discovery_tool';

View file

@@ -1,52 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';
import type { AssistantTool, AssistantToolParams } from '@kbn/elastic-assistant-plugin/server';
import { APP_UI_ID } from '../../../../common';
export type EsqlKnowledgeBaseToolParams = AssistantToolParams;
const toolDetails = {
description:
'Call this for knowledge on how to build an ESQL query, or answer questions about the ES|QL query language. Input must always be the query on a single line, with no other text. Only output valid ES|QL queries as described above. Do not add any additional text to describe your output.',
id: 'esql-knowledge-base-structured-tool',
name: 'ESQLKnowledgeBaseStructuredTool',
};
export const ESQL_KNOWLEDGE_BASE_STRUCTURED_TOOL: AssistantTool = {
...toolDetails,
sourceRegister: APP_UI_ID,
isSupported: (params: AssistantToolParams): params is EsqlKnowledgeBaseToolParams => {
const { chain, isEnabledKnowledgeBase, modelExists } = params;
return isEnabledKnowledgeBase && modelExists && chain != null;
},
getTool(params: AssistantToolParams) {
if (!this.isSupported(params)) return null;
const { chain } = params as EsqlKnowledgeBaseToolParams;
if (chain == null) return null;
return new DynamicStructuredTool({
name: toolDetails.name,
description: toolDetails.description,
schema: z.object({
question: z.string().describe(`The user's exact question about ESQL`),
}),
func: async (input, _, cbManager) => {
const result = await chain.invoke(
{
query: input.question,
},
cbManager
);
return result.text;
},
tags: ['esql', 'query-generation', 'knowledge-base'],
});
},
};

View file

@@ -5,7 +5,8 @@
* 2.0.
*/
import { DynamicTool } from '@langchain/core/tools';
import { DynamicStructuredTool } from '@langchain/core/tools';
import { z } from 'zod';
import type { AssistantTool, AssistantToolParams } from '@kbn/elastic-assistant-plugin/server';
import { APP_UI_ID } from '../../../../common';
@@ -30,13 +31,16 @@ export const ESQL_KNOWLEDGE_BASE_TOOL: AssistantTool = {
const { chain } = params as EsqlKnowledgeBaseToolParams;
if (chain == null) return null;
return new DynamicTool({
return new DynamicStructuredTool({
name: toolDetails.name,
description: toolDetails.description,
schema: z.object({
question: z.string().describe(`The user's exact question about ESQL`),
}),
func: async (input, _, cbManager) => {
const result = await chain.invoke(
{
query: input,
query: input.question,
},
cbManager
);
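With the question field now in the schema, callers pass a structured argument instead of a bare string; a hedged usage sketch (the variable name and question text are illustrative):

// `esqlTool` stands for the DynamicStructuredTool instance returned above;
// the input object is validated against the zod schema before `func` runs.
const answer = await esqlTool.invoke({
  question: 'How do I filter by host.name in ES|QL?',
});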

View file

@@ -9,7 +9,6 @@ import type { AssistantTool } from '@kbn/elastic-assistant-plugin/server';
import { ALERT_COUNTS_TOOL } from './alert_counts/alert_counts_tool';
import { ESQL_KNOWLEDGE_BASE_TOOL } from './esql_language_knowledge_base/esql_language_knowledge_base_tool';
import { ESQL_KNOWLEDGE_BASE_STRUCTURED_TOOL } from './esql_language_knowledge_base/esql_language_knowledge_base_structured_tool';
import { OPEN_AND_ACKNOWLEDGED_ALERTS_TOOL } from './open_and_acknowledged_alerts/open_and_acknowledged_alerts_tool';
import { ATTACK_DISCOVERY_TOOL } from './attack_discovery/attack_discovery_tool';
@@ -17,6 +16,5 @@ export const getAssistantTools = (): AssistantTool[] => [
ALERT_COUNTS_TOOL,
ATTACK_DISCOVERY_TOOL,
ESQL_KNOWLEDGE_BASE_TOOL,
ESQL_KNOWLEDGE_BASE_STRUCTURED_TOOL,
OPEN_AND_ACKNOWLEDGED_ALERTS_TOOL,
];

View file

@@ -8,9 +8,9 @@
import type { SearchResponse } from '@elastic/elasticsearch/lib/api/types';
import type { Replacements } from '@kbn/elastic-assistant-common';
import { getAnonymizedValue, transformRawData } from '@kbn/elastic-assistant-common';
import { DynamicTool } from '@langchain/core/tools';
import { DynamicStructuredTool } from '@langchain/core/tools';
import { requestHasRequiredAnonymizationParams } from '@kbn/elastic-assistant-plugin/server/lib/langchain/helpers';
import { z } from 'zod';
import type { AssistantTool, AssistantToolParams } from '@kbn/elastic-assistant-plugin/server';
import { getOpenAndAcknowledgedAlertsQuery } from './get_open_and_acknowledged_alerts_query';
import { getRawDataOrDefault, sizeIsOutOfRange } from './helpers';
@@ -22,7 +22,7 @@ export interface OpenAndAcknowledgedAlertsToolParams extends AssistantToolParams
}
export const OPEN_AND_ACKNOWLEDGED_ALERTS_TOOL_DESCRIPTION =
'Call this for knowledge about the latest n open and acknowledged alerts (sorted by `kibana.alert.risk_score`) in the environment, or when answering questions about open alerts. Input should be a string.';
'Call this for knowledge about the latest n open and acknowledged alerts (sorted by `kibana.alert.risk_score`) in the environment, or when answering questions about open alerts. Do not call this tool for alert count or quantity. Input should be an empty object. The output is an array of the latest n open and acknowledged alerts.';
/**
* Returns a tool for querying open and acknowledged alerts, or null if the
@@ -53,9 +53,10 @@ export const OPEN_AND_ACKNOWLEDGED_ALERTS_TOOL: AssistantTool = {
replacements,
size,
} = params as OpenAndAcknowledgedAlertsToolParams;
return new DynamicTool({
return new DynamicStructuredTool({
name: 'OpenAndAcknowledgedAlertsTool',
description: OPEN_AND_ACKNOWLEDGED_ALERTS_TOOL_DESCRIPTION,
schema: z.object({}),
func: async () => {
const query = getOpenAndAcknowledgedAlertsQuery({
alertsIndexPattern,

View file

@@ -202,6 +202,7 @@
"@kbn/react-kibana-context-render",
"@kbn/search-types",
"@kbn/field-utils",
"@kbn/core-saved-objects-api-server-mocks"
"@kbn/core-saved-objects-api-server-mocks",
"@kbn/langchain"
]
}

View file

@@ -148,7 +148,8 @@ export const RunActionResponseSchema = schema.object(
message: schema.object(
{
role: schema.string(),
content: schema.maybe(schema.string()),
// nullable because message can contain function calls instead of final response when used with RAG
content: schema.maybe(schema.nullable(schema.string())),
},
{ unknowns: 'ignore' }
),
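For illustration, the nullable case arises when the model responds with a function call rather than text; an abbreviated, illustrative response shape:

// Abbreviated OpenAI-style chat completion: the assistant answered with a
// function call, so `message.content` is null instead of a string.
const exampleResponse = {
  choices: [
    {
      message: {
        role: 'assistant',
        content: null,
        function_call: { name: 'AlertCountsTool', arguments: '{}' },
      },
      finish_reason: 'function_call',
    },
  ],
};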

View file

@@ -319,6 +319,7 @@ export class OpenAIConnector extends SubActionConnector<Config, Secrets> {
* Non-streamed security solution AI Assistant requests
* Responsible for invoking the runApi method with the provided body.
* It then formats the response into a string
* To use function calling, call the run subaction directly
* @param body - the OpenAI chat completion request body
* @returns an object with the response string and the usage object
*/

View file

@@ -13577,7 +13577,6 @@
"xpack.elasticAssistant.assistant.content.prompts.system.ifYouDontKnowTheAnswer": "Ne répondez pas aux questions qui ne sont pas liées à Elastic Security.",
"xpack.elasticAssistant.assistant.content.prompts.system.superheroPersonality": "Donnez la réponse la plus pertinente et détaillée possible, comme si vous deviez communiquer ces informations à un expert en cybersécurité.",
"xpack.elasticAssistant.assistant.content.prompts.system.superheroSystemPromptName": "Invite système améliorée",
"xpack.elasticAssistant.assistant.content.prompts.system.useTheFollowingContextToAnswer": "Utilisez le contexte suivant pour répondre aux questions :",
"xpack.elasticAssistant.assistant.content.prompts.system.youAreAHelpfulExpertAssistant": "Vous êtes un assistant expert et serviable qui répond à des questions au sujet dElastic Security.",
"xpack.elasticAssistant.assistant.content.prompts.user.finallySuggestInvestigationGuideAndFormatAsMarkdown": "Ajoutez votre description, les actions que vous recommandez ainsi que les étapes de triage à puces. Utilisez les données \"MITRE ATT&CK\" fournies pour ajouter du contexte et des recommandations de MITRE ainsi que des liens hypertexte vers les pages pertinentes sur le site web de MITRE. Assurez-vous dinclure les scores de risque de lutilisateur et de lhôte du contexte. Votre réponse doit inclure des étapes qui pointent vers les fonctionnalités spécifiques dElastic Security, y compris les actions de réponse du terminal, lintégration OSQuery Manager dElastic Agent (avec des exemples de requêtes OSQuery), des analyses de timeline et dentités, ainsi quun lien pour toute la documentation Elastic Security pertinente.",
"xpack.elasticAssistant.assistant.content.prompts.user.thenSummarizeSuggestedKqlAndEqlQueries": "Évaluer lévénement depuis le contexte ci-dessus et formater soigneusement la sortie en syntaxe Markdown pour mon cas Elastic Security.",
@@ -33571,7 +33570,6 @@
"xpack.securitySolution.assistant.content.prompts.system.outputFormatting": "Si vous répondez à une question liée à KQL, à EQL, ou à ES|QL, la réponse doit être immédiatement utilisable dans une chronologie d'Elastic Security ; veuillez toujours formater correctement la sortie avec des accents graves. Toute réponse à une requête DSL doit aussi être utilisable dans une chronologie de sécurité. Cela signifie que vous ne devez inclure que la portion \"filtre\" de la requête.",
"xpack.securitySolution.assistant.content.prompts.system.superheroPersonality": "Donnez la réponse la plus pertinente et détaillée possible, comme si vous deviez communiquer ces informations à un expert en cybersécurité.",
"xpack.securitySolution.assistant.content.prompts.system.superheroSystemPromptName": "Invite système améliorée",
"xpack.securitySolution.assistant.content.prompts.system.useTheFollowingContextToAnswer": "Utilisez le contexte suivant pour répondre aux questions :",
"xpack.securitySolution.assistant.content.prompts.system.youAreAHelpfulExpertAssistant": "Vous êtes un assistant expert et serviable qui répond à des questions au sujet dElastic Security.",
"xpack.securitySolution.assistant.content.prompts.user.finallySuggestInvestigationGuideAndFormatAsMarkdown": "Ajoutez votre description, les actions que vous recommandez ainsi que les étapes de triage à puces. Utilisez les données \"MITRE ATT&CK\" fournies pour ajouter du contexte et des recommandations de MITRE ainsi que des liens hypertexte vers les pages pertinentes sur le site web de MITRE. Assurez-vous dinclure les scores de risque de lutilisateur et de lhôte du contexte. Votre réponse doit inclure des étapes qui pointent vers les fonctionnalités spécifiques dElastic Security, y compris les actions de réponse du terminal, lintégration OSQuery Manager dElastic Agent (avec des exemples de requêtes OSQuery), des analyses de timeline et dentités, ainsi quun lien pour toute la documentation Elastic Security pertinente.",
"xpack.securitySolution.assistant.content.prompts.user.thenSummarizeSuggestedKqlAndEqlQueries": "Évaluer lévénement depuis le contexte ci-dessus et formater soigneusement la sortie en syntaxe Markdown pour mon cas Elastic Security.",

View file

@@ -13556,7 +13556,6 @@
"xpack.elasticAssistant.assistant.content.prompts.system.ifYouDontKnowTheAnswer": "Elasticセキュリティに関連していない質問には回答しないでください。",
"xpack.elasticAssistant.assistant.content.prompts.system.superheroPersonality": "サイバーセキュリティの専門家に情報を伝えるつもりで、できるだけ詳細で関連性のある回答を入力してください。",
"xpack.elasticAssistant.assistant.content.prompts.system.superheroSystemPromptName": "拡張システムプロンプト",
"xpack.elasticAssistant.assistant.content.prompts.system.useTheFollowingContextToAnswer": "次のコンテキストを使用して、質問に回答します。",
"xpack.elasticAssistant.assistant.content.prompts.system.youAreAHelpfulExpertAssistant": "あなたはElasticセキュリティに関する質問に答える、親切で専門的なアシスタントです。",
"xpack.elasticAssistant.assistant.content.prompts.user.finallySuggestInvestigationGuideAndFormatAsMarkdown": "説明、推奨されるアクション、箇条書きのトリアージステップを追加します。提供された MITRE ATT&CKデータを使用して、MITREからのコンテキストや推奨事項を追加し、MITREのWebサイトの関連ページにハイパーリンクを貼ります。コンテキストのユーザーとホストのリスクスコアデータを必ず含めてください。回答には、エンドポイント対応アクション、ElasticエージェントOSQueryマネージャー統合osqueryクエリの例を付けて、タイムライン、エンティティ分析など、Elasticセキュリティ固有の機能を指す手順を含め、関連するElasticセキュリティのドキュメントすべてにリンクしてください。",
"xpack.elasticAssistant.assistant.content.prompts.user.thenSummarizeSuggestedKqlAndEqlQueries": "上記のコンテキストからイベントを評価し、Elasticセキュリティのケース用に、出力をマークダウン構文で正しく書式設定してください。",
@@ -33540,7 +33539,6 @@
"xpack.securitySolution.assistant.content.prompts.system.outputFormatting": "KQL、EQL、ES|QLに関連する質問に回答した場合、Elastic Securityのタイムライン内ですぐに使用できるようにする必要があります。出力は常にバックティックで正しい形式にしてください。クエリDSLで提供されるすべての回答は、セキュリティタイムラインでも使用可能でなければなりません。つまり、クエリの\"フィルター\"部分のみを含める必要があります。",
"xpack.securitySolution.assistant.content.prompts.system.superheroPersonality": "サイバーセキュリティの専門家に情報を伝えるつもりで、できるだけ詳細で関連性のある回答を入力してください。",
"xpack.securitySolution.assistant.content.prompts.system.superheroSystemPromptName": "拡張システムプロンプト",
"xpack.securitySolution.assistant.content.prompts.system.useTheFollowingContextToAnswer": "次のコンテキストを使用して、質問に回答します。",
"xpack.securitySolution.assistant.content.prompts.system.youAreAHelpfulExpertAssistant": "あなたはElasticセキュリティに関する質問に答える、親切で専門的なアシスタントです。",
"xpack.securitySolution.assistant.content.prompts.user.finallySuggestInvestigationGuideAndFormatAsMarkdown": "説明、推奨されるアクション、箇条書きのトリアージステップを追加します。提供された MITRE ATT&CKデータを使用して、MITREからのコンテキストや推奨事項を追加し、MITREのWebサイトの関連ページにハイパーリンクを貼ります。コンテキストのユーザーとホストのリスクスコアデータを必ず含めてください。回答には、エンドポイント対応アクション、ElasticエージェントOSQueryマネージャー統合osqueryクエリの例を付けて、タイムライン、エンティティ分析など、Elasticセキュリティ固有の機能を指す手順を含め、関連するElasticセキュリティのドキュメントすべてにリンクしてください。",
"xpack.securitySolution.assistant.content.prompts.user.thenSummarizeSuggestedKqlAndEqlQueries": "上記のコンテキストからイベントを評価し、Elasticセキュリティのケース用に、出力をマークダウン構文で正しく書式設定してください。",

View file

@@ -13583,7 +13583,6 @@
"xpack.elasticAssistant.assistant.content.prompts.system.ifYouDontKnowTheAnswer": "不回答与 Elastic Security 无关的问题。",
"xpack.elasticAssistant.assistant.content.prompts.system.superheroPersonality": "提供可能的最详细、最相关的答案,就好像您正将此信息转发给网络安全专家一样。",
"xpack.elasticAssistant.assistant.content.prompts.system.superheroSystemPromptName": "已增强系统提示",
"xpack.elasticAssistant.assistant.content.prompts.system.useTheFollowingContextToAnswer": "使用以下上下文回答问题:",
"xpack.elasticAssistant.assistant.content.prompts.system.youAreAHelpfulExpertAssistant": "您是一位可帮助回答 Elastic Security 相关问题的专家助手。",
"xpack.elasticAssistant.assistant.content.prompts.user.finallySuggestInvestigationGuideAndFormatAsMarkdown": "添加描述、建议操作和带项目符号的分类步骤。使用提供的 MITRE ATT&CK 数据以从 MITRE 添加更多上下文和建议,以及指向 MITRE 网站上的相关页面的超链接。确保包括上下文中的用户和主机风险分数数据。您的响应应包含指向 Elastic Security 特定功能的步骤包括终端响应操作、Elastic 代理 OSQuery 管理器集成(带示例 osquery 查询)、时间线和实体分析,以及所有相关 Elastic Security 文档的链接。",
"xpack.elasticAssistant.assistant.content.prompts.user.thenSummarizeSuggestedKqlAndEqlQueries": "评估来自上述上下文的事件,并以用于我的 Elastic Security 案例的 Markdown 语法对您的输出进行全面格式化。",
@@ -33584,7 +33583,6 @@
"xpack.securitySolution.assistant.content.prompts.system.outputFormatting": "如果您回答与 KQL、EQL 或 ES|QL 相关的问题,它应在 Elastic Security 时间线中立即可用;请始终用反勾号对输出进行正确格式化。为查询 DSL 提供的任何答案也应在安全时间线中可用。这意味着您只应包括查询的“筛选”部分。",
"xpack.securitySolution.assistant.content.prompts.system.superheroPersonality": "提供可能的最详细、最相关的答案,就好像您正将此信息转发给网络安全专家一样。",
"xpack.securitySolution.assistant.content.prompts.system.superheroSystemPromptName": "已增强系统提示",
"xpack.securitySolution.assistant.content.prompts.system.useTheFollowingContextToAnswer": "使用以下上下文回答问题:",
"xpack.securitySolution.assistant.content.prompts.system.youAreAHelpfulExpertAssistant": "您是一位可帮助回答 Elastic Security 相关问题的专家助手。",
"xpack.securitySolution.assistant.content.prompts.user.finallySuggestInvestigationGuideAndFormatAsMarkdown": "添加描述、建议操作和带项目符号的分类步骤。使用提供的 MITRE ATT&CK 数据以从 MITRE 添加更多上下文和建议,以及指向 MITRE 网站上的相关页面的超链接。确保包括上下文中的用户和主机风险分数数据。您的响应应包含指向 Elastic Security 特定功能的步骤包括终端响应操作、Elastic 代理 OSQuery 管理器集成(带示例 osquery 查询)、时间线和实体分析,以及所有相关 Elastic Security 文档的链接。",
"xpack.securitySolution.assistant.content.prompts.user.thenSummarizeSuggestedKqlAndEqlQueries": "评估来自上述上下文的事件,并以用于我的 Elastic Security 案例的 Markdown 语法对您的输出进行全面格式化。",

View file

@@ -5091,6 +5091,10 @@
version "0.0.0"
uid ""
"@kbn/langchain@link:x-pack/packages/kbn-langchain":
version "0.0.0"
uid ""
"@kbn/language-documentation-popover@link:packages/kbn-language-documentation-popover":
version "0.0.0"
uid ""