[Stack connector] Forward telemetryMetadata.pluginId to EIS use case header (#214269)

This commit is contained in:
Steph Milovic 2025-03-13 09:44:06 -06:00 committed by GitHub
parent ebe7ef6da0
commit 13906cbb76
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
15 changed files with 70 additions and 3 deletions

View file

@@ -11,6 +11,7 @@ import { LLM } from '@langchain/core/language_models/llms';
import { get } from 'lodash/fp';
import { v4 as uuidv4 } from 'uuid';
import { PublicMethodsOf } from '@kbn/utility-types';
import type { TelemetryMetadata } from '@kbn/actions-plugin/server/lib';
import { DEFAULT_TIMEOUT, getDefaultArguments } from './constants';
import { getMessageContentAndRole } from './helpers';
@@ -28,6 +29,7 @@ interface ActionsClientLlmParams {
timeout?: number;
traceId?: string;
traceOptions?: TraceOptions;
telemetryMetadata?: TelemetryMetadata;
}
export class ActionsClientLlm extends LLM {
@@ -36,6 +38,7 @@ export class ActionsClientLlm extends LLM {
#logger: Logger;
#traceId: string;
#timeout?: number;
telemetryMetadata?: TelemetryMetadata;
// Local `llmType` as it can change and needs to be accessed by abstract `_llmType()` method
// Not using getter as `this._llmType()` is called in the constructor via `super({})`
@@ -54,6 +57,7 @@ export class ActionsClientLlm extends LLM {
temperature,
timeout,
traceOptions,
telemetryMetadata,
}: ActionsClientLlmParams) {
super({
callbacks: [...(traceOptions?.tracers ?? [])],
@@ -67,6 +71,7 @@
this.#timeout = timeout;
this.model = model;
this.temperature = temperature;
this.telemetryMetadata = telemetryMetadata;
}
_llmType() {
@@ -102,6 +107,7 @@
model: this.model,
messages: [assistantMessage], // the assistant message
},
telemetryMetadata: this.telemetryMetadata,
},
}
: {
@@ -113,6 +119,7 @@
...getDefaultArguments(this.llmType, this.temperature),
// This timeout is large because LangChain prompts can be complicated and take a long time
timeout: this.#timeout ?? DEFAULT_TIMEOUT,
telemetryMetadata: this.telemetryMetadata,
},
},
};

View file

@@ -77,6 +77,9 @@ export function registerApiAnalysisRoutes(router: IRouter<AutomaticImportRouteHa
maxTokens: 4096,
signal: abortSignal,
streaming: false,
telemetryMetadata: {
pluginId: 'automatic_import',
},
});
const parameters = {

View file

@@ -88,6 +88,9 @@ export function registerAnalyzeLogsRoutes(router: IRouter<AutomaticImportRouteHa
maxTokens: 4096,
signal: abortSignal,
streaming: false,
telemetryMetadata: {
pluginId: 'automatic_import',
},
});
const options = {
callbacks: [

View file

@@ -93,6 +93,9 @@ export function registerCategorizationRoutes(router: IRouter<AutomaticImportRout
maxTokens: 4096,
signal: abortSignal,
streaming: false,
telemetryMetadata: {
pluginId: 'automatic_import',
},
});
const parameters = {

View file

@@ -77,6 +77,9 @@ export function registerCelInputRoutes(router: IRouter<AutomaticImportRouteHandl
maxTokens: 4096,
signal: abortSignal,
streaming: false,
telemetryMetadata: {
pluginId: 'automatic_import',
},
});
const parameters = {

View file

@@ -87,6 +87,9 @@ export function registerEcsRoutes(router: IRouter<AutomaticImportRouteHandlerCon
maxTokens: 4096,
signal: abortSignal,
streaming: false,
telemetryMetadata: {
pluginId: 'automatic_import',
},
});
const parameters = {

View file

@@ -87,6 +87,9 @@ export function registerRelatedRoutes(router: IRouter<AutomaticImportRouteHandle
maxTokens: 4096,
signal: abortSignal,
streaming: false,
telemetryMetadata: {
pluginId: 'automatic_import',
},
});
const parameters = {

View file

@@ -70,6 +70,7 @@ describe('InferenceConnector', () => {
const response = await connector.performApiUnifiedCompletion({
body: { messages: [{ content: 'What is Elastic?', role: 'user' }] },
telemetryMetadata: { pluginId: 'security_ai_assistant' },
});
expect(mockEsClient.transport.request).toBeCalledTimes(1);
expect(mockEsClient.transport.request).toHaveBeenCalledWith(
@@ -86,7 +87,13 @@ describe('InferenceConnector', () => {
method: 'POST',
path: '_inference/chat_completion/test/_stream',
},
{ asStream: true, meta: true }
{
asStream: true,
meta: true,
headers: {
'X-Elastic-Product-Use-Case': 'security_ai_assistant',
},
}
);
expect(response.choices[0].message.content).toEqual(' you');
});
@@ -290,7 +297,10 @@ describe('InferenceConnector', () => {
method: 'POST',
path: '_inference/chat_completion/test/_stream',
},
{ asStream: true, meta: true }
{
asStream: true,
meta: true,
}
);
});
@@ -312,7 +322,11 @@ describe('InferenceConnector', () => {
method: 'POST',
path: '_inference/chat_completion/test/_stream',
},
{ asStream: true, meta: true, signal }
{
asStream: true,
meta: true,
signal,
}
);
});

View file

@@ -196,6 +196,13 @@ export class InferenceConnector extends SubActionConnector<Config, Secrets> {
asStream: true,
meta: true,
signal: params.signal,
...(params.telemetryMetadata?.pluginId
? {
headers: {
'X-Elastic-Product-Use-Case': params.telemetryMetadata?.pluginId,
},
}
: {}),
}
);
// errors should be thrown as it will not be a stream response

View file

@@ -61,5 +61,8 @@ export const getEvaluatorLlm = async ({
temperature: 0, // zero temperature for evaluation
timeout: connectorTimeout,
traceOptions,
telemetryMetadata: {
pluginId: 'security_attack_discovery',
},
});
};

View file

@@ -92,6 +92,9 @@ export const evaluateAttackDiscovery = async ({
temperature: 0, // zero temperature for attack discovery, because we want structured JSON output
timeout: connectorTimeout,
traceOptions,
telemetryMetadata: {
pluginId: 'security_attack_discovery',
},
});
const graph = getDefaultAttackDiscoveryGraph({

View file

@@ -89,6 +89,9 @@ export const callAssistantGraph: AgentExecutor<true | false> = async ({
// failure could be due to bad connector, we should deliver that result to the client asap
maxRetries: 0,
convertSystemMessageToHumanContent: false,
telemetryMetadata: {
pluginId: 'security_ai_assistant',
},
});
const anonymizationFieldsRes =

View file

@@ -87,6 +87,9 @@ export const invokeAttackDiscoveryGraph = async ({
temperature: 0, // zero temperature for attack discovery, because we want structured JSON output
timeout: connectorTimeout,
traceOptions,
telemetryMetadata: {
pluginId: 'security_attack_discovery',
},
});
if (llm == null) {

View file

@@ -156,6 +156,9 @@ export function getAssistantToolParams({
temperature: 0, // zero temperature because we want structured JSON output
timeout: connectorTimeout,
traceOptions,
telemetryMetadata: {
pluginId: 'security_defend_insights',
},
});
return {
@@ -443,6 +446,9 @@ export const invokeDefendInsightsGraph = async ({
temperature: 0,
timeout: connectorTimeout,
traceOptions,
telemetryMetadata: {
pluginId: 'security_defend_insights',
},
});
if (llm == null) {

View file

@@ -250,6 +250,9 @@ export const postEvaluateRoute = (
streaming: false,
maxRetries: 0,
convertSystemMessageToHumanContent: false,
telemetryMetadata: {
pluginId: 'security_ai_assistant',
},
});
const llm = createLlmInstance();
const anonymizationFieldsRes =