[8.15] [Security solution] Add model parameter to token telemetry (#187783) (#187793)

# Backport

This will backport the following commits from `main` to `8.15`:
- [[Security solution] Add model parameter to token telemetry (#187783)](https://github.com/elastic/kibana/pull/187783)

<!--- Backport version: 9.4.3 -->

### Questions?
Please refer to the [Backport tool documentation](https://github.com/sqren/backport)

<!--BACKPORT [{"author":{"name":"Steph
Milovic","email":"stephanie.milovic@elastic.co"},"sourceCommit":{"committedDate":"2024-07-08T18:53:25Z","message":"[Security
solution] Add model parameter to token telemetry
(#187783)","sha":"2b5ff7709515836b6ce91d6ce28ed3316f78d930","branchLabelMapping":{"^v8.16.0$":"main","^v(\\d+).(\\d+).\\d+$":"$1.$2"}},"sourcePullRequest":{"labels":["release_note:skip","Team:Security
Generative AI","v8.15.0","v8.16.0"],"title":"[Security solution] Add
model parameter to token
telemetry","number":187783,"url":"https://github.com/elastic/kibana/pull/187783","mergeCommit":{"message":"[Security
solution] Add model parameter to token telemetry
(#187783)","sha":"2b5ff7709515836b6ce91d6ce28ed3316f78d930"}},"sourceBranch":"main","suggestedTargetBranches":["8.15"],"targetPullRequestStates":[{"branch":"8.15","label":"v8.15.0","branchLabelMappingKey":"^v(\\d+).(\\d+).\\d+$","isSourceBranch":false,"state":"NOT_CREATED"},{"branch":"main","label":"v8.16.0","branchLabelMappingKey":"^v8.16.0$","isSourceBranch":true,"state":"MERGED","url":"https://github.com/elastic/kibana/pull/187783","number":187783,"mergeCommit":{"message":"[Security
solution] Add model parameter to token telemetry
(#187783)","sha":"2b5ff7709515836b6ce91d6ce28ed3316f78d930"}}]}]
BACKPORT-->

Co-authored-by: Steph Milovic <stephanie.milovic@elastic.co>
Co-authored-by: Elastic Machine <elasticmachine@users.noreply.github.com>
Commit 17bf446e73 (parent d018faaed5), authored by Kibana Machine and committed via GitHub on 2024-07-08 23:54:15 +02:00.
3 changed files with 36 additions and 1 deletion


```diff
@@ -1695,10 +1695,35 @@ describe('Event log', () => {
       { actionTypeId: '.gen-ai', completion_tokens: 9, prompt_tokens: 10, total_tokens: 19 }
     );
   });
+  test('reports telemetry for token count events with additional properties', async () => {
+    const executorMock = setupActionExecutorMock('.gen-ai', {} as ConnectorType['validate'], {
+      defaultModel: 'gpt-4',
+      apiProvider: 'OpenAI',
+    });
+    executorMock.mockResolvedValue({
+      actionId: '1',
+      status: 'ok',
+      // @ts-ignore
+      data: mockGenAi,
+    });
+    await actionExecutor.execute(executeParams);
+    expect(actionExecutorInitializationParams.analyticsService.reportEvent).toHaveBeenCalledWith(
+      GEN_AI_TOKEN_COUNT_EVENT.eventType,
+      {
+        actionTypeId: '.gen-ai',
+        completion_tokens: 9,
+        prompt_tokens: 10,
+        total_tokens: 19,
+        model: 'gpt-4',
+        provider: 'OpenAI',
+      }
+    );
+  });
 });
 function setupActionExecutorMock(
   actionTypeId = 'test',
-  validationOverride?: ConnectorType['validate']
+  validationOverride?: ConnectorType['validate'],
+  additionalConfig?: Record<string, unknown>
 ) {
   const thisConnectorType: jest.Mocked<ConnectorType> = {
     ...connectorType,
@@ -1712,6 +1737,7 @@ function setupActionExecutorMock(
     actionTypeId,
     config: {
       bar: true,
+      ...additionalConfig,
     },
     secrets: {
       baz: true,
```
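
The mock change above boils down to a shallow spread merge. The sketch below uses a hypothetical helper name (`buildMockConnectorConfig`, not part of this PR); only the spread pattern mirrors what `setupActionExecutorMock` now does with its new `additionalConfig` parameter.

```ts
// Hypothetical standalone sketch of the merge performed inside setupActionExecutorMock.
function buildMockConnectorConfig(additionalConfig?: Record<string, unknown>) {
  return {
    bar: true,
    // Later spreads win, so a test can add or override config keys as needed.
    ...additionalConfig,
  };
}

// As used by the new test: the gen-ai fields ride along with the base mock config.
const genAiConfig = buildMockConnectorConfig({ defaultModel: 'gpt-4', apiProvider: 'OpenAI' });
// genAiConfig -> { bar: true, defaultModel: 'gpt-4', apiProvider: 'OpenAI' }
console.log(genAiConfig);
```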


```diff
@@ -603,6 +603,7 @@ export class ActionExecutor {
             ...(actionTypeId === '.gen-ai' && config?.apiProvider != null
               ? { provider: config?.apiProvider }
               : {}),
+            ...(config?.defaultModel != null ? { model: config?.defaultModel } : {}),
           });
         }
       })
```
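
To read the conditional spreads outside the diff context, here is a self-contained sketch. The function and interfaces are hypothetical (this is not the actual `ActionExecutor` code), but the spreads mirror the change and produce exactly the payload the new test asserts on.

```ts
// Standalone sketch only: names below are hypothetical, the spread logic mirrors the diff.
interface GenAiConnectorConfig {
  apiProvider?: string;
  defaultModel?: string;
}

interface GenAiTokenCountEvent {
  actionTypeId: string;
  total_tokens: number;
  prompt_tokens: number;
  completion_tokens: number;
  provider?: string;
  model?: string;
}

function buildTokenCountEvent(
  actionTypeId: string,
  tokens: { total: number; prompt: number; completion: number },
  config?: GenAiConnectorConfig
): GenAiTokenCountEvent {
  return {
    actionTypeId,
    total_tokens: tokens.total,
    prompt_tokens: tokens.prompt,
    completion_tokens: tokens.completion,
    // Fields are omitted entirely (not set to null) when the connector config
    // does not define apiProvider / defaultModel.
    ...(actionTypeId === '.gen-ai' && config?.apiProvider != null
      ? { provider: config.apiProvider }
      : {}),
    ...(config?.defaultModel != null ? { model: config.defaultModel } : {}),
  };
}

// With the config used by the new test ({ defaultModel: 'gpt-4', apiProvider: 'OpenAI' }):
// { actionTypeId: '.gen-ai', total_tokens: 19, prompt_tokens: 10, completion_tokens: 9,
//   provider: 'OpenAI', model: 'gpt-4' }
const example = buildTokenCountEvent(
  '.gen-ai',
  { total: 19, prompt: 10, completion: 9 },
  { apiProvider: 'OpenAI', defaultModel: 'gpt-4' }
);
console.log(example);
```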


```diff
@@ -13,6 +13,7 @@ export const GEN_AI_TOKEN_COUNT_EVENT: EventTypeOpts<{
   prompt_tokens: number;
   completion_tokens: number;
   provider?: string;
+  model?: string;
 }> = {
   eventType: 'gen_ai_token_count',
   schema: {
@@ -51,6 +52,13 @@ export const GEN_AI_TOKEN_COUNT_EVENT: EventTypeOpts<{
         optional: true,
       },
     },
+    model: {
+      type: 'keyword',
+      _meta: {
+        description: 'LLM model',
+        optional: true,
+      },
+    },
   },
 };
 
```
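
For context on how the extended schema is consumed, a hedged sketch based on Kibana's core analytics service (`registerEventType` / `reportEvent`). The wrapper function name and the import path for the event definition are assumptions, not code from this PR.

```ts
import type { CoreSetup } from '@kbn/core/server';
// Assumed local path for the module that exports GEN_AI_TOKEN_COUNT_EVENT.
import { GEN_AI_TOKEN_COUNT_EVENT } from './event_based_telemetry';

// Hypothetical wrapper: registers the event type and reports one event carrying the
// new optional `model` field alongside the existing token counts and provider.
export function reportGenAiTokenCount(core: CoreSetup) {
  core.analytics.registerEventType(GEN_AI_TOKEN_COUNT_EVENT);

  core.analytics.reportEvent(GEN_AI_TOKEN_COUNT_EVENT.eventType, {
    actionTypeId: '.gen-ai',
    total_tokens: 19,
    prompt_tokens: 10,
    completion_tokens: 9,
    // Optional keyword fields: omit them when the connector config defines no
    // apiProvider / defaultModel.
    provider: 'OpenAI',
    model: 'gpt-4',
  });
}
```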