mirror of
https://github.com/elastic/kibana.git
synced 2025-06-27 10:40:07 -04:00
[Security Assistant] Adds BuildKite pipeline for running Security GenAI Evaluations weekly (#215254)
## Summary Introduces a new `security_solution/gen_ai_evals.yml` BuildKite pipeline for automatically running our Assistant and Attack Discovery evaluation suites weekly. ### To Run Locally: Ensure you are authenticated with vault for LLM + LangSmith creds: > See [internal docs](https://github.com/elastic/infra/blob/master/docs/vault/README.md#login-with-your-okta) for setup/login instructions. Fetch Connectors and LangSmith creds: > [!NOTE] > In discussion with @elastic/kibana-operations it was preferred to use the ci-prod secrets vault, so we cannot self-manage the secrets. To test this locally though, you can grab the secrets and follow the instructions in this [paste bin](https://p.elstc.co/paste/q7k+zYOc#PN0kasw11u2J0XWC2Ls5PMNWreKzKTpgWA1wtsPzeH+). ``` cd x-pack/test/security_solution_api_integration node scripts/genai/vault/retrieve_secrets.js ``` Navigate to api integration directory, load the env vars, and start server: ``` cd x-pack/test/security_solution_api_integration export KIBANA_SECURITY_TESTING_AI_CONNECTORS=$(base64 -w 0 < scripts/genai/vault/connector_config.json) && export KIBANA_SECURITY_TESTING_LANGSMITH_KEY=$(base64 -w 0 < scripts/genai/vault/langsmith_key.txt) yarn genai_evals:server:ess ``` Then in another terminal, load vars and run the tests: ``` cd x-pack/test/security_solution_api_integration export KIBANA_SECURITY_TESTING_AI_CONNECTORS=$(base64 -w 0 < scripts/genai/vault/connector_config.json) && export KIBANA_SECURITY_TESTING_LANGSMITH_KEY=$(base64 -w 0 < scripts/genai/vault/langsmith_key.txt) yarn genai_evals:runner:ess ``` ### To manually run on BuildKite: Navigate to [BuildKite](https://buildkite.com/elastic?filter=ftr-security-solution-gen-ai-evaluations) and run `ftr-security-solution-gen-ai-evaluations` pipeline. 
### To manually run on BuildKite for specific PR: In `.buildkite/ftr_security_stateful_configs.yml`, temporarily move the `genai/evaluations/trial_license_complete_tier/configs/ess.config.ts` line down to the `enabled` section. Will see if we can do this without requiring a commit. @elastic/kibana-operations is it possible to set a buildkite env var that can be read in FTR tests when a specific GitHub label is added to the PR? I.e. can I create a `SecurityGenAI:Run Evals` label that when added will run this suite as part of the build? > [!NOTE] > Currently the connectors secrets only include `gpt-4o` and `gpt-4o-mini`. Waiting on finalized list w/ credentials from @jamesspi and @peluja1012 and then we can have ops update using the scripts included in this PR. --------- Co-authored-by: kibanamachine <42973632+kibanamachine@users.noreply.github.com> Co-authored-by: Patryk Kopycinski <patryk.kopycinski@elastic.co>
This commit is contained in:
parent
91b0988c2c
commit
e57663a0cf
58 changed files with 9635 additions and 198 deletions
|
@ -24,6 +24,9 @@ disabled:
|
|||
# Playwright
|
||||
- x-pack/test/security_solution_playwright/playwright.config.ts
|
||||
|
||||
# Gen AI Evals run weekly via their own pipeline
|
||||
- x-pack/test/security_solution_api_integration/test_suites/genai/evaluations/trial_license_complete_tier/configs/ess.config.ts
|
||||
|
||||
defaultQueue: 'n2-4-spot'
|
||||
enabled:
|
||||
- x-pack/test/security_solution_api_integration/test_suites/detections_response/detection_engine/actions/trial_license_complete_tier/configs/ess.config.ts
|
||||
|
|
|
@ -44,6 +44,7 @@ spec:
|
|||
- https://github.com/elastic/kibana/blob/main/.buildkite/pipeline-resource-definitions/kibana-serverless-release.yml
|
||||
- https://github.com/elastic/kibana/blob/main/.buildkite/pipeline-resource-definitions/kibana-vm-images.yml
|
||||
- https://github.com/elastic/kibana/blob/main/.buildkite/pipeline-resource-definitions/scalability_testing-daily.yml
|
||||
- https://github.com/elastic/kibana/blob/main/.buildkite/pipeline-resource-definitions/security-solution-ess/gen-ai-evals.yml
|
||||
- https://github.com/elastic/kibana/blob/main/.buildkite/pipeline-resource-definitions/security-solution-ess/security-solution-ess.yml
|
||||
- https://github.com/elastic/kibana/blob/main/.buildkite/pipeline-resource-definitions/security-solution-quality-gate/kibana-serverless-security-solution-quality-gate-defend-workflows.yml
|
||||
- https://github.com/elastic/kibana/blob/main/.buildkite/pipeline-resource-definitions/security-solution-quality-gate/kibana-serverless-security-solution-quality-gate-detection-engine.yml
|
||||
|
|
|
@ -0,0 +1,50 @@
|
|||
# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/rre.schema.json
|
||||
apiVersion: backstage.io/v1alpha1
|
||||
kind: Resource
|
||||
metadata:
|
||||
name: bk-kibana-security-solution-ess-gen-ai-evals
|
||||
description: "Executes Security GenAI Evals"
|
||||
spec:
|
||||
type: buildkite-pipeline
|
||||
owner: 'group:security-generative-ai'
|
||||
system: buildkite
|
||||
implementation:
|
||||
apiVersion: buildkite.elastic.dev/v1
|
||||
kind: Pipeline
|
||||
metadata:
|
||||
name: "Kibana / ESS / Security Solution / Gen AI Evals"
|
||||
description: "Executes Security GenAI Evals"
|
||||
spec:
|
||||
env:
|
||||
SLACK_NOTIFICATIONS_CHANNEL: '#genai-security-dev'
|
||||
ELASTIC_SLACK_NOTIFICATIONS_ENABLED: 'true'
|
||||
allow_rebuilds: true
|
||||
branch_configuration: main
|
||||
cancel_intermediate_builds: false
|
||||
default_branch: main
|
||||
repository: elastic/kibana
|
||||
pipeline_file: .buildkite/pipelines/security_solution/gen_ai_evals.yml
|
||||
provider_settings:
|
||||
build_branches: false
|
||||
build_pull_requests: false
|
||||
publish_commit_status: false
|
||||
trigger_mode: none
|
||||
prefix_pull_request_fork_branch_names: false
|
||||
skip_pull_request_builds_for_existing_commits: false
|
||||
build_tags: false
|
||||
teams:
|
||||
kibana-operations:
|
||||
access_level: MANAGE_BUILD_AND_READ
|
||||
security-generative_ai:
|
||||
access_level: MANAGE_BUILD_AND_READ
|
||||
everyone:
|
||||
access_level: READ_ONLY
|
||||
schedules:
|
||||
Weekly build:
|
||||
cronline: 0 0 * * 1 GMT
|
||||
message: Weekly Security GenAI Evals
|
||||
branch: main
|
||||
tags:
|
||||
- kibana
|
||||
- security-solution
|
||||
- gen-ai-evals
|
|
@ -0,0 +1,30 @@
|
|||
steps:
|
||||
- group: Security Solution Gen AI
|
||||
key: security-solution-gen-ai
|
||||
depends_on:
|
||||
- build
|
||||
- quick_checks
|
||||
- checks
|
||||
- linting
|
||||
- linting_with_types
|
||||
- check_types
|
||||
- check_oas_snapshot
|
||||
steps:
|
||||
- command: .buildkite/scripts/steps/test/ftr_configs.sh
|
||||
env:
|
||||
FTR_CONFIG: "x-pack/test/security_solution_api_integration/test_suites/genai/evaluations/trial_license_complete_tier/configs/ess.config.ts"
|
||||
FTR_CONFIG_GROUP_KEY: 'ftr-security-solution-gen-ai-evaluations'
|
||||
FTR_SECURITY_GEN_AI: "1"
|
||||
label: Security Solution Gen AI Evaluations
|
||||
key: security-solution-gen-ai-evaluations
|
||||
timeout_in_minutes: 50
|
||||
parallelism: 1
|
||||
agents:
|
||||
machineType: n2-standard-4
|
||||
preemptible: true
|
||||
retry:
|
||||
automatic:
|
||||
- exit_status: '-1'
|
||||
limit: 3
|
||||
- exit_status: '*'
|
||||
limit: 1
|
48
.buildkite/pipelines/security_solution/gen_ai_evals.yml
Normal file
48
.buildkite/pipelines/security_solution/gen_ai_evals.yml
Normal file
|
@ -0,0 +1,48 @@
|
|||
env:
|
||||
FTR_SECURITY_GEN_AI: "1"
|
||||
steps:
|
||||
- label: '👨🔧 Pre-Build'
|
||||
command: .buildkite/scripts/lifecycle/pre_build.sh
|
||||
agents:
|
||||
image: family/kibana-ubuntu-2004
|
||||
imageProject: elastic-images-prod
|
||||
provider: gcp
|
||||
machineType: n2-standard-2
|
||||
|
||||
- wait
|
||||
|
||||
- label: '🧑🏭 Build Kibana Distribution'
|
||||
command: .buildkite/scripts/steps/build_kibana.sh
|
||||
agents:
|
||||
image: family/kibana-ubuntu-2004
|
||||
imageProject: elastic-images-prod
|
||||
provider: gcp
|
||||
machineType: n2-standard-8
|
||||
key: build
|
||||
if: "build.env('KIBANA_BUILD_ID') == null || build.env('KIBANA_BUILD_ID') == ''"
|
||||
|
||||
- wait
|
||||
|
||||
- command: .buildkite/scripts/steps/test/ftr_configs.sh
|
||||
env:
|
||||
FTR_CONFIG: "x-pack/test/security_solution_api_integration/test_suites/genai/evaluations/trial_license_complete_tier/configs/ess.config.ts"
|
||||
FTR_CONFIG_GROUP_KEY: 'ftr-security-solution-gen-ai-evaluations'
|
||||
FTR_SECURITY_GEN_AI: "1"
|
||||
label: Security Solution Gen AI Evaluations
|
||||
key: ftr-security-solution-gen-ai-evaluations
|
||||
timeout_in_minutes: 50
|
||||
parallelism: 1
|
||||
agents:
|
||||
image: family/kibana-ubuntu-2004
|
||||
imageProject: elastic-images-prod
|
||||
provider: gcp
|
||||
machineType: n2-standard-4
|
||||
preemptible: true
|
||||
retry:
|
||||
automatic:
|
||||
- exit_status: '-1'
|
||||
limit: 3
|
||||
- exit_status: '*'
|
||||
limit: 1
|
||||
|
||||
|
|
@ -140,6 +140,15 @@ EOF
|
|||
fi
|
||||
}
|
||||
|
||||
# Set up Security GenAI keys
|
||||
{
|
||||
if [[ "${FTR_SECURITY_GEN_AI:-}" =~ ^(1|true)$ ]]; then
|
||||
echo "FTR_SECURITY_GEN_AI was set - exposing LLM connectors"
|
||||
export KIBANA_SECURITY_TESTING_AI_CONNECTORS="$(vault_get security-gen-ai/connectors config)"
|
||||
export KIBANA_SECURITY_TESTING_LANGSMITH_KEY="$(vault_get security-gen-ai/langsmith key)"
|
||||
fi
|
||||
}
|
||||
|
||||
# Set up GCS Service Account for CDN
|
||||
{
|
||||
GCS_SA_CDN_KEY="$(vault_get gcs-sa-cdn-prod key)"
|
||||
|
|
|
@ -475,6 +475,12 @@ const getPipeline = (filename: string, removeSteps = true) => {
|
|||
pipeline.push(getPipeline('.buildkite/pipelines/pull_request/scout_tests.yml'));
|
||||
}
|
||||
|
||||
if (GITHUB_PR_LABELS.includes('ci:security-genai-run-evals')) {
|
||||
pipeline.push(
|
||||
getPipeline('.buildkite/pipelines/pull_request/security_solution/gen_ai_evals.yml')
|
||||
);
|
||||
}
|
||||
|
||||
pipeline.push(getPipeline('.buildkite/pipelines/pull_request/post_build.yml'));
|
||||
|
||||
emitPipeline(pipeline);
|
||||
|
|
|
@ -110,3 +110,7 @@ Create or update a serverless Security project on Elastic Cloud QA.
|
|||
#### `ci:project-persist-deployment`
|
||||
|
||||
Prevents an existing deployment from being shutdown due to inactivity.
|
||||
|
||||
#### `ci:security-genai-run-evals`
|
||||
|
||||
Run evaluations for the GenAI security evaluation suite.
|
|
@ -56,7 +56,9 @@ export const EvaluationSettings: React.FC = React.memo(() => {
|
|||
http,
|
||||
toasts,
|
||||
});
|
||||
const { data: evalData } = useEvaluationData({ http });
|
||||
const { data: evalData } = useEvaluationData({
|
||||
http,
|
||||
});
|
||||
const defaultGraphs = useMemo(() => (evalData as GetEvaluateResponse)?.graphs ?? [], [evalData]);
|
||||
const datasets = useMemo(() => (evalData as GetEvaluateResponse)?.datasets ?? [], [evalData]);
|
||||
|
||||
|
|
|
@ -7,11 +7,15 @@
|
|||
|
||||
import { schema, type TypeOf } from '@kbn/config-schema';
|
||||
import type { PluginConfigDescriptor } from '@kbn/core/server';
|
||||
import { internalElserInferenceId } from '../common/consts';
|
||||
|
||||
const configSchema = schema.object({
|
||||
artifactRepositoryUrl: schema.string({
|
||||
defaultValue: 'https://kibana-knowledge-base-artifacts.elastic.co',
|
||||
}),
|
||||
elserInferenceId: schema.string({
|
||||
defaultValue: internalElserInferenceId,
|
||||
}),
|
||||
});
|
||||
|
||||
export const config: PluginConfigDescriptor<ProductDocBaseConfig> = {
|
||||
|
|
|
@ -84,6 +84,7 @@ export class ProductDocBasePlugin
|
|||
kibanaVersion: this.context.env.packageInfo.version,
|
||||
artifactsFolder: Path.join(getDataPath(), 'ai-kb-artifacts'),
|
||||
artifactRepositoryUrl: this.context.config.get().artifactRepositoryUrl,
|
||||
elserInferenceId: this.context.config.get().elserInferenceId,
|
||||
logger: this.logger.get('package-installer'),
|
||||
});
|
||||
|
||||
|
|
|
@ -28,6 +28,7 @@ import { loggerMock, type MockedLogger } from '@kbn/logging-mocks';
|
|||
import { installClientMock } from '../doc_install_status/service.mock';
|
||||
import type { ProductInstallState } from '../../../common/install_status';
|
||||
import { PackageInstaller } from './package_installer';
|
||||
import { defaultInferenceEndpoints } from '@kbn/inference-common';
|
||||
|
||||
const artifactsFolder = '/lost';
|
||||
const artifactRepositoryUrl = 'https://repository.com';
|
||||
|
@ -114,6 +115,7 @@ describe('PackageInstaller', () => {
|
|||
|
||||
expect(createIndexMock).toHaveBeenCalledTimes(1);
|
||||
expect(createIndexMock).toHaveBeenCalledWith({
|
||||
elserInferenceId: defaultInferenceEndpoints.ELSER,
|
||||
indexName,
|
||||
mappings,
|
||||
manifestVersion: TEST_FORMAT_VERSION,
|
||||
|
|
|
@ -13,6 +13,7 @@ import {
|
|||
DocumentationProduct,
|
||||
type ProductName,
|
||||
} from '@kbn/product-doc-common';
|
||||
import { defaultInferenceEndpoints } from '@kbn/inference-common';
|
||||
import type { ProductDocInstallClient } from '../doc_install_status';
|
||||
import {
|
||||
downloadToDisk,
|
||||
|
@ -37,6 +38,7 @@ interface PackageInstallerOpts {
|
|||
productDocClient: ProductDocInstallClient;
|
||||
artifactRepositoryUrl: string;
|
||||
kibanaVersion: string;
|
||||
elserInferenceId?: string;
|
||||
}
|
||||
|
||||
export class PackageInstaller {
|
||||
|
@ -46,6 +48,7 @@ export class PackageInstaller {
|
|||
private readonly productDocClient: ProductDocInstallClient;
|
||||
private readonly artifactRepositoryUrl: string;
|
||||
private readonly currentVersion: string;
|
||||
private readonly elserInferenceId?: string;
|
||||
|
||||
constructor({
|
||||
artifactsFolder,
|
||||
|
@ -53,6 +56,7 @@ export class PackageInstaller {
|
|||
esClient,
|
||||
productDocClient,
|
||||
artifactRepositoryUrl,
|
||||
elserInferenceId,
|
||||
kibanaVersion,
|
||||
}: PackageInstallerOpts) {
|
||||
this.esClient = esClient;
|
||||
|
@ -61,6 +65,7 @@ export class PackageInstaller {
|
|||
this.artifactRepositoryUrl = artifactRepositoryUrl;
|
||||
this.currentVersion = majorMinor(kibanaVersion);
|
||||
this.log = logger;
|
||||
this.elserInferenceId = elserInferenceId || defaultInferenceEndpoints.ELSER;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -146,7 +151,11 @@ export class PackageInstaller {
|
|||
productVersion,
|
||||
});
|
||||
|
||||
await ensureDefaultElserDeployed({ client: this.esClient });
|
||||
if (this.elserInferenceId === defaultInferenceEndpoints.ELSER) {
|
||||
await ensureDefaultElserDeployed({
|
||||
client: this.esClient,
|
||||
});
|
||||
}
|
||||
|
||||
const artifactFileName = getArtifactName({ productName, productVersion });
|
||||
const artifactUrl = `${this.artifactRepositoryUrl}/${artifactFileName}`;
|
||||
|
@ -172,6 +181,7 @@ export class PackageInstaller {
|
|||
mappings,
|
||||
manifestVersion,
|
||||
esClient: this.esClient,
|
||||
elserInferenceId: this.elserInferenceId,
|
||||
log: this.log,
|
||||
});
|
||||
|
||||
|
|
|
@ -17,18 +17,20 @@ export const createIndex = async ({
|
|||
manifestVersion,
|
||||
mappings,
|
||||
log,
|
||||
elserInferenceId = internalElserInferenceId,
|
||||
}: {
|
||||
esClient: ElasticsearchClient;
|
||||
indexName: string;
|
||||
manifestVersion: string;
|
||||
mappings: MappingTypeMapping;
|
||||
log: Logger;
|
||||
elserInferenceId?: string;
|
||||
}) => {
|
||||
log.debug(`Creating index ${indexName}`);
|
||||
|
||||
const legacySemanticText = isLegacySemanticTextVersion(manifestVersion);
|
||||
|
||||
overrideInferenceId(mappings, internalElserInferenceId);
|
||||
overrideInferenceId(mappings, elserInferenceId);
|
||||
|
||||
await esClient.indices.create({
|
||||
index: indexName,
|
||||
|
|
|
@ -47,11 +47,13 @@ const indexContentFile = async ({
|
|||
contentBuffer,
|
||||
esClient,
|
||||
legacySemanticText,
|
||||
elserInferenceId = internalElserInferenceId,
|
||||
}: {
|
||||
indexName: string;
|
||||
contentBuffer: Buffer;
|
||||
esClient: ElasticsearchClient;
|
||||
legacySemanticText: boolean;
|
||||
elserInferenceId?: string;
|
||||
}) => {
|
||||
const fileContent = contentBuffer.toString('utf-8');
|
||||
const lines = fileContent.split('\n');
|
||||
|
@ -65,7 +67,7 @@ const indexContentFile = async ({
|
|||
.map((doc) =>
|
||||
rewriteInferenceId({
|
||||
document: doc,
|
||||
inferenceId: internalElserInferenceId,
|
||||
inferenceId: elserInferenceId,
|
||||
legacySemanticText,
|
||||
})
|
||||
);
|
||||
|
|
|
@ -11,6 +11,10 @@
|
|||
"id": "elasticAssistant",
|
||||
"browser": false,
|
||||
"server": true,
|
||||
"configPath": [
|
||||
"xpack",
|
||||
"elasticAssistant"
|
||||
],
|
||||
"requiredPlugins": [
|
||||
"actions",
|
||||
"alerting",
|
||||
|
|
|
@ -63,7 +63,7 @@ describe('AIAssistantKnowledgeBaseDataClient', () => {
|
|||
kibanaVersion: '8.8.0',
|
||||
ml,
|
||||
getElserId: getElserId.mockResolvedValue('elser-id'),
|
||||
modelIdOverride: false,
|
||||
elserInferenceId: ASSISTANT_ELSER_INFERENCE_ID,
|
||||
getIsKBSetupInProgress: mockGetIsKBSetupInProgress.mockReturnValue(false),
|
||||
getProductDocumentationStatus: jest.fn().mockResolvedValue('installed'),
|
||||
ingestPipelineResourceName: 'something',
|
||||
|
@ -286,9 +286,6 @@ describe('AIAssistantKnowledgeBaseDataClient', () => {
|
|||
const client = new AIAssistantKnowledgeBaseDataClient(mockOptions);
|
||||
await client.setupKnowledgeBase({});
|
||||
|
||||
// install model
|
||||
expect(trainedModelsProviderMock.installElasticModel).toHaveBeenCalledWith('elser-id');
|
||||
|
||||
expect(loadSecurityLabs).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
|
|
|
@ -78,7 +78,7 @@ import { AUDIT_OUTCOME, KnowledgeBaseAuditAction, knowledgeBaseAuditEvent } from
|
|||
* configuration after initial plugin start
|
||||
*/
|
||||
export interface GetAIAssistantKnowledgeBaseDataClientParams {
|
||||
modelIdOverride?: string;
|
||||
elserInferenceId?: string;
|
||||
manageGlobalKnowledgeBaseAIAssistant?: boolean;
|
||||
}
|
||||
|
||||
|
@ -91,7 +91,7 @@ export interface KnowledgeBaseDataClientParams extends AIAssistantDataClientPara
|
|||
setIsKBSetupInProgress: (spaceId: string, isInProgress: boolean) => void;
|
||||
manageGlobalKnowledgeBaseAIAssistant: boolean;
|
||||
getTrainedModelsProvider: () => ReturnType<TrainedModelsProvider['trainedModelsProvider']>;
|
||||
modelIdOverride: boolean;
|
||||
elserInferenceId?: string;
|
||||
}
|
||||
export class AIAssistantKnowledgeBaseDataClient extends AIAssistantDataClient {
|
||||
constructor(public readonly options: KnowledgeBaseDataClientParams) {
|
||||
|
@ -163,9 +163,8 @@ export class AIAssistantKnowledgeBaseDataClient extends AIAssistantDataClient {
|
|||
};
|
||||
|
||||
public getInferenceEndpointId = async () => {
|
||||
// Don't use default endpoint for pt_tiny_elser
|
||||
if (this.options.modelIdOverride) {
|
||||
return ASSISTANT_ELSER_INFERENCE_ID;
|
||||
if (this.options.elserInferenceId) {
|
||||
return this.options.elserInferenceId;
|
||||
}
|
||||
const esClient = await this.options.elasticsearchClientPromise;
|
||||
|
||||
|
@ -394,39 +393,42 @@ export class AIAssistantKnowledgeBaseDataClient extends AIAssistantDataClient {
|
|||
this.options.logger.info('No legacy ESQL or Security Labs knowledge base docs to delete');
|
||||
}
|
||||
|
||||
/*
|
||||
// `pt_tiny_elser` is deployed before the KB setup is started, so we don't need to check for it
|
||||
if (!this.options.elserInferenceId) {
|
||||
/*
|
||||
#1 Check if ELSER model is downloaded
|
||||
#2 Check if inference endpoint is deployed
|
||||
#3 Dry run ELSER model deployment if not already deployed
|
||||
#4 Create inference endpoint if not deployed / delete and create inference endpoint if model was not deployed
|
||||
#5 Load Security Labs docs
|
||||
*/
|
||||
const isInstalled = await this.isModelInstalled();
|
||||
if (!isInstalled) {
|
||||
await this.installModel();
|
||||
await pRetry(
|
||||
async () =>
|
||||
(await this.isModelInstalled())
|
||||
? Promise.resolve()
|
||||
: Promise.reject(new Error('Model not installed')),
|
||||
{ minTimeout: 30000, maxTimeout: 30000, retries: 20 }
|
||||
);
|
||||
this.options.logger.debug(`ELSER model '${elserId}' successfully installed!`);
|
||||
} else {
|
||||
this.options.logger.debug(`ELSER model '${elserId}' is already installed`);
|
||||
}
|
||||
const isInstalled = await this.isModelInstalled();
|
||||
if (!isInstalled) {
|
||||
await this.installModel();
|
||||
await pRetry(
|
||||
async () =>
|
||||
(await this.isModelInstalled())
|
||||
? Promise.resolve()
|
||||
: Promise.reject(new Error('Model not installed')),
|
||||
{ minTimeout: 30000, maxTimeout: 30000, retries: 20 }
|
||||
);
|
||||
this.options.logger.debug(`ELSER model '${elserId}' successfully installed!`);
|
||||
} else {
|
||||
this.options.logger.debug(`ELSER model '${elserId}' is already installed`);
|
||||
}
|
||||
|
||||
const inferenceExists = await this.isInferenceEndpointExists();
|
||||
if (!inferenceExists) {
|
||||
await this.createInferenceEndpoint();
|
||||
const inferenceExists = await this.isInferenceEndpointExists();
|
||||
if (!inferenceExists) {
|
||||
await this.createInferenceEndpoint();
|
||||
|
||||
this.options.logger.debug(
|
||||
`Inference endpoint for ELSER model '${elserId}' successfully deployed!`
|
||||
);
|
||||
} else {
|
||||
this.options.logger.debug(
|
||||
`Inference endpoint for ELSER model '${elserId}' is already deployed`
|
||||
);
|
||||
this.options.logger.debug(
|
||||
`Inference endpoint for ELSER model '${elserId}' successfully deployed!`
|
||||
);
|
||||
} else {
|
||||
this.options.logger.debug(
|
||||
`Inference endpoint for ELSER model '${elserId}' is already deployed`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if (!ignoreSecurityLabs) {
|
||||
|
|
|
@ -75,6 +75,7 @@ export function getResourceName(resource: string) {
|
|||
export interface AIAssistantServiceOpts {
|
||||
logger: Logger;
|
||||
kibanaVersion: string;
|
||||
elserInferenceId?: string;
|
||||
elasticsearchClientPromise: Promise<ElasticsearchClient>;
|
||||
ml: MlPluginSetup;
|
||||
taskManager: TaskManagerSetupContract;
|
||||
|
@ -111,7 +112,7 @@ export class AIAssistantService {
|
|||
private initialized: boolean;
|
||||
private isInitializing: boolean = false;
|
||||
private getElserId: GetElser;
|
||||
private modelIdOverride: boolean = false;
|
||||
private elserInferenceId?: string;
|
||||
private conversationsDataStream: DataStreamSpacesAdapter;
|
||||
private knowledgeBaseDataStream: DataStreamSpacesAdapter;
|
||||
private promptsDataStream: DataStreamSpacesAdapter;
|
||||
|
@ -131,6 +132,7 @@ export class AIAssistantService {
|
|||
constructor(private readonly options: AIAssistantServiceOpts) {
|
||||
this.initialized = false;
|
||||
this.getElserId = createGetElserId(options.ml.trainedModelsProvider);
|
||||
this.elserInferenceId = options.elserInferenceId;
|
||||
this.savedAttackDiscoveries = options.savedAttackDiscoveries ?? false;
|
||||
|
||||
this.conversationsDataStream = this.createDataStream({
|
||||
|
@ -378,13 +380,7 @@ export class AIAssistantService {
|
|||
?.inference_id === ASSISTANT_ELSER_INFERENCE_ID
|
||||
);
|
||||
|
||||
// Used only for testing purposes
|
||||
if (this.modelIdOverride && !isUsingDedicatedInferenceEndpoint) {
|
||||
this.knowledgeBaseDataStream = await this.rolloverDataStream(
|
||||
ELASTICSEARCH_ELSER_INFERENCE_ID,
|
||||
ASSISTANT_ELSER_INFERENCE_ID
|
||||
);
|
||||
} else if (isUsingDedicatedInferenceEndpoint) {
|
||||
if (isUsingDedicatedInferenceEndpoint) {
|
||||
this.knowledgeBaseDataStream = await this.rolloverDataStream(
|
||||
ASSISTANT_ELSER_INFERENCE_ID,
|
||||
ELASTICSEARCH_ELSER_INFERENCE_ID
|
||||
|
@ -417,12 +413,9 @@ export class AIAssistantService {
|
|||
type: 'semantic_text',
|
||||
array: false,
|
||||
required: false,
|
||||
...(this.elserInferenceId ? { inference_id: this.elserInferenceId } : {}),
|
||||
},
|
||||
},
|
||||
settings: {
|
||||
// force new semantic_text field behavior
|
||||
'index.mapping.semantic_text.use_legacy_format': false,
|
||||
},
|
||||
writeIndexOnly: true,
|
||||
});
|
||||
}
|
||||
|
@ -613,18 +606,11 @@ export class AIAssistantService {
|
|||
getTrainedModelsProvider: () => ReturnType<TrainedModelsProvider['trainedModelsProvider']>;
|
||||
}
|
||||
): Promise<AIAssistantKnowledgeBaseDataClient | null> {
|
||||
// If modelIdOverride is set, swap getElserId(), and ensure the pipeline is re-created with the correct model
|
||||
if (opts?.modelIdOverride != null) {
|
||||
const modelIdOverride = opts.modelIdOverride;
|
||||
this.getElserId = async () => modelIdOverride;
|
||||
this.modelIdOverride = true;
|
||||
}
|
||||
|
||||
// If a V2 KnowledgeBase has never been initialized or a modelIdOverride is provided, we need to reinitialize all persistence resources to make sure
|
||||
// If a V2 KnowledgeBase has never been initialized we need to reinitialize all persistence resources to make sure
|
||||
// they're using the correct model/mappings. Technically all existing KB data is stale since it was created
|
||||
// with a different model/mappings, but modelIdOverride is only intended for testing purposes at this time
|
||||
// with a different model/mappings.
|
||||
// Added hasInitializedV2KnowledgeBase to prevent the console noise from re-init on each KB request
|
||||
if (!this.hasInitializedV2KnowledgeBase || opts?.modelIdOverride != null) {
|
||||
if (!this.hasInitializedV2KnowledgeBase) {
|
||||
await this.initializeResources();
|
||||
this.hasInitializedV2KnowledgeBase = true;
|
||||
}
|
||||
|
@ -646,7 +632,7 @@ export class AIAssistantService {
|
|||
getProductDocumentationStatus: this.getProductDocumentationStatus.bind(this),
|
||||
kibanaVersion: this.options.kibanaVersion,
|
||||
ml: this.options.ml,
|
||||
modelIdOverride: !!opts.modelIdOverride,
|
||||
elserInferenceId: this.options.elserInferenceId,
|
||||
setIsKBSetupInProgress: this.setIsKBSetupInProgress.bind(this),
|
||||
spaceId: opts.spaceId,
|
||||
manageGlobalKnowledgeBaseAIAssistant: opts.manageGlobalKnowledgeBaseAIAssistant ?? false,
|
||||
|
|
|
@ -0,0 +1,16 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0; you may not use this file except in compliance with the Elastic License
|
||||
* 2.0.
|
||||
*/
|
||||
import { schema } from '@kbn/config-schema';
|
||||
import { ELASTICSEARCH_ELSER_INFERENCE_ID } from './ai_assistant_data_clients/knowledge_base/field_maps_configuration';
|
||||
|
||||
export interface ConfigSchema {
|
||||
elserInferenceId: string;
|
||||
responseTimeout: number;
|
||||
}
|
||||
export const configSchema = schema.object({
|
||||
elserInferenceId: schema.string({ defaultValue: ELASTICSEARCH_ELSER_INFERENCE_ID }),
|
||||
});
|
|
@ -6,9 +6,13 @@
|
|||
*/
|
||||
|
||||
import type { FeatureFlagDefinitions } from '@kbn/core-feature-flags-server';
|
||||
import { PluginInitializerContext } from '@kbn/core/server';
|
||||
import type { PluginConfigDescriptor, PluginInitializerContext } from '@kbn/core/server';
|
||||
import { ATTACK_DISCOVERY_ALERTS_ENABLED_FEATURE_FLAG } from '@kbn/elastic-assistant-common';
|
||||
import { configSchema } from './config_schema';
|
||||
|
||||
export const config: PluginConfigDescriptor = {
|
||||
schema: configSchema,
|
||||
};
|
||||
export async function plugin(initializerContext: PluginInitializerContext) {
|
||||
const { ElasticAssistantPlugin } = await import('./plugin');
|
||||
return new ElasticAssistantPlugin(initializerContext);
|
||||
|
|
|
@ -12,7 +12,6 @@ import {
|
|||
AssistantFeatures,
|
||||
} from '@kbn/elastic-assistant-common';
|
||||
import { ReplaySubject, type Subject } from 'rxjs';
|
||||
import { MlPluginSetup } from '@kbn/ml-plugin/server';
|
||||
import { events } from './lib/telemetry/event_based_telemetry';
|
||||
import {
|
||||
AssistantTool,
|
||||
|
@ -30,8 +29,9 @@ import { PLUGIN_ID } from '../common/constants';
|
|||
import { registerEventLogProvider } from './register_event_log_provider';
|
||||
import { registerRoutes } from './routes/register_routes';
|
||||
import { CallbackIds, appContextService } from './services/app_context';
|
||||
import { createGetElserId, removeLegacyQuickPrompt } from './ai_assistant_service/helpers';
|
||||
import { removeLegacyQuickPrompt } from './ai_assistant_service/helpers';
|
||||
import { getAttackDiscoveryScheduleType } from './lib/attack_discovery/schedules/register_schedule/definition';
|
||||
import type { ConfigSchema } from './config_schema';
|
||||
|
||||
export class ElasticAssistantPlugin
|
||||
implements
|
||||
|
@ -46,13 +46,13 @@ export class ElasticAssistantPlugin
|
|||
private assistantService: AIAssistantService | undefined;
|
||||
private pluginStop$: Subject<void>;
|
||||
private readonly kibanaVersion: PluginInitializerContext['env']['packageInfo']['version'];
|
||||
private mlTrainedModelsProvider?: MlPluginSetup['trainedModelsProvider'];
|
||||
private getElserId?: () => Promise<string>;
|
||||
private readonly config: ConfigSchema;
|
||||
|
||||
constructor(initializerContext: PluginInitializerContext) {
|
||||
this.pluginStop$ = new ReplaySubject(1);
|
||||
this.logger = initializerContext.logger.get();
|
||||
this.kibanaVersion = initializerContext.env.packageInfo.version;
|
||||
this.config = initializerContext.config.get<ConfigSchema>();
|
||||
}
|
||||
|
||||
public setup(
|
||||
|
@ -69,6 +69,7 @@ export class ElasticAssistantPlugin
|
|||
ml: plugins.ml,
|
||||
taskManager: plugins.taskManager,
|
||||
kibanaVersion: this.kibanaVersion,
|
||||
elserInferenceId: this.config.elserInferenceId,
|
||||
elasticsearchClientPromise: core
|
||||
.getStartServices()
|
||||
.then(([{ elasticsearch }]) => elasticsearch.client.asInternalUser),
|
||||
|
@ -100,10 +101,7 @@ export class ElasticAssistantPlugin
|
|||
);
|
||||
events.forEach((eventConfig) => core.analytics.registerEventType(eventConfig));
|
||||
|
||||
this.mlTrainedModelsProvider = plugins.ml.trainedModelsProvider;
|
||||
this.getElserId = createGetElserId(this.mlTrainedModelsProvider);
|
||||
|
||||
registerRoutes(router, this.logger, this.getElserId);
|
||||
registerRoutes(router, this.logger, this.config);
|
||||
|
||||
// The featureFlags service is not available in the core setup, so we need
|
||||
// to wait for the start services to be available to read the feature flags.
|
||||
|
@ -149,11 +147,6 @@ export class ElasticAssistantPlugin
|
|||
this.logger.debug('elasticAssistant: Started');
|
||||
appContextService.start({ logger: this.logger });
|
||||
|
||||
plugins.licensing.license$.subscribe(() => {
|
||||
if (this.mlTrainedModelsProvider) {
|
||||
this.getElserId = createGetElserId(this.mlTrainedModelsProvider);
|
||||
}
|
||||
});
|
||||
removeLegacyQuickPrompt(core.elasticsearch.client.asInternalUser)
|
||||
.then((res) => {
|
||||
if (res?.total)
|
||||
|
|
|
@ -146,8 +146,6 @@ const mockResponse = {
|
|||
};
|
||||
|
||||
describe('chatCompleteRoute', () => {
|
||||
const mockGetElser = jest.fn().mockResolvedValue('.elser_model_2');
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
mockAppendAssistantMessageToConversation.mockResolvedValue(true);
|
||||
|
@ -236,10 +234,7 @@ describe('chatCompleteRoute', () => {
|
|||
},
|
||||
};
|
||||
|
||||
chatCompleteRoute(
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
mockGetElser
|
||||
);
|
||||
chatCompleteRoute(mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>);
|
||||
});
|
||||
|
||||
it('returns the expected error when executeCustomLlmChain fails', async () => {
|
||||
|
@ -269,8 +264,7 @@ describe('chatCompleteRoute', () => {
|
|||
};
|
||||
|
||||
await chatCompleteRoute(
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
mockGetElser
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>
|
||||
);
|
||||
});
|
||||
|
||||
|
@ -304,8 +298,7 @@ describe('chatCompleteRoute', () => {
|
|||
};
|
||||
|
||||
await chatCompleteRoute(
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
mockGetElser
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>
|
||||
);
|
||||
});
|
||||
|
||||
|
@ -338,8 +331,7 @@ describe('chatCompleteRoute', () => {
|
|||
};
|
||||
|
||||
await chatCompleteRoute(
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
mockGetElser
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>
|
||||
);
|
||||
});
|
||||
|
||||
|
@ -369,8 +361,7 @@ describe('chatCompleteRoute', () => {
|
|||
};
|
||||
|
||||
await chatCompleteRoute(
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
mockGetElser
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>
|
||||
);
|
||||
});
|
||||
|
||||
|
@ -399,8 +390,7 @@ describe('chatCompleteRoute', () => {
|
|||
},
|
||||
};
|
||||
await chatCompleteRoute(
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
mockGetElser
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>
|
||||
);
|
||||
});
|
||||
|
||||
|
@ -434,10 +424,7 @@ describe('chatCompleteRoute', () => {
|
|||
},
|
||||
};
|
||||
|
||||
chatCompleteRoute(
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
mockGetElser
|
||||
);
|
||||
chatCompleteRoute(mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>);
|
||||
});
|
||||
|
||||
it('should not add assistant reply to existing conversation when `persist=false`', async () => {
|
||||
|
@ -466,10 +453,7 @@ describe('chatCompleteRoute', () => {
|
|||
},
|
||||
};
|
||||
|
||||
chatCompleteRoute(
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
mockGetElser
|
||||
);
|
||||
chatCompleteRoute(mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>);
|
||||
});
|
||||
|
||||
it('should add assistant reply to new conversation when `persist=true`', async () => {
|
||||
|
@ -503,10 +487,7 @@ describe('chatCompleteRoute', () => {
|
|||
},
|
||||
};
|
||||
|
||||
chatCompleteRoute(
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
mockGetElser
|
||||
);
|
||||
chatCompleteRoute(mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>);
|
||||
});
|
||||
|
||||
it('should not create a new conversation when `persist=false`', async () => {
|
||||
|
@ -535,9 +516,6 @@ describe('chatCompleteRoute', () => {
|
|||
},
|
||||
};
|
||||
|
||||
chatCompleteRoute(
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
mockGetElser
|
||||
);
|
||||
chatCompleteRoute(mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>);
|
||||
});
|
||||
});
|
||||
|
|
|
@ -24,7 +24,7 @@ import {
|
|||
import { buildRouteValidationWithZod } from '@kbn/elastic-assistant-common/impl/schemas/common';
|
||||
import { getRequestAbortedSignal } from '@kbn/data-plugin/server';
|
||||
import { INVOKE_ASSISTANT_ERROR_EVENT } from '../../lib/telemetry/event_based_telemetry';
|
||||
import { ElasticAssistantPluginRouter, GetElser } from '../../types';
|
||||
import { ElasticAssistantPluginRouter } from '../../types';
|
||||
import { buildResponse } from '../../lib/build_response';
|
||||
import {
|
||||
appendAssistantMessageToConversation,
|
||||
|
@ -41,10 +41,7 @@ export const SYSTEM_PROMPT_CONTEXT_NON_I18N = (context: string) => {
|
|||
return `CONTEXT:\n"""\n${context}\n"""`;
|
||||
};
|
||||
|
||||
export const chatCompleteRoute = (
|
||||
router: ElasticAssistantPluginRouter,
|
||||
getElser: GetElser
|
||||
): void => {
|
||||
export const chatCompleteRoute = (router: ElasticAssistantPluginRouter): void => {
|
||||
router.versioned
|
||||
.post({
|
||||
access: 'public',
|
||||
|
@ -227,7 +224,6 @@ export const chatCompleteRoute = (
|
|||
isOssModel,
|
||||
conversationId,
|
||||
context: ctx,
|
||||
getElser,
|
||||
logger,
|
||||
inference,
|
||||
messages: messages ?? [],
|
||||
|
|
|
@ -60,8 +60,9 @@ export const getEvaluateRoute = (router: IRouter<ElasticAssistantRequestHandlerC
|
|||
return checkResponse.response;
|
||||
}
|
||||
|
||||
// Fetch datasets from LangSmith // TODO: plumb apiKey so this will work in cloud w/o env vars
|
||||
const datasets = await fetchLangSmithDatasets({ logger });
|
||||
const datasets = await fetchLangSmithDatasets({
|
||||
logger,
|
||||
});
|
||||
|
||||
try {
|
||||
return response.ok({ body: { graphs: Object.keys(ASSISTANT_GRAPH_MAP), datasets } });
|
||||
|
|
|
@ -42,13 +42,11 @@ describe('Post Evaluate Route', () => {
|
|||
},
|
||||
} as AuthenticatedUser;
|
||||
|
||||
const mockGetElser = jest.fn().mockResolvedValue('.elser_model_2');
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
context.elasticAssistant.getCurrentUser.mockResolvedValue(mockUser);
|
||||
|
||||
postEvaluateRoute(server.router, mockGetElser);
|
||||
postEvaluateRoute(server.router);
|
||||
});
|
||||
|
||||
describe('Capabilities', () => {
|
||||
|
|
|
@ -39,7 +39,7 @@ import { formatPrompt } from '../../lib/langchain/graphs/default_assistant_graph
|
|||
import { getPrompt as localGetPrompt, promptDictionary } from '../../lib/prompt';
|
||||
import { buildResponse } from '../../lib/build_response';
|
||||
import { AssistantDataClients } from '../../lib/langchain/executors/types';
|
||||
import { AssistantToolParams, ElasticAssistantRequestHandlerContext, GetElser } from '../../types';
|
||||
import { AssistantToolParams, ElasticAssistantRequestHandlerContext } from '../../types';
|
||||
import { DEFAULT_PLUGIN_NAME, performChecks } from '../helpers';
|
||||
import { fetchLangSmithDataset } from './utils';
|
||||
import { transformESSearchToAnonymizationFields } from '../../ai_assistant_data_clients/anonymization_fields/helpers';
|
||||
|
@ -60,10 +60,7 @@ const ROUTE_HANDLER_TIMEOUT = 10 * 60 * 1000; // 10 * 60 seconds = 10 minutes
|
|||
const LANG_CHAIN_TIMEOUT = ROUTE_HANDLER_TIMEOUT - 10_000; // 9 minutes 50 seconds
|
||||
const CONNECTOR_TIMEOUT = LANG_CHAIN_TIMEOUT - 10_000; // 9 minutes 40 seconds
|
||||
|
||||
export const postEvaluateRoute = (
|
||||
router: IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
getElser: GetElser
|
||||
) => {
|
||||
export const postEvaluateRoute = (router: IRouter<ElasticAssistantRequestHandlerContext>) => {
|
||||
router.versioned
|
||||
.post({
|
||||
access: INTERNAL_API_ACCESS,
|
||||
|
|
|
@ -8,6 +8,7 @@
|
|||
import { Client, Example } from 'langsmith';
|
||||
import type { Logger } from '@kbn/core/server';
|
||||
import { isLangSmithEnabled } from '@kbn/langchain/server/tracers/langsmith';
|
||||
import { isEmpty } from 'lodash';
|
||||
|
||||
/**
|
||||
* Fetches a dataset from LangSmith. Note that `client` will use env vars unless langSmithApiKey is specified
|
||||
|
@ -54,7 +55,7 @@ export const fetchLangSmithDatasets = async ({
|
|||
langSmithApiKey?: string;
|
||||
}): Promise<string[]> => {
|
||||
try {
|
||||
const client = new Client({ apiKey: langSmithApiKey });
|
||||
const client = new Client(!isEmpty(langSmithApiKey) ? { apiKey: langSmithApiKey } : undefined);
|
||||
const datasets = [];
|
||||
for await (const dataset of client.listDatasets()) {
|
||||
datasets.push(dataset);
|
||||
|
|
|
@ -52,7 +52,7 @@ import {
|
|||
import { getLangChainMessages } from '../lib/langchain/helpers';
|
||||
|
||||
import { AIAssistantConversationsDataClient } from '../ai_assistant_data_clients/conversations';
|
||||
import { ElasticAssistantRequestHandlerContext, GetElser } from '../types';
|
||||
import { ElasticAssistantRequestHandlerContext } from '../types';
|
||||
import { callAssistantGraph } from '../lib/langchain/graphs/default_assistant_graph';
|
||||
|
||||
interface GetPluginNameFromRequestParams {
|
||||
|
@ -249,7 +249,6 @@ export interface LangChainExecuteParams {
|
|||
traceData?: Message['traceData'],
|
||||
isError?: boolean
|
||||
) => Promise<void>;
|
||||
getElser: GetElser;
|
||||
response: KibanaResponseFactory;
|
||||
responseLanguage?: string;
|
||||
savedObjectsClient: SavedObjectsClientContract;
|
||||
|
@ -274,7 +273,6 @@ export const langChainExecute = async ({
|
|||
logger,
|
||||
conversationId,
|
||||
onLlmResponse,
|
||||
getElser,
|
||||
response,
|
||||
responseLanguage,
|
||||
isStream = true,
|
||||
|
|
|
@ -59,9 +59,7 @@ export const postKnowledgeBaseRoute = (router: ElasticAssistantPluginRouter) =>
|
|||
|
||||
try {
|
||||
const knowledgeBaseDataClient =
|
||||
await assistantContext.getAIAssistantKnowledgeBaseDataClient({
|
||||
modelIdOverride: request.query.modelId,
|
||||
});
|
||||
await assistantContext.getAIAssistantKnowledgeBaseDataClient();
|
||||
if (!knowledgeBaseDataClient) {
|
||||
return response.custom({ body: { success: false }, statusCode: 500 });
|
||||
}
|
||||
|
|
|
@ -129,8 +129,6 @@ const mockResponse = {
|
|||
};
|
||||
|
||||
describe('postActionsConnectorExecuteRoute', () => {
|
||||
const mockGetElser = jest.fn().mockResolvedValue('.elser_model_2');
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
license.hasAtLeast.mockReturnValue(true);
|
||||
|
@ -209,8 +207,7 @@ describe('postActionsConnectorExecuteRoute', () => {
|
|||
};
|
||||
|
||||
await postActionsConnectorExecuteRoute(
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
mockGetElser
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>
|
||||
);
|
||||
});
|
||||
|
||||
|
@ -238,8 +235,7 @@ describe('postActionsConnectorExecuteRoute', () => {
|
|||
};
|
||||
|
||||
await postActionsConnectorExecuteRoute(
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
mockGetElser
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>
|
||||
);
|
||||
});
|
||||
|
||||
|
@ -277,8 +273,7 @@ describe('postActionsConnectorExecuteRoute', () => {
|
|||
};
|
||||
|
||||
await postActionsConnectorExecuteRoute(
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
mockGetElser
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>
|
||||
);
|
||||
});
|
||||
|
||||
|
@ -311,8 +306,7 @@ describe('postActionsConnectorExecuteRoute', () => {
|
|||
};
|
||||
|
||||
await postActionsConnectorExecuteRoute(
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
mockGetElser
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>
|
||||
);
|
||||
});
|
||||
|
||||
|
@ -352,8 +346,7 @@ describe('postActionsConnectorExecuteRoute', () => {
|
|||
};
|
||||
|
||||
await postActionsConnectorExecuteRoute(
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
mockGetElser
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>
|
||||
);
|
||||
});
|
||||
|
||||
|
@ -392,8 +385,7 @@ describe('postActionsConnectorExecuteRoute', () => {
|
|||
},
|
||||
};
|
||||
await postActionsConnectorExecuteRoute(
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
mockGetElser
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>
|
||||
);
|
||||
});
|
||||
|
||||
|
@ -429,8 +421,7 @@ describe('postActionsConnectorExecuteRoute', () => {
|
|||
};
|
||||
|
||||
await postActionsConnectorExecuteRoute(
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
mockGetElser
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>
|
||||
);
|
||||
});
|
||||
|
||||
|
@ -465,8 +456,7 @@ describe('postActionsConnectorExecuteRoute', () => {
|
|||
},
|
||||
};
|
||||
await postActionsConnectorExecuteRoute(
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
mockGetElser
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>
|
||||
);
|
||||
});
|
||||
it('calls getPrompt with promptIds when passed in request.body', async () => {
|
||||
|
@ -500,8 +490,7 @@ describe('postActionsConnectorExecuteRoute', () => {
|
|||
};
|
||||
|
||||
await postActionsConnectorExecuteRoute(
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
mockGetElser
|
||||
mockRouter as unknown as IRouter<ElasticAssistantRequestHandlerContext>
|
||||
);
|
||||
});
|
||||
});
|
||||
|
|
|
@ -25,7 +25,7 @@ import { buildRouteValidationWithZod } from '@kbn/elastic-assistant-common/impl/
|
|||
import { getPrompt } from '../lib/prompt';
|
||||
import { INVOKE_ASSISTANT_ERROR_EVENT } from '../lib/telemetry/event_based_telemetry';
|
||||
import { buildResponse } from '../lib/build_response';
|
||||
import { ElasticAssistantRequestHandlerContext, GetElser } from '../types';
|
||||
import { ElasticAssistantRequestHandlerContext } from '../types';
|
||||
import {
|
||||
appendAssistantMessageToConversation,
|
||||
getIsKnowledgeBaseInstalled,
|
||||
|
@ -36,8 +36,7 @@ import {
|
|||
import { isOpenSourceModel } from './utils';
|
||||
|
||||
export const postActionsConnectorExecuteRoute = (
|
||||
router: IRouter<ElasticAssistantRequestHandlerContext>,
|
||||
getElser: GetElser
|
||||
router: IRouter<ElasticAssistantRequestHandlerContext>
|
||||
) => {
|
||||
router.versioned
|
||||
.post({
|
||||
|
@ -181,7 +180,6 @@ export const postActionsConnectorExecuteRoute = (
|
|||
isOssModel,
|
||||
conversationId,
|
||||
context: ctx,
|
||||
getElser,
|
||||
logger,
|
||||
inference,
|
||||
messages: (newMessage ? [newMessage] : messages) ?? [],
|
||||
|
|
|
@ -134,14 +134,13 @@ const enableAttackDiscoverySchedulesRouteMock = enableAttackDiscoverySchedulesRo
|
|||
|
||||
describe('registerRoutes', () => {
|
||||
const loggerMock = loggingSystemMock.createLogger();
|
||||
const getElserIdMock = jest.fn();
|
||||
let server: ReturnType<typeof serverMock.create>;
|
||||
|
||||
beforeEach(async () => {
|
||||
jest.clearAllMocks();
|
||||
|
||||
server = serverMock.create();
|
||||
registerRoutes(server.router, loggerMock, getElserIdMock);
|
||||
registerRoutes(server.router, loggerMock);
|
||||
});
|
||||
|
||||
it('should call `cancelAttackDiscoveryRouteMock`', () => {
|
||||
|
@ -201,7 +200,7 @@ describe('registerRoutes', () => {
|
|||
});
|
||||
|
||||
it('should call `postEvaluateRouteMock`', () => {
|
||||
expect(postEvaluateRouteMock).toHaveBeenCalledWith(server.router, getElserIdMock);
|
||||
expect(postEvaluateRouteMock).toHaveBeenCalledWith(server.router);
|
||||
});
|
||||
|
||||
it('should call `getCapabilitiesRouteMock`', () => {
|
||||
|
@ -225,14 +224,11 @@ describe('registerRoutes', () => {
|
|||
});
|
||||
|
||||
it('should call `chatCompleteRouteMock`', () => {
|
||||
expect(chatCompleteRouteMock).toHaveBeenCalledWith(server.router, getElserIdMock);
|
||||
expect(chatCompleteRouteMock).toHaveBeenCalledWith(server.router);
|
||||
});
|
||||
|
||||
it('should call `postActionsConnectorExecuteRouteMock`', () => {
|
||||
expect(postActionsConnectorExecuteRouteMock).toHaveBeenCalledWith(
|
||||
server.router,
|
||||
getElserIdMock
|
||||
);
|
||||
expect(postActionsConnectorExecuteRouteMock).toHaveBeenCalledWith(server.router);
|
||||
});
|
||||
|
||||
it('should call `bulkActionKnowledgeBaseEntriesRouteMock`', () => {
|
||||
|
|
|
@ -12,7 +12,7 @@ import { cancelAttackDiscoveryRoute } from './attack_discovery/post/cancel/cance
|
|||
import { findAttackDiscoveriesRoute } from './attack_discovery/get/find_attack_discoveries';
|
||||
import { getAttackDiscoveryRoute } from './attack_discovery/get/get_attack_discovery';
|
||||
import { postAttackDiscoveryRoute } from './attack_discovery/post/post_attack_discovery';
|
||||
import { ElasticAssistantPluginRouter, GetElser } from '../types';
|
||||
import { ElasticAssistantPluginRouter } from '../types';
|
||||
import { createConversationRoute } from './user_conversations/create_route';
|
||||
import { deleteConversationRoute } from './user_conversations/delete_route';
|
||||
import { readConversationRoute } from './user_conversations/read_route';
|
||||
|
@ -53,15 +53,16 @@ import { deleteAttackDiscoverySchedulesRoute } from './attack_discovery/schedule
|
|||
import { findAttackDiscoverySchedulesRoute } from './attack_discovery/schedules/find';
|
||||
import { disableAttackDiscoverySchedulesRoute } from './attack_discovery/schedules/disable';
|
||||
import { enableAttackDiscoverySchedulesRoute } from './attack_discovery/schedules/enable';
|
||||
import type { ConfigSchema } from '../config_schema';
|
||||
|
||||
export const registerRoutes = (
|
||||
router: ElasticAssistantPluginRouter,
|
||||
logger: Logger,
|
||||
getElserId: GetElser
|
||||
config?: ConfigSchema
|
||||
) => {
|
||||
/** PUBLIC */
|
||||
// Chat
|
||||
chatCompleteRoute(router, getElserId);
|
||||
chatCompleteRoute(router);
|
||||
|
||||
/** INTERNAL */
|
||||
// Capabilities
|
||||
|
@ -94,11 +95,11 @@ export const registerRoutes = (
|
|||
bulkActionKnowledgeBaseEntriesRoute(router);
|
||||
|
||||
// Actions Connector Execute (LLM Wrapper)
|
||||
postActionsConnectorExecuteRoute(router, getElserId);
|
||||
postActionsConnectorExecuteRoute(router);
|
||||
|
||||
// Evaluate
|
||||
getEvaluateRoute(router);
|
||||
postEvaluateRoute(router, getElserId);
|
||||
postEvaluateRoute(router);
|
||||
|
||||
// Prompts
|
||||
bulkPromptsRoute(router, logger);
|
||||
|
|
|
@ -111,7 +111,7 @@ export class RequestContextFactory implements IRequestContextFactory {
|
|||
savedObjectsClient,
|
||||
telemetry: core.analytics,
|
||||
|
||||
// Note: modelIdOverride is used here to enable setting up the KB using a different ELSER model, which
|
||||
// Note: elserInferenceId is used here to enable setting up the KB using a different ELSER model, which
|
||||
// is necessary for testing purposes (`pt_tiny_elser`).
|
||||
getAIAssistantKnowledgeBaseDataClient: memoize(async (params) => {
|
||||
const currentUser = await getCurrentUser();
|
||||
|
@ -127,7 +127,7 @@ export class RequestContextFactory implements IRequestContextFactory {
|
|||
logger: this.logger,
|
||||
licensing: context.licensing,
|
||||
currentUser,
|
||||
modelIdOverride: params?.modelIdOverride,
|
||||
elserInferenceId: params?.elserInferenceId,
|
||||
manageGlobalKnowledgeBaseAIAssistant:
|
||||
securitySolutionAssistant.manageGlobalKnowledgeBaseAIAssistant as boolean,
|
||||
// uses internal user to interact with ML API
|
||||
|
|
|
@ -183,6 +183,11 @@ export const configSchema = schema.object({
|
|||
}),
|
||||
}),
|
||||
}),
|
||||
siemRuleMigrations: schema.maybe(
|
||||
schema.object({
|
||||
elserInferenceId: schema.maybe(schema.string()),
|
||||
})
|
||||
),
|
||||
});
|
||||
|
||||
export type ConfigSchema = TypeOf<typeof configSchema>;
|
||||
|
|
|
@ -21,9 +21,9 @@ import type {
|
|||
IndexNameProviders,
|
||||
} from '../types';
|
||||
import {
|
||||
integrationsFieldMap,
|
||||
getIntegrationsFieldMap,
|
||||
getPrebuiltRulesFieldMap,
|
||||
migrationsFieldMaps,
|
||||
prebuiltRulesFieldMap,
|
||||
ruleMigrationResourcesFieldMap,
|
||||
ruleMigrationsFieldMap,
|
||||
} from './rule_migrations_field_maps';
|
||||
|
@ -45,7 +45,7 @@ interface CreateAdapterParams {
|
|||
export class RuleMigrationsDataService {
|
||||
private readonly adapters: Adapters;
|
||||
|
||||
constructor(private logger: Logger, private kibanaVersion: string) {
|
||||
constructor(private logger: Logger, private kibanaVersion: string, elserInferenceId?: string) {
|
||||
this.adapters = {
|
||||
migrations: this.createIndexPatternAdapter({
|
||||
adapterId: 'migrations',
|
||||
|
@ -61,11 +61,11 @@ export class RuleMigrationsDataService {
|
|||
}),
|
||||
integrations: this.createIndexAdapter({
|
||||
adapterId: 'integrations',
|
||||
fieldMap: integrationsFieldMap,
|
||||
fieldMap: getIntegrationsFieldMap({ elserInferenceId }),
|
||||
}),
|
||||
prebuiltrules: this.createIndexAdapter({
|
||||
adapterId: 'prebuiltrules',
|
||||
fieldMap: prebuiltRulesFieldMap,
|
||||
fieldMap: getPrebuiltRulesFieldMap({ elserInferenceId }),
|
||||
}),
|
||||
};
|
||||
}
|
||||
|
|
|
@ -58,7 +58,11 @@ export const ruleMigrationResourcesFieldMap: FieldMap<
|
|||
updated_by: { type: 'keyword', required: false },
|
||||
};
|
||||
|
||||
export const integrationsFieldMap: FieldMap<SchemaFieldMapKeys<RuleMigrationIntegration>> = {
|
||||
export const getIntegrationsFieldMap: ({
|
||||
elserInferenceId,
|
||||
}: {
|
||||
elserInferenceId?: string;
|
||||
}) => FieldMap<SchemaFieldMapKeys<RuleMigrationIntegration>> = ({ elserInferenceId }) => ({
|
||||
id: { type: 'keyword', required: true },
|
||||
title: { type: 'text', required: true },
|
||||
description: { type: 'text', required: true },
|
||||
|
@ -66,16 +70,28 @@ export const integrationsFieldMap: FieldMap<SchemaFieldMapKeys<RuleMigrationInte
|
|||
'data_streams.dataset': { type: 'keyword', required: true },
|
||||
'data_streams.title': { type: 'text', required: true },
|
||||
'data_streams.index_pattern': { type: 'keyword', required: true },
|
||||
elser_embedding: { type: 'semantic_text', required: true },
|
||||
};
|
||||
elser_embedding: {
|
||||
type: 'semantic_text',
|
||||
required: true,
|
||||
...(elserInferenceId ? { inference_id: elserInferenceId } : {}),
|
||||
},
|
||||
});
|
||||
|
||||
export const prebuiltRulesFieldMap: FieldMap<SchemaFieldMapKeys<RuleMigrationPrebuiltRule>> = {
|
||||
export const getPrebuiltRulesFieldMap: ({
|
||||
elserInferenceId,
|
||||
}: {
|
||||
elserInferenceId?: string;
|
||||
}) => FieldMap<SchemaFieldMapKeys<RuleMigrationPrebuiltRule>> = ({ elserInferenceId }) => ({
|
||||
name: { type: 'text', required: true },
|
||||
description: { type: 'text', required: true },
|
||||
elser_embedding: { type: 'semantic_text', required: true },
|
||||
elser_embedding: {
|
||||
type: 'semantic_text',
|
||||
required: true,
|
||||
...(elserInferenceId ? { inference_id: elserInferenceId } : {}),
|
||||
},
|
||||
rule_id: { type: 'keyword', required: true },
|
||||
mitre_attack_ids: { type: 'keyword', array: true, required: false },
|
||||
};
|
||||
});
|
||||
|
||||
export const migrationsFieldMaps: FieldMap<SchemaFieldMapKeys<SiemMigration>> = {
|
||||
created_at: { type: 'date', required: true },
|
||||
|
|
|
@ -45,7 +45,7 @@ describe('SiemRuleMigrationsService', () => {
|
|||
});
|
||||
|
||||
it('should instantiate the rule migrations data stream adapter', () => {
|
||||
expect(MockRuleMigrationsDataService).toHaveBeenCalledWith(logger, kibanaVersion);
|
||||
expect(MockRuleMigrationsDataService).toHaveBeenCalledWith(logger, kibanaVersion, undefined);
|
||||
});
|
||||
|
||||
describe('when setup is called', () => {
|
||||
|
|
|
@ -44,9 +44,9 @@ export class SiemRuleMigrationsService {
|
|||
private taskService: RuleMigrationsTaskService;
|
||||
private logger: Logger;
|
||||
|
||||
constructor(logger: LoggerFactory, kibanaVersion: string) {
|
||||
constructor(logger: LoggerFactory, kibanaVersion: string, elserInferenceId?: string) {
|
||||
this.logger = logger.get('siemRuleMigrations');
|
||||
this.dataService = new RuleMigrationsDataService(this.logger, kibanaVersion);
|
||||
this.dataService = new RuleMigrationsDataService(this.logger, kibanaVersion, elserInferenceId);
|
||||
this.taskService = new RuleMigrationsTaskService(this.logger);
|
||||
}
|
||||
|
||||
|
|
|
@ -52,7 +52,7 @@ describe('SiemMigrationsService', () => {
|
|||
});
|
||||
|
||||
it('should instantiate the rule migrations service', async () => {
|
||||
expect(MockSiemRuleMigrationsService).toHaveBeenCalledWith(logger, kibanaVersion);
|
||||
expect(MockSiemRuleMigrationsService).toHaveBeenCalledWith(logger, kibanaVersion, undefined);
|
||||
});
|
||||
|
||||
describe('when setup is called', () => {
|
||||
|
@ -100,7 +100,7 @@ describe('SiemMigrationsService', () => {
|
|||
});
|
||||
|
||||
it('should instantiate the rule migrations service', async () => {
|
||||
expect(MockSiemRuleMigrationsService).toHaveBeenCalledWith(logger, kibanaVersion);
|
||||
expect(MockSiemRuleMigrationsService).toHaveBeenCalledWith(logger, kibanaVersion, undefined);
|
||||
});
|
||||
|
||||
describe('when setup is called', () => {
|
||||
|
|
|
@ -21,7 +21,11 @@ export class SiemMigrationsService {
|
|||
|
||||
constructor(private config: ConfigType, logger: LoggerFactory, kibanaVersion: string) {
|
||||
this.pluginStop$ = new ReplaySubject(1);
|
||||
this.rules = new SiemRuleMigrationsService(logger, kibanaVersion);
|
||||
this.rules = new SiemRuleMigrationsService(
|
||||
logger,
|
||||
kibanaVersion,
|
||||
config.siemRuleMigrations?.elserInferenceId
|
||||
);
|
||||
}
|
||||
|
||||
setup(params: SiemMigrationsSetupParams) {
|
||||
|
|
Binary file not shown.
File diff suppressed because it is too large
Load diff
2
x-pack/test/security_solution_api_integration/.gitignore
vendored
Normal file
2
x-pack/test/security_solution_api_integration/.gitignore
vendored
Normal file
|
@ -0,0 +1,2 @@
|
|||
connector_config.json
|
||||
langsmith_key.txt
|
|
@ -67,6 +67,9 @@
|
|||
"ai4dsoc_cases:server:serverless": "npm run initialize-server:ai4dsoc cases serverless",
|
||||
"ai4dsoc_cases:runner:serverless": "npm run run-tests:ai4dsoc cases serverless serverlessEnv",
|
||||
|
||||
"genai_evals:server:ess": "npm run initialize-server:genai:trial_complete evaluations ess",
|
||||
"genai_evals:runner:ess": "npm run run-tests:genai:trial_complete evaluations ess essEnv",
|
||||
|
||||
"genai_kb_entries:server:serverless": "npm run initialize-server:genai:trial_complete knowledge_base/entries serverless",
|
||||
"genai_kb_entries:runner:serverless": "npm run run-tests:genai:trial_complete knowledge_base/entries serverless serverlessEnv",
|
||||
"genai_kb_entries:qa:serverless": "npm run run-tests:genai:trial_complete knowledge_base/entries serverless qaPeriodicEnv",
|
||||
|
|
|
@ -0,0 +1,9 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0; you may not use this file except in compliance with the Elastic License
|
||||
* 2.0.
|
||||
*/
|
||||
|
||||
require('@kbn/babel-register').install();
|
||||
require('./manage_secrets').exportToEnvVars();
|
|
@ -0,0 +1,171 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0; you may not use this file except in compliance with the Elastic License
|
||||
* 2.0.
|
||||
*/
|
||||
|
||||
import execa from 'execa';
|
||||
import Path from 'path';
|
||||
import { writeFile, readFile } from 'fs/promises';
|
||||
import { REPO_ROOT } from '@kbn/repo-info';
|
||||
import { schema } from '@kbn/config-schema';
|
||||
|
||||
const SECURITY_GEN_AI_CONNECTORS_ENV_VAR = 'KIBANA_SECURITY_TESTING_AI_CONNECTORS';
|
||||
const SECURITY_GEN_AI_LANGSMITH_KEY_ENV_VAR = 'KIBANA_SECURITY_TESTING_LANGSMITH_KEY';
|
||||
|
||||
// siem-team secrets discussed w/ operations and we will mirror them here
|
||||
// const SECURITY_GEN_AI_VAULT = 'secret/siem-team/security-gen-ai';
|
||||
|
||||
// CI Vault
|
||||
const SECURITY_GEN_AI_VAULT = 'secret/ci/elastic-kibana/security-gen-ai';
|
||||
const SECURITY_GEN_AI_VAULT_CONNECTORS = `${SECURITY_GEN_AI_VAULT}/connectors`;
|
||||
const SECURITY_GEN_AI_VAULT_LANGSMITH = `${SECURITY_GEN_AI_VAULT}/langsmith`;
|
||||
const SECURITY_GEN_AI_CONNECTORS_FIELD = 'config';
|
||||
const SECURITY_GEN_AI_LANGSMITH_FIELD = 'key';
|
||||
const CONNECTOR_FILE = Path.join(
|
||||
REPO_ROOT,
|
||||
'x-pack/test/security_solution_api_integration/scripts/genai/vault/connector_config.json'
|
||||
);
|
||||
const LANGSMITH_FILE = Path.join(
|
||||
REPO_ROOT,
|
||||
'x-pack/test/security_solution_api_integration/scripts/genai/vault/langsmith_key.txt'
|
||||
);
|
||||
|
||||
const connectorsSchema = schema.recordOf(
|
||||
schema.string(),
|
||||
schema.object({
|
||||
name: schema.string(),
|
||||
actionTypeId: schema.string(),
|
||||
config: schema.recordOf(schema.string(), schema.any()),
|
||||
secrets: schema.recordOf(schema.string(), schema.any()),
|
||||
})
|
||||
);
|
||||
|
||||
export interface AvailableConnector {
|
||||
name: string;
|
||||
actionTypeId: string;
|
||||
config: Record<string, unknown>;
|
||||
secrets: Record<string, unknown>;
|
||||
}
|
||||
|
||||
export const retrieveFromVault = async (
|
||||
vault: string,
|
||||
filePath: string,
|
||||
field: string,
|
||||
isJson = true
|
||||
) => {
|
||||
const { stdout } = await execa('vault', ['read', `-field=${field}`, vault], {
|
||||
cwd: REPO_ROOT,
|
||||
buffer: true,
|
||||
});
|
||||
|
||||
const value = Buffer.from(stdout, 'base64').toString('utf-8').trim();
|
||||
const config = isJson ? JSON.stringify(JSON.parse(value), null, 2) : value;
|
||||
|
||||
await writeFile(filePath, config);
|
||||
|
||||
// eslint-disable-next-line no-console
|
||||
console.log(`Config dumped into ${filePath}`);
|
||||
};
|
||||
|
||||
export const retrieveConnectorConfig = async () => {
|
||||
await retrieveFromVault(
|
||||
SECURITY_GEN_AI_VAULT_CONNECTORS,
|
||||
CONNECTOR_FILE,
|
||||
SECURITY_GEN_AI_CONNECTORS_FIELD
|
||||
);
|
||||
};
|
||||
|
||||
export const retrieveLangsmithKey = async () => {
|
||||
await retrieveFromVault(
|
||||
SECURITY_GEN_AI_VAULT_LANGSMITH,
|
||||
LANGSMITH_FILE,
|
||||
SECURITY_GEN_AI_LANGSMITH_FIELD,
|
||||
false
|
||||
);
|
||||
};
|
||||
|
||||
export const formatCurrentConfig = async (filePath: string) => {
|
||||
const config = await readFile(filePath, 'utf-8');
|
||||
const asB64 = Buffer.from(config).toString('base64');
|
||||
// eslint-disable-next-line no-console
|
||||
console.log(asB64);
|
||||
};
|
||||
|
||||
export const uploadToVault = async (vault: string, filePath: string, field: string) => {
|
||||
const config = await readFile(filePath, 'utf-8');
|
||||
const asB64 = Buffer.from(config).toString('base64');
|
||||
|
||||
await execa('vault', ['write', vault, `${field}=${asB64}`], {
|
||||
cwd: REPO_ROOT,
|
||||
buffer: true,
|
||||
});
|
||||
};
|
||||
|
||||
export const uploadConnectorConfigToVault = async () => {
|
||||
await uploadToVault(
|
||||
SECURITY_GEN_AI_VAULT_CONNECTORS,
|
||||
CONNECTOR_FILE,
|
||||
SECURITY_GEN_AI_CONNECTORS_FIELD
|
||||
);
|
||||
};
|
||||
|
||||
export const uploadLangsmithKeyToVault = async () => {
|
||||
await uploadToVault(
|
||||
SECURITY_GEN_AI_VAULT_LANGSMITH,
|
||||
LANGSMITH_FILE,
|
||||
SECURITY_GEN_AI_LANGSMITH_FIELD
|
||||
);
|
||||
};
|
||||
|
||||
/**
|
||||
* FOR LOCAL USE ONLY! Export connectors and langsmith secrets from vault to env vars before manually
|
||||
* running evaluations. CI env vars are set by .buildkite/scripts/common/setup_job_env.sh
|
||||
*/
|
||||
export const exportToEnvVars = async () => {
|
||||
const { stdout: connectors } = await execa(
|
||||
'vault',
|
||||
['read', `-field=${SECURITY_GEN_AI_CONNECTORS_FIELD}`, SECURITY_GEN_AI_VAULT_CONNECTORS],
|
||||
{
|
||||
cwd: REPO_ROOT,
|
||||
buffer: true,
|
||||
}
|
||||
);
|
||||
const { stdout: langsmithKey } = await execa(
|
||||
'vault',
|
||||
['read', `-field=${SECURITY_GEN_AI_LANGSMITH_FIELD}`, SECURITY_GEN_AI_VAULT_LANGSMITH],
|
||||
{
|
||||
cwd: REPO_ROOT,
|
||||
buffer: true,
|
||||
}
|
||||
);
|
||||
process.env[SECURITY_GEN_AI_CONNECTORS_ENV_VAR] = connectors;
|
||||
process.env[SECURITY_GEN_AI_LANGSMITH_KEY_ENV_VAR] = langsmithKey;
|
||||
};
|
||||
|
||||
export const loadConnectorsFromEnvVar = (): Record<string, AvailableConnector> => {
|
||||
const connectorsValue = process.env[SECURITY_GEN_AI_CONNECTORS_ENV_VAR];
|
||||
if (!connectorsValue) {
|
||||
return {};
|
||||
}
|
||||
|
||||
let connectors: Record<string, AvailableConnector>;
|
||||
try {
|
||||
connectors = JSON.parse(Buffer.from(connectorsValue, 'base64').toString('utf-8'));
|
||||
} catch (e) {
|
||||
throw new Error(
|
||||
`Error trying to parse value from ${SECURITY_GEN_AI_CONNECTORS_ENV_VAR} environment variable: ${e.message}`
|
||||
);
|
||||
}
|
||||
return connectorsSchema.validate(connectors);
|
||||
};
|
||||
|
||||
export const loadLangSmithKeyFromEnvVar = (): string | undefined => {
|
||||
const langsmithKeyValue = process.env[SECURITY_GEN_AI_LANGSMITH_KEY_ENV_VAR];
|
||||
if (!langsmithKeyValue) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
return Buffer.from(langsmithKeyValue, 'base64').toString('utf-8').trim();
|
||||
};
|
|
@ -0,0 +1,16 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0; you may not use this file except in compliance with the Elastic License
|
||||
* 2.0.
|
||||
*/
|
||||
|
||||
require('@kbn/babel-register').install();
|
||||
const { retrieveConnectorConfig, retrieveLangsmithKey } = require('./manage_secrets');
|
||||
|
||||
/**
 * Retrieves the security GenAI connector config and LangSmith key from vault
 * and writes them to local files (see ./manage_secrets).
 */
async function retrieveConfigs() {
  await retrieveConnectorConfig();
  await retrieveLangsmithKey();
}

// Fail the script with a non-zero exit code instead of an unhandled promise rejection.
retrieveConfigs().catch((err) => {
  console.error(err);
  process.exitCode = 1;
});
|
|
@ -0,0 +1,17 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0; you may not use this file except in compliance with the Elastic License
|
||||
* 2.0.
|
||||
*/
|
||||
|
||||
require('@kbn/babel-register').install();
|
||||
|
||||
const { uploadConnectorConfigToVault, uploadLangsmithKeyToVault } = require('./manage_secrets');
|
||||
|
||||
/**
 * Uploads the local connector config and LangSmith key files to vault
 * (see ./manage_secrets).
 */
async function uploadConfigs() {
  await uploadConnectorConfigToVault();
  await uploadLangsmithKeyToVault();
}

// Fail the script with a non-zero exit code instead of an unhandled promise rejection.
uploadConfigs().catch((err) => {
  console.error(err);
  process.exitCode = 1;
});
|
|
@ -0,0 +1,39 @@
|
|||
## Summary
|
||||
|
||||
Introduces a new `security_solution/gen_ai_evals.yml` BuildKite pipeline for automatically running our Assistant and Attack Discovery evaluation suites weekly.
|
||||
|
||||
### To Run Locally:
|
||||
Ensure you are authenticated with vault for LLM + LangSmith creds:
|
||||
|
||||
> See [internal docs](https://github.com/elastic/infra/blob/master/docs/vault/README.md#login-with-your-okta) for setup/login instructions.
|
||||
|
||||
Fetch Connectors and LangSmith creds:
|
||||
|
||||
> [!NOTE]
|
||||
> In discussion with @elastic/kibana-operations it was preferred to use the ci-prod vault, but the secrets are currently mirrored to `SECURITY_GEN_AI_VAULT`, which can be modified via `manage_secrets.ts`, so we can self-manage them to a degree.
|
||||
|
||||
```
|
||||
cd x-pack/test/security_solution_api_integration
|
||||
node scripts/genai/vault/retrieve_secrets.js
|
||||
```
|
||||
|
||||
|
||||
Navigate to api integration directory, load the env vars, and start server:
|
||||
```
|
||||
cd x-pack/test/security_solution_api_integration
|
||||
export KIBANA_SECURITY_TESTING_AI_CONNECTORS=$(base64 -w 0 < scripts/genai/vault/connector_config.json) && export KIBANA_SECURITY_TESTING_LANGSMITH_KEY=$(base64 -w 0 < scripts/genai/vault/langsmith_key.txt)
|
||||
yarn genai_evals:server:ess
|
||||
```
|
||||
|
||||
Then in another terminal, load vars and run the tests:
|
||||
```
|
||||
cd x-pack/test/security_solution_api_integration
|
||||
export KIBANA_SECURITY_TESTING_AI_CONNECTORS=$(base64 -w 0 < scripts/genai/vault/connector_config.json) && export KIBANA_SECURITY_TESTING_LANGSMITH_KEY=$(base64 -w 0 < scripts/genai/vault/langsmith_key.txt)
|
||||
yarn genai_evals:runner:ess
|
||||
```
|
||||
|
||||
### To manually run on BuildKite:
|
||||
Navigate to [BuildKite](https://buildkite.com/elastic?filter=ftr-security-solution-gen-ai-evaluations) and run `ftr-security-solution-gen-ai-evaluations` pipeline.
|
||||
|
||||
### To manually run on BuildKite for specific PR:
|
||||
Add `ci:security-genai-run-evals` label to PR
|
|
@ -0,0 +1,45 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0; you may not use this file except in compliance with the Elastic License
|
||||
* 2.0.
|
||||
*/
|
||||
|
||||
import type { ToolingLog } from '@kbn/tooling-log';
|
||||
import type SuperTest from 'supertest';
|
||||
|
||||
import { createEntry } from '../../knowledge_base/entries/utils/create_entry';
|
||||
import { documentEntry } from '../../knowledge_base/entries/trial_license_complete_tier/mocks/entries';
|
||||
|
||||
// Knowledge Base entries loaded before evaluations run. Each entry is merged on top of the
// shared `documentEntry` mock by `loadEvalKnowledgeBaseEntries`, so only `name`/`text` vary.
export const KB_ENTRIES = [
  {
    // Simple fact used for basic custom-knowledge recall questions.
    name: 'Host in NYC data center',
    text: 'Host "srv-win-east" sits in the New York City data center.',
  },
  {
    // Sample switch configuration (fenced code block) for retrieval over config data.
    name: 'Network configuration',
    text: 'Here is the configuration of my network:\n```\nhostname Switch01\nip domain-lookup\nline con 0\n no password\n logging synchronous\n login\nenable password cisco\ninterface FastEthernet0/1\n switchport mode access\n switchport access vlan 1\n no shutdown\ninterface FastEthernet0/2\n switchport mode access\n switchport access vlan 1\n no shutdown\ninterface FastEthernet0/3\n switchport mode access\n switchport access vlan 1\n no shutdown\ninterface GigabitEthernet0/2\n switchport mode trunk\n switchport trunk allowed vlan all\n no shutdown\nno spanning-tree vlan 1\ncdp run\nline vty 0 4\n login\n transport input telnet\nno storm-control\nend\nwrite memory\n```',
  },
  {
    // Long-form playbook (includes an ES|QL query) for retrieval over larger documents.
    name: 'Threat Hunting playbook',
    text: '```\nThreat Hunting Playbook for Portscans on Windows Hosts Running SentinelOne:\nOverview: Port scanning is a technique used by attackers to identify open ports and services available on a networked device. By identifying open ports, attackers can determine potential vulnerabilities to exploit. Common tools used for port scanning include `nmap` and `PortQry`. Detecting port scanning activities is crucial for identifying potential reconnaissance activities by attackers and preventing subsequent attacks. Why Hunt for Portscans? 1. Early Detection of Reconnaissance: Port scans are often the first step in an attack. Detecting them early can help prevent further exploitation. 2. Identifying Misconfigurations: Port scans can reveal unintended open ports that may indicate misconfigurations. 3. Compliance and Security Posture: Regularly monitoring for port scans helps maintain a strong security posture and ensures compliance with security policies. ES|QL Query: ```esql FROM logs-* METADATA _id, _index, _version | WHERE process.name in ("nmap.exe", "PortQry.exe") and (process.command_line LIKE "*-p*" OR process.command_line LIKE "*-r*") | mv_expand process.args | WHERE (process.args LIKE "*-*" OR process.args LIKE "*:*") AND NOT process.args LIKE "-*" AND NOT process.args LIKE "?:*" AND NOT process.args LIKE "*.*.*.*-*" | DISSECT process.args "%{startp}:%{destp}" | DISSECT process.args "%{startp2}-%{destp2}" | GROK process.command_line "%{GREEDYDATA:content}%{IP:destination.ip}" | EVAL startp = to_integer(startp), destp = to_integer(destp), startp2 = to_integer(startp2), destp2 = to_integer(destp2) | EVAL portcount = CASE( process.name == "nmap.exe", destp2-startp2, destp-startp ) | WHERE portcount > 50 | KEEP @timestamp,_id,_index,_version,user.name,host.hostname,process.name,portcount,process.command_line,destination.ip | LIMIT 10000 ``` Saved Threat Hunting Timeline: [:mag_right: ES|QL - Port Scan]() SentinelOne Console: [SentinelOne Console]() Steps for Threat Hunting 1. Run the ES|QL Query: Execute the provided ES|QL query in Elastic Security to identify potential port scanning activities. 2. Analyze Results: Review the results for any suspicious activities. Pay close attention to the `process.name`, `user.name`, `host.hostname`, `portcount`, and `destination.ip`. 3. Investigate Further: - Process Command Line: Check the command line arguments for any unusual patterns or large port ranges. - User Context: Identify if the user associated with the process should be performing such activities. - Host Context: Determine if the host is expected to run port scanning tools. 4. Correlate with Other Data: Cross-reference with other logs and alerts to identify any related activities or anomalies. 5. Take Action: If malicious activity is confirmed, follow your incident response procedures to contain and mitigate the threat. By following this playbook, you can effectively hunt for and identify port scanning activities on your Windows hosts running SentinelOne, helping to secure your environment against potential threats.\n```',
  },
];
|
||||
|
||||
/**
|
||||
* Loads Knowledge Base Entries for running evaluations
|
||||
* @param supertest The supertest deps
|
||||
* @param log The tooling logger
|
||||
*/
|
||||
export const loadEvalKnowledgeBaseEntries = async (supertest: SuperTest.Agent, log: ToolingLog) => {
|
||||
for (const entry of KB_ENTRIES) {
|
||||
await createEntry({
|
||||
supertest,
|
||||
log,
|
||||
entry: {
|
||||
...documentEntry,
|
||||
...entry,
|
||||
},
|
||||
});
|
||||
}
|
||||
};
|
|
@ -0,0 +1,63 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0; you may not use this file except in compliance with the Elastic License
|
||||
* 2.0.
|
||||
*/
|
||||
|
||||
import { FtrConfigProviderContext } from '@kbn/test';
|
||||
import { loadConnectorsFromEnvVar } from '../../../../../scripts/genai/vault/manage_secrets';
|
||||
import { getTinyElserServerArgs } from '../../../knowledge_base/entries/utils/helpers';
|
||||
|
||||
export default async function ({ readConfigFile }: FtrConfigProviderContext) {
|
||||
const functionalConfig = await readConfigFile(
|
||||
require.resolve('../../../../../config/ess/config.base.trial')
|
||||
);
|
||||
|
||||
const preconfiguredConnectors = loadConnectorsFromEnvVar();
|
||||
|
||||
return {
|
||||
...functionalConfig.getAll(),
|
||||
kbnTestServer: {
|
||||
...functionalConfig.get('kbnTestServer'),
|
||||
serverArgs: [
|
||||
...functionalConfig
|
||||
.get('kbnTestServer.serverArgs')
|
||||
// ssl: false as ML vocab API is broken with SSL enabled
|
||||
.filter(
|
||||
(a: string) =>
|
||||
!(
|
||||
a.startsWith('--elasticsearch.hosts=') ||
|
||||
a.startsWith('--elasticsearch.ssl.certificateAuthorities=')
|
||||
)
|
||||
),
|
||||
'--elasticsearch.hosts=http://localhost:9220',
|
||||
`--xpack.actions.preconfigured=${JSON.stringify(preconfiguredConnectors)}`,
|
||||
`--xpack.actions.allowedHosts=["*"]`,
|
||||
`--xpack.securitySolution.enableExperimental=["assistantModelEvaluation"]`,
|
||||
...getTinyElserServerArgs(),
|
||||
],
|
||||
},
|
||||
testFiles: [require.resolve('..')],
|
||||
junit: {
|
||||
reportName: 'GenAI - Knowledge Base Entries Tests - ESS Env - Trial License',
|
||||
},
|
||||
// ssl: false as ML vocab API is broken with SSL enabled
|
||||
servers: {
|
||||
...functionalConfig.get('servers'),
|
||||
elasticsearch: {
|
||||
...functionalConfig.get('servers.elasticsearch'),
|
||||
protocol: 'http',
|
||||
},
|
||||
},
|
||||
esTestCluster: {
|
||||
...functionalConfig.get('esTestCluster'),
|
||||
ssl: false,
|
||||
esJavaOpts: '-Xms4g -Xmx4g',
|
||||
},
|
||||
mochaOpts: {
|
||||
...functionalConfig.get('mochaOpts'),
|
||||
timeout: 360000 * 2,
|
||||
},
|
||||
};
|
||||
}
|
|
@ -0,0 +1,151 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0; you may not use this file except in compliance with the Elastic License
|
||||
* 2.0.
|
||||
*/
|
||||
|
||||
import {
|
||||
API_VERSIONS,
|
||||
ELASTIC_AI_ASSISTANT_EVALUATE_URL,
|
||||
PostEvaluateBody,
|
||||
} from '@kbn/elastic-assistant-common';
|
||||
import { ELASTIC_HTTP_VERSION_HEADER } from '@kbn/core-http-common';
|
||||
import {
|
||||
loadConnectorsFromEnvVar,
|
||||
loadLangSmithKeyFromEnvVar,
|
||||
} from '../../../../scripts/genai/vault/manage_secrets';
|
||||
import { FtrProviderContext } from '../../../../ftr_provider_context';
|
||||
|
||||
import {
|
||||
clearKnowledgeBase,
|
||||
deleteTinyElser,
|
||||
installTinyElser,
|
||||
setupKnowledgeBase,
|
||||
} from '../../knowledge_base/entries/utils/helpers';
|
||||
|
||||
import { MachineLearningProvider } from '../../../../../functional/services/ml';
|
||||
import { routeWithNamespace } from '../../../../../common/utils/security_solution';
|
||||
import { loadEvalKnowledgeBaseEntries } from '../data/kb_entries';
|
||||
|
||||
export default ({ getService }: FtrProviderContext) => {
|
||||
const supertest = getService('supertest');
|
||||
const log = getService('log');
|
||||
const es = getService('es');
|
||||
const ml = getService('ml') as ReturnType<typeof MachineLearningProvider>;
|
||||
const esArchiver = getService('esArchiver');
|
||||
|
||||
/**
|
||||
* Results will be written to LangSmith for project associated with the langSmithAPIKey, then later
|
||||
* aggregated in the below tracking sheet:
|
||||
* https://docs.google.com/spreadsheets/d/1kDNu98XR4eMIlKNq2iHlx5lVS5Npzm9cyDLyUVroiP8/edit?gid=0#gid=0
|
||||
*
|
||||
* Note: Suite is disabled in `.buildkite/ftr_security_stateful_configs.yml` as it has its own
|
||||
* weekly pipeline located at `.buildkite/pipelines/security_solution/gen_ai_evals.yml`
|
||||
*/
|
||||
describe('@ess Basic Security AI Assistant Evaluations', () => {
|
||||
before(async () => {
|
||||
await installTinyElser({ ml, es, log });
|
||||
await setupKnowledgeBase(supertest, log);
|
||||
await esArchiver.load(
|
||||
'x-pack/test/functional/es_archives/security_solution/attack_discovery_alerts'
|
||||
);
|
||||
});
|
||||
|
||||
after(async () => {
|
||||
await deleteTinyElser({ ml, es, log });
|
||||
await esArchiver.unload(
|
||||
'x-pack/test/functional/es_archives/security_solution/attack_discovery_alerts'
|
||||
);
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await clearKnowledgeBase(es);
|
||||
});
|
||||
|
||||
describe('Run Evaluations', () => {
|
||||
const buildNumber = process.env.BUILDKITE_BUILD_NUMBER;
|
||||
const defaultEvalPayload: PostEvaluateBody = {
|
||||
runName: `Eval Automation${buildNumber ? ' - ' + buildNumber : ''}`,
|
||||
graphs: ['DefaultAssistantGraph'],
|
||||
datasetName: 'Sample Dataset',
|
||||
connectorIds: Object.keys(loadConnectorsFromEnvVar()),
|
||||
evaluatorConnectorId: 'gpt-4o',
|
||||
alertsIndexPattern: '.alerts-security.alerts-default',
|
||||
replacements: {},
|
||||
screenContext: {
|
||||
timeZone: 'America/Denver',
|
||||
},
|
||||
size: 10,
|
||||
langSmithApiKey: loadLangSmithKeyFromEnvVar(),
|
||||
};
|
||||
|
||||
describe('Security Assistant', () => {
|
||||
it('should successfully run the "ES|QL Generation Regression Suite" dataset', async () => {
|
||||
const evalPayload: PostEvaluateBody = {
|
||||
...defaultEvalPayload,
|
||||
graphs: ['DefaultAssistantGraph'],
|
||||
datasetName: 'ES|QL Generation Regression Suite',
|
||||
};
|
||||
const route = routeWithNamespace(ELASTIC_AI_ASSISTANT_EVALUATE_URL);
|
||||
await supertest
|
||||
.post(route)
|
||||
.set('kbn-xsrf', 'true')
|
||||
.set(ELASTIC_HTTP_VERSION_HEADER, API_VERSIONS.internal.v1)
|
||||
.send(evalPayload)
|
||||
.expect(200);
|
||||
});
|
||||
|
||||
// Uses attack discovery alerts from episodes 1-8
|
||||
it('should successfully run the "Alerts RAG Regression (Episodes 1-8)" dataset', async () => {
|
||||
const evalPayload: PostEvaluateBody = {
|
||||
...defaultEvalPayload,
|
||||
graphs: ['DefaultAssistantGraph'],
|
||||
datasetName: 'Alerts RAG Regression (Episodes 1-8)',
|
||||
};
|
||||
const route = routeWithNamespace(ELASTIC_AI_ASSISTANT_EVALUATE_URL);
|
||||
await supertest
|
||||
.post(route)
|
||||
.set('kbn-xsrf', 'true')
|
||||
.set(ELASTIC_HTTP_VERSION_HEADER, API_VERSIONS.internal.v1)
|
||||
.send(evalPayload)
|
||||
.expect(200);
|
||||
});
|
||||
|
||||
it('should successfully run the "Assistant Eval: Custom Knowledge" dataset', async () => {
|
||||
await loadEvalKnowledgeBaseEntries(supertest, log);
|
||||
const evalPayload: PostEvaluateBody = {
|
||||
...defaultEvalPayload,
|
||||
graphs: ['DefaultAssistantGraph'],
|
||||
datasetName: 'Assistant Eval: Custom Knowledge',
|
||||
};
|
||||
const route = routeWithNamespace(ELASTIC_AI_ASSISTANT_EVALUATE_URL);
|
||||
await supertest
|
||||
.post(route)
|
||||
.set('kbn-xsrf', 'true')
|
||||
.set(ELASTIC_HTTP_VERSION_HEADER, API_VERSIONS.internal.v1)
|
||||
.send(evalPayload)
|
||||
.expect(200);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Attack Discovery', () => {
|
||||
// Note: This LangSmith dataset includes alerts the alert data, so no need to preload the alerts
|
||||
it('should successfully run the "Eval AD: All Scenarios" dataset', async () => {
|
||||
const evalPayload: PostEvaluateBody = {
|
||||
...defaultEvalPayload,
|
||||
graphs: ['DefaultAttackDiscoveryGraph'],
|
||||
datasetName: 'Attack Discovery: Episode 1',
|
||||
};
|
||||
const route = routeWithNamespace(ELASTIC_AI_ASSISTANT_EVALUATE_URL);
|
||||
await supertest
|
||||
.post(route)
|
||||
.set('kbn-xsrf', 'true')
|
||||
.set(ELASTIC_HTTP_VERSION_HEADER, API_VERSIONS.internal.v1)
|
||||
.send(evalPayload)
|
||||
.expect(200);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
};
|
|
@ -0,0 +1,18 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0; you may not use this file except in compliance with the Elastic License
|
||||
* 2.0.
|
||||
*/
|
||||
|
||||
import { FtrProviderContext } from '../../../../ftr_provider_context';
|
||||
|
||||
export default function ({ loadTestFile, getService }: FtrProviderContext) {
|
||||
describe('GenAI - Evaluations', function () {
|
||||
before(async () => {});
|
||||
|
||||
after(async () => {});
|
||||
|
||||
loadTestFile(require.resolve('./evaluations'));
|
||||
});
|
||||
}
|
|
@ -37,12 +37,12 @@ export default ({ getService }: FtrProviderContext) => {
|
|||
|
||||
describe('@ess Basic Security AI Assistant Knowledge Base Entries', () => {
|
||||
before(async () => {
|
||||
await installTinyElser(ml);
|
||||
await installTinyElser({ es, ml, log });
|
||||
await setupKnowledgeBase(supertest, log);
|
||||
});
|
||||
|
||||
after(async () => {
|
||||
await deleteTinyElser(ml);
|
||||
await deleteTinyElser({ es, ml, log });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
|
|
|
@ -27,32 +27,91 @@ export const TINY_ELSER = {
|
|||
id: SUPPORTED_TRAINED_MODELS.TINY_ELSER.name,
|
||||
};
|
||||
|
||||
export const TINY_ELSER_INFERENCE_ID = `${TINY_ELSER.id}_elasticsearch`;
|
||||
|
||||
/**
|
||||
* Installs `pt_tiny_elser` model for testing Kb features
|
||||
* @param ml
|
||||
*/
|
||||
export const installTinyElser = async (ml: ReturnType<typeof MachineLearningProvider>) => {
|
||||
const config = {
|
||||
...ml.api.getTrainedModelConfig(TINY_ELSER.name),
|
||||
input: {
|
||||
field_names: ['text_field'],
|
||||
},
|
||||
};
|
||||
await ml.api.assureMlStatsIndexExists();
|
||||
await ml.api.importTrainedModel(TINY_ELSER.name, TINY_ELSER.id, config);
|
||||
export const installTinyElser = async ({
|
||||
es,
|
||||
ml,
|
||||
log,
|
||||
}: {
|
||||
es: Client;
|
||||
ml: ReturnType<typeof MachineLearningProvider>;
|
||||
log: ToolingLog;
|
||||
}) => {
|
||||
try {
|
||||
const config = {
|
||||
...ml.api.getTrainedModelConfig(TINY_ELSER.name),
|
||||
input: {
|
||||
field_names: ['text_field'],
|
||||
},
|
||||
};
|
||||
await ml.api.assureMlStatsIndexExists();
|
||||
await ml.api.importTrainedModel(TINY_ELSER.name, TINY_ELSER.id, config);
|
||||
} catch (e) {
|
||||
log.error(`Error installing Tiny Elser: ${e}`);
|
||||
}
|
||||
try {
|
||||
await es.inference.put({
|
||||
task_type: 'sparse_embedding',
|
||||
inference_id: TINY_ELSER_INFERENCE_ID,
|
||||
inference_config: {
|
||||
service: 'elasticsearch',
|
||||
service_settings: {
|
||||
adaptive_allocations: {
|
||||
enabled: true,
|
||||
min_number_of_allocations: 0,
|
||||
max_number_of_allocations: 8,
|
||||
},
|
||||
num_threads: 1,
|
||||
model_id: TINY_ELSER.id,
|
||||
},
|
||||
task_settings: {},
|
||||
},
|
||||
});
|
||||
} catch (e) {
|
||||
log.error(`Error`);
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Deletes `pt_tiny_elser` model for testing Kb features
|
||||
* @param ml
|
||||
*/
|
||||
export const deleteTinyElser = async (ml: ReturnType<typeof MachineLearningProvider>) => {
|
||||
export const deleteTinyElser = async ({
|
||||
es,
|
||||
ml,
|
||||
log,
|
||||
}: {
|
||||
es: Client;
|
||||
ml: ReturnType<typeof MachineLearningProvider>;
|
||||
log: ToolingLog;
|
||||
}) => {
|
||||
try {
|
||||
await es.inference.delete({
|
||||
inference_id: TINY_ELSER_INFERENCE_ID,
|
||||
force: true,
|
||||
});
|
||||
} catch (e) {
|
||||
log.error(`Error deleting Tiny Elser Inference endpoint: ${e}`);
|
||||
}
|
||||
await ml.api.stopTrainedModelDeploymentES(TINY_ELSER.id, true);
|
||||
await ml.api.deleteTrainedModelES(TINY_ELSER.id);
|
||||
await ml.api.cleanMlIndices();
|
||||
await ml.testResources.cleanMLSavedObjects();
|
||||
};
|
||||
|
||||
export const getTinyElserServerArgs = () => {
|
||||
return [
|
||||
`--xpack.productDocBase.elserInferenceId=${TINY_ELSER_INFERENCE_ID}`,
|
||||
`--xpack.securitySolution.siemRuleMigrations.elserInferenceId=${TINY_ELSER_INFERENCE_ID}`,
|
||||
`--xpack.elasticAssistant.elserInferenceId=${TINY_ELSER_INFERENCE_ID}`,
|
||||
];
|
||||
};
|
||||
|
||||
/**
|
||||
* Setup Knowledge Base
|
||||
* @param supertest The supertest deps
|
||||
|
|
|
@ -55,5 +55,7 @@
|
|||
"@kbn/openapi-common",
|
||||
"@kbn/scout-info",
|
||||
"@kbn/security-plugin-types-common",
|
||||
"@kbn/babel-register",
|
||||
"@kbn/config-schema",
|
||||
]
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue