Mirror of https://github.com/elastic/kibana.git, synced 2025-06-27 18:51:07 -04:00
ESS support for FTR serverless tests. SSL support in kbn/es. kbn/es DX improvements. (#162673)
Closes #162593
Closes #163939
Closes #162625

The original intention of this PR was to add FTR support for ESS. However, the scope grew because that also required SSL support, since tests were failing with `security` disabled and no authentication. Additionally, after using serverless in `kbn/es` extensively for this work, there was some DX friction worth smoothing out.

## Summary

- Switch `x-pack/test_serverless` FTR to use ES serverless instead of the (stateful) snapshot
- Adds SSL support to Docker and Serverless in `kbn/es`
- Adds a `port` option override
- Adds a `teardown` option to kill running nodes if the process exits without a shutdown
- Adds a `kill` option to kill running nodes on startup if any are detected
- Adds `--esFrom serverless` to the FTR CLI
- Adds a `files` option to mount extra files into containers
- For serverless, automatically attach to the first node with `docker logs -f es01` on startup for better DX
  - Adds a `background` flag to skip attaching to `logs`
- Adds graceful shutdown for the ESS cluster
- Separate `docker pull` from `run` for better logging; this ensures the latest image and stops multiple pulls of the same image from running in parallel
- Align (most) default settings for ES serverless with the `gradlew` [settings](https://github.com/elastic/elasticsearch-serverless/blob/main/serverless-build-tools/src/main/kotlin/elasticsearch.serverless-run.gradle.kts#L8)
- Fixes Docker bind mount permissions in CI
- Fixes an issue where `esFrom` would default to `snapshot` and override FTR config settings

### Checklist

- [x] [Documentation](https://www.elastic.co/guide/en/kibana/master/development-documentation.html) was added for features that require explanation or tutorials
- [x] [Unit or functional tests](https://www.elastic.co/guide/en/kibana/master/development-tests.html) were updated or added to match the most common scenarios

## Related Issues for Skipped Tests

Security Threat Hunting: #165135
Observability: #165138
Response Ops: #165145

---------

Co-authored-by: Dzmitry Lemechko <dzmitry.lemechko@elastic.co>
Co-authored-by: Tiago Costa <tiago.costa@elastic.co>
Co-authored-by: kibanamachine <42973632+kibanamachine@users.noreply.github.com>
Co-authored-by: Patryk Kopycinski <contact@patrykkopycinski.com>
Parent: 681c2e9cf7
Commit: 06ebc3120c

85 changed files with 2427 additions and 622 deletions
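For context, the new `kbn/es` and FTR flags listed in the summary compose roughly as follows. This is a hedged sketch: the flag names come from the CLI help text in this diff, while the file and config paths are illustrative placeholders.

```bash
# Start a three-node ES serverless cluster with SSL, killing any leftover nodes
# from a previous run, binding to a non-default port, and mounting an extra file
# into the containers (the -F path is a placeholder and must be absolute).
node scripts/es serverless --ssl --kill --port 9220 -F /absolute/path/to/extra_fixture.json

# Same, but do not attach to the first node's logs (useful for CI or scripting).
node scripts/es serverless --ssl --background

# Run FTR against ES serverless instead of a stateful snapshot
# (the config path is illustrative).
node scripts/functional_tests --esFrom serverless --config x-pack/test_serverless/functional/test_suites/security/config.ts
```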

@@ -110,19 +110,20 @@ steps:
    artifact_paths:
      - "target/kibana-security-solution/**/*"

  - command: .buildkite/scripts/steps/functional/security_serverless_defend_workflows.sh
    label: 'Serverless Security Defend Workflows Cypress Tests'
    agents:
      queue: n2-4-spot
    depends_on: build
    timeout_in_minutes: 40
    soft_fail: true
    retry:
      automatic:
        - exit_status: '*'
          limit: 1
    artifact_paths:
      - "target/kibana-security-solution/**/*"
  # status_exception: Native role management is not enabled in this Elasticsearch instance
  # - command: .buildkite/scripts/steps/functional/security_serverless_defend_workflows.sh
  #   label: 'Serverless Security Defend Workflows Cypress Tests'
  #   agents:
  #     queue: n2-4-spot
  #   depends_on: build
  #   timeout_in_minutes: 40
  #   soft_fail: true
  #   retry:
  #     automatic:
  #       - exit_status: '*'
  #         limit: 1
  #   artifact_paths:
  #     - "target/kibana-security-solution/**/*"

  - command: .buildkite/scripts/steps/functional/security_serverless_investigations.sh
    label: 'Serverless Security Investigations Cypress Tests'

@@ -4,7 +4,7 @@ steps:
    agents:
      queue: n2-4-spot
    depends_on: build
    timeout_in_minutes: 120
    timeout_in_minutes: 60
    parallelism: 2
    retry:
      automatic:

@@ -18,7 +18,7 @@ steps:
    agents:
      queue: n2-4-virt
    depends_on: build
    timeout_in_minutes: 120
    timeout_in_minutes: 60
    parallelism: 6
    retry:
      automatic:

@@ -25,16 +25,17 @@ steps:
    artifact_paths:
      - "target/kibana-osquery/**/*"

  - command: .buildkite/scripts/steps/functional/security_serverless_osquery.sh
    label: 'Serverless Osquery Cypress Tests'
    agents:
      queue: n2-4-spot
    depends_on: build
    timeout_in_minutes: 50
    parallelism: 6
    retry:
      automatic:
        - exit_status: '*'
          limit: 1
    artifact_paths:
      - "target/kibana-osquery/**/*"
  # Error: self-signed certificate in certificate chain
  # - command: .buildkite/scripts/steps/functional/security_serverless_osquery.sh
  #   label: 'Serverless Osquery Cypress Tests'
  #   agents:
  #     queue: n2-4-spot
  #   depends_on: build
  #   timeout_in_minutes: 50
  #   parallelism: 6
  #   retry:
  #     automatic:
  #       - exit_status: '*'
  #         limit: 1
  #   artifact_paths:
  #     - "target/kibana-osquery/**/*"

@@ -21,3 +21,7 @@ if [[ -d "$cacheDir" ]]; then
fi

is_test_execution_step

# log in to Docker as a common step for functional tests
echo "$KIBANA_DOCKER_PASSWORD" | docker login -u "$KIBANA_DOCKER_USERNAME" --password-stdin docker.elastic.co
trap 'docker logout docker.elastic.co' EXIT

@@ -2,10 +2,13 @@

set -euo pipefail

source .buildkite/scripts/common/util.sh
source .buildkite/scripts/steps/functional/common.sh
source .buildkite/scripts/steps/functional/common_cypress.sh

.buildkite/scripts/bootstrap.sh
# TODO: remove the line below to use build artifacts for tests.
# In addition to removing that line, we will have to expose the Kibana install dir
# into the downloaded build location by exporting a var like:
# export KIBANA_INSTALL_DIR=${KIBANA_BUILD_LOCATION}
node scripts/build_kibana_platform_plugins.js

export JOB=kibana-osquery-cypress-serverless

@@ -19,6 +19,7 @@ export {
  KBN_P12_PATH,
  KBN_P12_PASSWORD,
} from './src/certs';
export * from './src/dev_service_account';
export * from './src/axios';
export * from './src/plugin_list';
export * from './src/streams';

packages/kbn-dev-utils/src/dev_service_account.ts (new file, 19 lines)

@@ -0,0 +1,19 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

const env = process.env;

/**
 * `kibana-dev` service account token for connecting to ESS
 * See packages/kbn-es/src/ess_resources/README.md
 */
export const kibanaDevServiceAccount = {
  token:
    env.TEST_KIBANA_SERVICE_ACCOUNT_TOKEN ||
    'AAEAAWVsYXN0aWMva2liYW5hL2tpYmFuYS1kZXY6VVVVVVVVTEstKiBaNA',
};
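The hard-coded value above is only a development default; because the export reads `TEST_KIBANA_SERVICE_ACCOUNT_TOKEN` first, a different token can be supplied per environment without code changes. A hedged sketch (the token value and config path are placeholders):

```bash
# Override the kibana-dev service account token before starting FTR servers;
# both the token value and the config path below are placeholders.
export TEST_KIBANA_SERVICE_ACCOUNT_TOKEN='AAEA...replace-with-your-token...'
node scripts/functional_tests_server --config x-pack/test_serverless/functional/test_suites/security/config.ts
```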

@@ -8,4 +8,9 @@

export { run } from './src/cli';
export { Cluster } from './src/cluster';
export { SYSTEM_INDICES_SUPERUSER } from './src/utils';
export {
  SYSTEM_INDICES_SUPERUSER,
  ELASTIC_SERVERLESS_SUPERUSER,
  ELASTIC_SERVERLESS_SUPERUSER_PASSWORD,
  getDockerFileMountPath,
} from './src/utils';

@@ -12,7 +12,7 @@ import { ToolingLog } from '@kbn/tooling-log';
import { getTimeReporter } from '@kbn/ci-stats-reporter';

import { Cluster } from '../cluster';
import { DOCKER_IMG, DOCKER_REPO, DOCKER_TAG } from '../utils';
import { DOCKER_IMG, DOCKER_REPO, DOCKER_TAG, DEFAULT_PORT } from '../utils';
import { Command } from './types';

export const docker: Command = {

@@ -27,8 +27,12 @@ export const docker: Command = {
      --tag       Image tag of ES to run from ${DOCKER_REPO} [default: ${DOCKER_TAG}]
      --image     Full path to image of ES to run, has precedence over tag. [default: ${DOCKER_IMG}]
      --password  Sets password for elastic user [default: ${password}]
      --port      The port to bind to on 127.0.0.1 [default: ${DEFAULT_PORT}]
      --ssl       Sets up SSL on Elasticsearch
      --kill      Kill running ES nodes if detected
      -E          Additional key=value settings to pass to Elasticsearch
      -D          Override Docker command
      -F          Absolute paths for files to mount into container

    Examples:

@@ -50,9 +54,11 @@ export const docker: Command = {
      alias: {
        esArgs: 'E',
        dockerCmd: 'D',
        files: 'F',
      },

      string: ['tag', 'image', 'D'],
      boolean: ['ssl', 'kill'],

      default: defaults,
    });
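The stateful `docker` command gains the same knobs; a hedged sketch of an invocation using them (the port, password, and setting values are illustrative):

```bash
# Run stateful ES in Docker with SSL on a custom port, a custom elastic
# password, and an extra Elasticsearch setting passed through -E.
node scripts/es docker --ssl --kill --port 9220 --password changeme -E cluster.name=docker-dev
```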

@@ -12,7 +12,7 @@ import { ToolingLog } from '@kbn/tooling-log';
import { getTimeReporter } from '@kbn/ci-stats-reporter';

import { Cluster } from '../cluster';
import { SERVERLESS_REPO, SERVERLESS_TAG, SERVERLESS_IMG } from '../utils';
import { SERVERLESS_REPO, SERVERLESS_TAG, SERVERLESS_IMG, DEFAULT_PORT } from '../utils';
import { Command } from './types';

export const serverless: Command = {

@@ -22,10 +22,15 @@ export const serverless: Command = {
    return dedent`
    Options:

      --tag         Image tag of ES Serverless to run from ${SERVERLESS_REPO} [default: ${SERVERLESS_TAG}]
      --image       Full path of ES Serverless image to run, has precedence over tag. [default: ${SERVERLESS_IMG}]
      --tag         Image tag of ESS to run from ${SERVERLESS_REPO} [default: ${SERVERLESS_TAG}]
      --image       Full path of ESS image to run, has precedence over tag. [default: ${SERVERLESS_IMG}]
      --clean       Remove existing file system object store before running
      --port        The port to bind to on 127.0.0.1 [default: ${DEFAULT_PORT}]
      --ssl         Sets up SSL on Elasticsearch
      --kill        Kill running ESS nodes if detected
      --background  Start ESS without attaching to the first node's logs
      -E            Additional key=value settings to pass to Elasticsearch
      -F            Absolute paths for files to mount into containers

    Examples:

@@ -46,10 +51,11 @@ export const serverless: Command = {
      alias: {
        basePath: 'base-path',
        esArgs: 'E',
        files: 'F',
      },

      string: ['tag', 'image'],
      boolean: ['clean'],
      boolean: ['clean', 'ssl', 'kill', 'background'],

      default: defaults,
    });

@@ -16,13 +16,15 @@ const { Client } = require('@elastic/elasticsearch');
const { downloadSnapshot, installSnapshot, installSource, installArchive } = require('./install');
const { ES_BIN, ES_PLUGIN_BIN, ES_KEYSTORE_BIN } = require('./paths');
const {
  log: defaultLog,
  parseEsLog,
  extractConfigFiles,
  log: defaultLog,
  NativeRealm,
  parseEsLog,
  parseTimeoutToMs,
  runServerlessCluster,
  runDockerContainer,
  runServerlessCluster,
  stopServerlessCluster,
  teardownServerlessClusterSync,
} = require('./utils');
const { createCliError } = require('./errors');
const { promisify } = require('util');

@@ -276,6 +278,10 @@ exports.Cluster = class Cluster {
    }
    this._stopCalled = true;

    if (this._serverlessNodes?.length) {
      return await stopServerlessCluster(this._log, this._serverlessNodes);
    }

    if (!this._process || !this._outcome) {
      throw new Error('ES has not been started');
    }

@@ -295,6 +301,10 @@ exports.Cluster = class Cluster {

    this._stopCalled;

    if (this._serverlessNodes?.length) {
      return await stopServerlessCluster(this._log, this._serverlessNodes);
    }

    if (!this._process || !this._outcome) {
      throw new Error('ES has not been started');
    }

@@ -573,7 +583,15 @@ exports.Cluster = class Cluster {
      throw new Error('ES has already been started');
    }

    await runServerlessCluster(this._log, options);
    this._serverlessNodes = await runServerlessCluster(this._log, options);

    if (options.teardown) {
      /**
       * Ideally would be async and an event like beforeExit or SIGINT,
       * but those events are not being triggered in FTR child process.
       */
      process.on('exit', () => teardownServerlessClusterSync(this._log, options));
    }
  }

  /**

packages/kbn-es/src/ess_resources/README.md (new file, 49 lines)

@@ -0,0 +1,49 @@
# Elasticsearch Serverless Resources

The resources in this directory are used for seeding Elasticsearch Serverless (ESS) images with users, roles and tokens for SSL and authentication. ESS requires file realm authentication, so we bind mount them into the containers at `/usr/share/elasticsearch/config/`.

## Users

### Default user

The default superuser credentials to log in to Kibana are:

```
username: elastic_serverless
password: changeme
```
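A quick way to confirm these credentials against a locally running serverless ES started with `--ssl`; the CA path below points at the `@kbn/dev-utils` development CA and is an assumption here:

```bash
# Hedged sketch: authenticate as the default file-realm superuser over HTTPS.
# The --cacert path (the @kbn/dev-utils dev CA) is an assumption; adjust it
# to wherever your development CA lives.
curl --cacert packages/kbn-dev-utils/certs/ca.crt \
  -u elastic_serverless:changeme \
  https://localhost:9200/_security/_authenticate
```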

### Adding users

1. Add the `user:encrypted_password` entry to the `users` file. The encrypted password for `elastic_serverless` is `changeme` if you want to reuse the value (one way to generate a new entry is sketched below).
1. Set the new user's roles in the `users_roles` file.
1. Add the username to `operator_users.yml` in the array for file realm users.
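One hedged way to produce the `user:encrypted_password` and roles entries without hand-crafting a bcrypt hash is to let a stateful Elasticsearch distribution's `elasticsearch-users` tool generate them and copy the output; the image tag below is only an example:

```bash
# Hedged sketch: generate file-realm entries with the elasticsearch-users tool
# from a stateful ES image (the tag is an example), then copy the printed lines
# into packages/kbn-es/src/ess_resources/users and users_roles.
docker run --rm docker.elastic.co/elasticsearch/elasticsearch:8.11.1 \
  bash -c 'bin/elasticsearch-users useradd my_dev_user -p my_dev_password -r superuser \
    && cat config/users config/users_roles'
```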

## Service Account and Tokens

This section on service accounts was originally taken from the [ESS repository](https://github.com/elastic/elasticsearch-serverless/blob/main/serverless-build-tools/src/main/resources/README.service_tokens.md).

The "service_tokens" file contains this line:
```
elastic/kibana/kibana-dev:$2a$10$mY2RuGROhk56vLNh.Mgwue98BnkdQPlTR.yGh38ao5jhPJobvuBCq
```

That line defines a single service token:
- For the `elastic/kibana` service account
- The token is named `kibana-dev`
- The token's secret is hashed using bcrypt (`$2a$`) with `10` rounds

Although Elasticsearch uses PBKDF2_STRETCH by default, the k8s controller
creates tokens using bcrypt, so we mimic that here.

The hash is not reversible, so this README is here to tell you what the secret is.
The secret value is: `UUUUUULK-* Z4`
That produces an encoded token of: `AAEAAWVsYXN0aWMva2liYW5hL2tpYmFuYS1kZXY6VVVVVVVVTEstKiBaNA`
Yes, the secret was specially chosen to produce an encoded value that can be more easily recognised in development.

If a node is configured to use this `service_tokens` file, then you can authenticate to it with
```
curl -H "Authorization: Bearer AAEAAWVsYXN0aWMva2liYW5hL2tpYmFuYS1kZXY6VVVVVVVVTEstKiBaNA" http://localhost:9200/_security/_authenticate
```

The name of the token (`kibana-dev`) is important because the `operator_users.yml` file designates that token as an operator, which allows us to seed an ESS cluster with this token.

packages/kbn-es/src/ess_resources/jwks.json (new file, 10 lines)

@@ -0,0 +1,10 @@
{
  "keys": [
    {
      "kty": "RSA",
      "e": "AQAB",
      "use": "sig",
      "n": "v9-88aGdE4E85PuEycxTA6LkM3TBvNScoeP6A-dd0Myo6-LfBlp1r7BPBWmvi_SC6Zam3U1LE3AekDMwqJg304my0pvh8wOwlmRpgKXDXjvj4s59vdeVNhCB9doIthUABd310o9lyb55fWc_qQYE2LK9AyEjicJswafguH6txV4IwSl13ieZAxni0Ca4CwdzXO1Oi34XjHF8F5x_0puTaQzHn5bPG4fiIJN-pwie0Ba4VEDPO5ca4lLXWVi1bn8xMDTAULrBAXJwDaDdS05KMbc4sPlyQPhtY1gcYvUbozUPYxSWwA7fZgFzV_h-uy_oXf1EXttOxSgog1z3cJzf6Q"
    }
  ]
}

packages/kbn-es/src/ess_resources/operator_users.yml (new file, 9 lines)

@@ -0,0 +1,9 @@
operator:
  - usernames: ["elastic_serverless", "system_indices_superuser"]
    realm_type: "file"
    auth_type: "realm"
  - usernames: [ "elastic/kibana" ]
    realm_type: "_service_account"
    auth_type: "token"
    token_source: "file"
    token_names: [ "kibana-dev" ]

packages/kbn-es/src/ess_resources/role_mapping.yml (new file, 14 lines)

@@ -0,0 +1,14 @@
# Role mapping configuration file which has elasticsearch roles as keys
# that map to one or more user or group distinguished names

#roleA:          this is an elasticsearch role
#  - groupA-DN   this is a group distinguished name
#  - groupB-DN
#  - user1-DN    this is the full user distinguished name

#power_user:
#  - "cn=admins,dc=example,dc=com"
#user:
#  - "cn=users,dc=example,dc=com"
#  - "cn=admins,dc=example,dc=com"
#  - "cn=John Doe,cn=other users,dc=example,dc=com"

packages/kbn-es/src/ess_resources/roles.yml (new file, 792 lines)

@@ -0,0 +1,792 @@
|
|||
---
|
||||
system_indices_superuser:
|
||||
cluster: ['all']
|
||||
indices:
|
||||
- names: ['*']
|
||||
privileges: ['all']
|
||||
allow_restricted_indices: true
|
||||
applications:
|
||||
- application: '*'
|
||||
privileges: ['*']
|
||||
resources: ['*']
|
||||
run_as: ['*']
|
||||
|
||||
# -----
|
||||
# Source: https://github.com/elastic/project-controller/blob/main/internal/project/observability/config/roles.yml
|
||||
# and: https://github.com/elastic/project-controller/blob/main/internal/project/esproject/config/roles.yml
|
||||
# -----
|
||||
viewer:
|
||||
cluster: []
|
||||
indices:
|
||||
- names:
|
||||
- "*"
|
||||
privileges:
|
||||
- read
|
||||
- names:
|
||||
- "/~(([.]|ilm-history-).*)/"
|
||||
privileges:
|
||||
- "read"
|
||||
- "view_index_metadata"
|
||||
allow_restricted_indices: false
|
||||
- names:
|
||||
- ".siem-signals*"
|
||||
- ".lists-*"
|
||||
- ".items-*"
|
||||
privileges:
|
||||
- "read"
|
||||
- "view_index_metadata"
|
||||
allow_restricted_indices: false
|
||||
- names:
|
||||
- ".alerts*"
|
||||
- ".preview.alerts*"
|
||||
privileges:
|
||||
- "read"
|
||||
- "view_index_metadata"
|
||||
allow_restricted_indices: false
|
||||
applications:
|
||||
- application: "kibana-.kibana"
|
||||
privileges:
|
||||
- "read"
|
||||
resources:
|
||||
- "*"
|
||||
run_as: []
|
||||
editor:
|
||||
cluster: []
|
||||
indices:
|
||||
- names:
|
||||
- "/~(([.]|ilm-history-).*)/"
|
||||
privileges:
|
||||
- "read"
|
||||
- "view_index_metadata"
|
||||
allow_restricted_indices: false
|
||||
- names:
|
||||
- "observability-annotations"
|
||||
privileges:
|
||||
- "read"
|
||||
- "view_index_metadata"
|
||||
- "write"
|
||||
allow_restricted_indices: false
|
||||
- names:
|
||||
- ".siem-signals*"
|
||||
- ".lists-*"
|
||||
- ".items-*"
|
||||
privileges:
|
||||
- "read"
|
||||
- "view_index_metadata"
|
||||
- "write"
|
||||
- "maintenance"
|
||||
allow_restricted_indices: false
|
||||
- names:
|
||||
- ".internal.alerts*"
|
||||
- ".alerts*"
|
||||
- ".internal.preview.alerts*"
|
||||
- ".preview.alerts*"
|
||||
privileges:
|
||||
- "read"
|
||||
- "view_index_metadata"
|
||||
- "write"
|
||||
- "maintenance"
|
||||
allow_restricted_indices: false
|
||||
applications:
|
||||
- application: "kibana-.kibana"
|
||||
privileges:
|
||||
- "all"
|
||||
resources:
|
||||
- "*"
|
||||
run_as: []
|
||||
|
||||
# -----
|
||||
# Source: https://github.com/elastic/project-controller/blob/main/internal/project/security/config/roles.yml
|
||||
# -----
|
||||
t1_analyst:
|
||||
cluster:
|
||||
indices:
|
||||
- names:
|
||||
- ".alerts-security*"
|
||||
- ".siem-signals-*"
|
||||
privileges:
|
||||
- read
|
||||
- write
|
||||
- maintenance
|
||||
- names:
|
||||
- apm-*-transaction*
|
||||
- traces-apm*
|
||||
- auditbeat-*
|
||||
- endgame-*
|
||||
- filebeat-*
|
||||
- logs-*
|
||||
- packetbeat-*
|
||||
- winlogbeat-*
|
||||
- metrics-endpoint.metadata_current_*
|
||||
- ".fleet-agents*"
|
||||
- ".fleet-actions*"
|
||||
privileges:
|
||||
- read
|
||||
applications:
|
||||
- application: ml
|
||||
privileges:
|
||||
- read
|
||||
resources: "*"
|
||||
- application: siem
|
||||
privileges:
|
||||
- read
|
||||
- read_alerts
|
||||
- endpoint_list_read
|
||||
resources: "*"
|
||||
- application: securitySolutionCases
|
||||
privileges:
|
||||
- read
|
||||
resources: "*"
|
||||
- application: actions
|
||||
privileges:
|
||||
- read
|
||||
resources: "*"
|
||||
- application: builtInAlerts
|
||||
privileges:
|
||||
- read
|
||||
resources: "*"
|
||||
- application: spaces
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: osquery
|
||||
privileges:
|
||||
- read
|
||||
- run_saved_queries
|
||||
resources: "*"
|
||||
|
||||
t2_analyst:
|
||||
cluster:
|
||||
indices:
|
||||
- names:
|
||||
- .alerts-security*
|
||||
- .siem-signals-*
|
||||
privileges:
|
||||
- read
|
||||
- write
|
||||
- maintenance
|
||||
- names:
|
||||
- .lists*
|
||||
- .items*
|
||||
- apm-*-transaction*
|
||||
- traces-apm*
|
||||
- auditbeat-*
|
||||
- endgame-*
|
||||
- filebeat-*
|
||||
- logs-*
|
||||
- packetbeat-*
|
||||
- winlogbeat-*
|
||||
- metrics-endpoint.metadata_current_*
|
||||
- .fleet-agents*
|
||||
- .fleet-actions*
|
||||
privileges:
|
||||
- read
|
||||
applications:
|
||||
- application: ml
|
||||
privileges:
|
||||
- read
|
||||
resources: "*"
|
||||
- application: siem
|
||||
privileges:
|
||||
- read
|
||||
- read_alerts
|
||||
- endpoint_list_read
|
||||
resources: "*"
|
||||
- application: securitySolutionCases
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: actions
|
||||
privileges:
|
||||
- read
|
||||
resources: "*"
|
||||
- application: builtInAlerts
|
||||
privileges:
|
||||
- read
|
||||
resources: "*"
|
||||
- application: spaces
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: osquery
|
||||
privileges:
|
||||
- read
|
||||
- run_saved_queries
|
||||
resources: "*"
|
||||
|
||||
t3_analyst:
|
||||
cluster:
|
||||
indices:
|
||||
- names:
|
||||
- apm-*-transaction*
|
||||
- traces-apm*
|
||||
- auditbeat-*
|
||||
- endgame-*
|
||||
- filebeat-*
|
||||
- logs-*
|
||||
- packetbeat-*
|
||||
- winlogbeat-*
|
||||
privileges:
|
||||
- read
|
||||
- write
|
||||
- names:
|
||||
- .alerts-security*
|
||||
- .siem-signals-*
|
||||
privileges:
|
||||
- read
|
||||
- write
|
||||
- names:
|
||||
- .lists*
|
||||
- .items*
|
||||
privileges:
|
||||
- read
|
||||
- write
|
||||
- names:
|
||||
- metrics-endpoint.metadata_current_*
|
||||
- .fleet-agents*
|
||||
- .fleet-actions*
|
||||
privileges:
|
||||
- read
|
||||
applications:
|
||||
- application: ml
|
||||
privileges:
|
||||
- read
|
||||
resources: "*"
|
||||
- application: siem
|
||||
privileges:
|
||||
- all
|
||||
- read_alerts
|
||||
- crud_alerts
|
||||
- endpoint_list_all
|
||||
- trusted_applications_all
|
||||
- event_filters_all
|
||||
- host_isolation_exceptions_all
|
||||
- blocklist_all
|
||||
- policy_management_read # Elastic Defend Policy Management
|
||||
- host_isolation_all
|
||||
- process_operations_all
|
||||
- actions_log_management_all # Response actions history
|
||||
- file_operations_all
|
||||
resources: "*"
|
||||
- application: securitySolutionCases
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: actions
|
||||
privileges:
|
||||
- read
|
||||
resources: "*"
|
||||
- application: builtInAlerts
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: osquery
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: spaces
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
|
||||
threat_intelligence_analyst:
|
||||
cluster:
|
||||
indices:
|
||||
- names:
|
||||
- apm-*-transaction*
|
||||
- traces-apm*
|
||||
- auditbeat-*
|
||||
- endgame-*
|
||||
- filebeat-*
|
||||
- logs-*
|
||||
- .lists*
|
||||
- .items*
|
||||
- packetbeat-*
|
||||
- winlogbeat-*
|
||||
privileges:
|
||||
- read
|
||||
- names:
|
||||
- .alerts-security*
|
||||
- .siem-signals-*
|
||||
privileges:
|
||||
- read
|
||||
- write
|
||||
- maintenance
|
||||
- names:
|
||||
- metrics-endpoint.metadata_current_*
|
||||
- .fleet-agents*
|
||||
- .fleet-actions*
|
||||
privileges:
|
||||
- read
|
||||
applications:
|
||||
- application: ml
|
||||
privileges:
|
||||
- read
|
||||
resources: "*"
|
||||
- application: siem
|
||||
privileges:
|
||||
- read
|
||||
- read_alerts
|
||||
- endpoint_list_read
|
||||
- blocklist_all
|
||||
resources: "*"
|
||||
- application: securitySolutionCases
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: actions
|
||||
privileges:
|
||||
- read
|
||||
resources: "*"
|
||||
- application: builtInAlerts
|
||||
privileges:
|
||||
- read
|
||||
resources: "*"
|
||||
- application: spaces
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: osquery
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
|
||||
rule_author:
|
||||
cluster:
|
||||
indices:
|
||||
- names:
|
||||
- apm-*-transaction*
|
||||
- traces-apm*
|
||||
- auditbeat-*
|
||||
- endgame-*
|
||||
- filebeat-*
|
||||
- logs-*
|
||||
- packetbeat-*
|
||||
- winlogbeat-*
|
||||
privileges:
|
||||
- read
|
||||
- write
|
||||
- names:
|
||||
- .alerts-security*
|
||||
- .siem-signals-*
|
||||
- .internal.preview.alerts-security*
|
||||
- .preview.alerts-security*
|
||||
privileges:
|
||||
- read
|
||||
- write
|
||||
- maintenance
|
||||
- view_index_metadata
|
||||
- names:
|
||||
- .lists*
|
||||
- .items*
|
||||
privileges:
|
||||
- read
|
||||
- write
|
||||
- names:
|
||||
- metrics-endpoint.metadata_current_*
|
||||
- .fleet-agents*
|
||||
- .fleet-actions*
|
||||
privileges:
|
||||
- read
|
||||
applications:
|
||||
- application: ml
|
||||
privileges:
|
||||
- read
|
||||
resources: "*"
|
||||
- application: siem
|
||||
privileges:
|
||||
- all
|
||||
- read_alerts
|
||||
- crud_alerts
|
||||
- policy_management_all
|
||||
- endpoint_list_all
|
||||
- trusted_applications_all
|
||||
- event_filters_all
|
||||
- host_isolation_exceptions_read
|
||||
- blocklist_all
|
||||
- actions_log_management_read
|
||||
resources: "*"
|
||||
- application: securitySolutionCases
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: actions
|
||||
privileges:
|
||||
- read
|
||||
resources: "*"
|
||||
- application: builtInAlerts
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: spaces
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
|
||||
soc_manager:
|
||||
cluster:
|
||||
indices:
|
||||
- names:
|
||||
- apm-*-transaction*
|
||||
- traces-apm*
|
||||
- auditbeat-*
|
||||
- endgame-*
|
||||
- filebeat-*
|
||||
- logs-*
|
||||
- packetbeat-*
|
||||
- winlogbeat-*
|
||||
privileges:
|
||||
- read
|
||||
- write
|
||||
- names:
|
||||
- .alerts-security*
|
||||
- .siem-signals-*
|
||||
- .preview.alerts-security*
|
||||
- .internal.preview.alerts-security*
|
||||
privileges:
|
||||
- read
|
||||
- write
|
||||
- manage
|
||||
- names:
|
||||
- .lists*
|
||||
- .items*
|
||||
privileges:
|
||||
- read
|
||||
- write
|
||||
- names:
|
||||
- metrics-endpoint.metadata_current_*
|
||||
- .fleet-agents*
|
||||
- .fleet-actions*
|
||||
privileges:
|
||||
- read
|
||||
applications:
|
||||
- application: ml
|
||||
privileges:
|
||||
- read
|
||||
resources: "*"
|
||||
- application: siem
|
||||
privileges:
|
||||
- all
|
||||
- read_alerts
|
||||
- crud_alerts
|
||||
- policy_management_all
|
||||
- endpoint_list_all
|
||||
- trusted_applications_all
|
||||
- event_filters_all
|
||||
- host_isolation_exceptions_all
|
||||
- blocklist_all
|
||||
- host_isolation_all
|
||||
- process_operations_all
|
||||
- actions_log_management_all
|
||||
- file_operations_all
|
||||
- execute_operations_all
|
||||
resources: "*"
|
||||
- application: securitySolutionCases
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: actions
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: builtInAlerts
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: spaces
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: osquery
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: savedObjectsManagement
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
|
||||
detections_admin:
|
||||
cluster:
|
||||
indices:
|
||||
- names:
|
||||
- apm-*-transaction*
|
||||
- traces-apm*
|
||||
- auditbeat-*
|
||||
- endgame-*
|
||||
- filebeat-*
|
||||
- logs-*
|
||||
- packetbeat-*
|
||||
- winlogbeat-*
|
||||
- .lists*
|
||||
- .items*
|
||||
- .alerts-security*
|
||||
- .siem-signals-*
|
||||
- .preview.alerts-security*
|
||||
- .internal.preview.alerts-security*
|
||||
privileges:
|
||||
- read
|
||||
- write
|
||||
- manage
|
||||
- names:
|
||||
- metrics-endpoint.metadata_current_*
|
||||
- .fleet-agents*
|
||||
- .fleet-actions*
|
||||
privileges:
|
||||
- read
|
||||
applications:
|
||||
- application: ml
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: siem
|
||||
privileges:
|
||||
- all
|
||||
- read_alerts
|
||||
- crud_alerts
|
||||
resources: "*"
|
||||
- application: securitySolutionCases
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: actions
|
||||
privileges:
|
||||
- read
|
||||
resources: "*"
|
||||
- application: builtInAlerts
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: dev_tools
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: spaces
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
|
||||
platform_engineer:
|
||||
cluster:
|
||||
- manage
|
||||
indices:
|
||||
- names:
|
||||
- apm-*-transaction*
|
||||
- traces-apm*
|
||||
- auditbeat-*
|
||||
- endgame-*
|
||||
- filebeat-*
|
||||
- logs-*
|
||||
- packetbeat-*
|
||||
- winlogbeat-*
|
||||
- .lists*
|
||||
- .items*
|
||||
- .alerts-security*
|
||||
- .siem-signals-*
|
||||
- .preview.alerts-security*
|
||||
- .internal.preview.alerts-security*
|
||||
privileges:
|
||||
- all
|
||||
applications:
|
||||
- application: ml
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: siem
|
||||
privileges:
|
||||
- all
|
||||
- read_alerts
|
||||
- crud_alerts
|
||||
- policy_management_all
|
||||
- endpoint_list_all
|
||||
- trusted_applications_all
|
||||
- event_filters_all
|
||||
- host_isolation_exceptions_all
|
||||
- blocklist_all
|
||||
- actions_log_management_read
|
||||
resources: "*"
|
||||
- application: securitySolutionCases
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: actions
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: builtInAlerts
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: fleet
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: fleetv2
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: spaces
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: osquery
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
|
||||
endpoint_operations_analyst:
|
||||
cluster:
|
||||
indices:
|
||||
- names:
|
||||
- metrics-endpoint.metadata_current_*
|
||||
- .fleet-agents*
|
||||
- .fleet-actions*
|
||||
privileges:
|
||||
- read
|
||||
- names:
|
||||
- apm-*-transaction*
|
||||
- traces-apm*
|
||||
- auditbeat-*
|
||||
- endgame-*
|
||||
- filebeat-*
|
||||
- logs-*
|
||||
- packetbeat-*
|
||||
- winlogbeat-*
|
||||
- .lists*
|
||||
- .items*
|
||||
privileges:
|
||||
- read
|
||||
- names:
|
||||
- .alerts-security*
|
||||
- .siem-signals-*
|
||||
- .preview.alerts-security*
|
||||
- .internal.preview.alerts-security*
|
||||
privileges:
|
||||
- read
|
||||
- write
|
||||
applications:
|
||||
- application: ml
|
||||
privileges:
|
||||
- read
|
||||
resources: "*"
|
||||
- application: siem
|
||||
privileges:
|
||||
- all
|
||||
- read_alerts
|
||||
- policy_management_all
|
||||
- endpoint_list_all
|
||||
- trusted_applications_all
|
||||
- event_filters_all
|
||||
- host_isolation_exceptions_all
|
||||
- blocklist_all
|
||||
- host_isolation_all
|
||||
- process_operations_all
|
||||
- actions_log_management_all # Response History
|
||||
- file_operations_all
|
||||
- execute_operations_all # Execute
|
||||
resources: "*"
|
||||
- application: securitySolutionCases
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: actions
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: builtInAlerts
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: osquery
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: fleet
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: fleetv2
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: spaces
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
|
||||
endpoint_policy_manager:
|
||||
cluster:
|
||||
indices:
|
||||
- names:
|
||||
- metrics-endpoint.metadata_current_*
|
||||
- .fleet-agents*
|
||||
- .fleet-actions*
|
||||
privileges:
|
||||
- read
|
||||
- names:
|
||||
- apm-*-transaction*
|
||||
- traces-apm*
|
||||
- auditbeat-*
|
||||
- endgame-*
|
||||
- filebeat-*
|
||||
- logs-*
|
||||
- packetbeat-*
|
||||
- winlogbeat-*
|
||||
- .lists*
|
||||
- .items*
|
||||
privileges:
|
||||
- read
|
||||
- names:
|
||||
- .alerts-security*
|
||||
- .siem-signals-*
|
||||
- .preview.alerts-security*
|
||||
- .internal.preview.alerts-security*
|
||||
privileges:
|
||||
- read
|
||||
- write
|
||||
- manage
|
||||
applications:
|
||||
- application: ml
|
||||
privileges:
|
||||
- read
|
||||
resources: "*"
|
||||
- application: siem
|
||||
privileges:
|
||||
- all
|
||||
- read_alerts
|
||||
- crud_alerts
|
||||
- policy_management_all
|
||||
- trusted_applications_all
|
||||
- event_filters_all
|
||||
- host_isolation_exceptions_all
|
||||
- blocklist_all
|
||||
- endpoint_list_all
|
||||
resources: "*"
|
||||
- application: securitySolutionCases
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: actions
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: builtInAlerts
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: osquery
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: fleet
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: fleetv2
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|
||||
- application: spaces
|
||||
privileges:
|
||||
- all
|
||||
resources: "*"
|

packages/kbn-es/src/ess_resources/secrets.json (new file, 11 lines)

@@ -0,0 +1,11 @@
{
  "metadata": {
    "version": "1",
    "compatibility": "8.11.0"
  },
  "string_secrets": {
    "xpack.security.http.ssl.keystore.secure_password": "storepass",
    "xpack.security.transport.ssl.keystore.secure_password": "storepass",
    "xpack.security.authc.realms.jwt.jwt1.client_authentication.shared_secret": "my_super_secret"
  }
}

packages/kbn-es/src/ess_resources/service_tokens (new file, 1 line)

@@ -0,0 +1 @@
elastic/kibana/kibana-dev:$2a$10$mY2RuGROhk56vLNh.Mgwue98BnkdQPlTR.yGh38ao5jhPJobvuBCq

packages/kbn-es/src/ess_resources/users (new file, 2 lines)

@@ -0,0 +1,2 @@
elastic_serverless:$2a$10$nN6sRtQl2KX9Gn8kV/.NpOLSk6Jwn8TehEDnZ7aaAgzyl/dy5PYzW
system_indices_superuser:$2a$10$nN6sRtQl2KX9Gn8kV/.NpOLSk6Jwn8TehEDnZ7aaAgzyl/dy5PYzW

packages/kbn-es/src/ess_resources/users_roles (new file, 2 lines)

@@ -0,0 +1,2 @@
superuser:elastic_serverless
system_indices_superuser:system_indices_superuser

@@ -7,7 +7,7 @@
 */

import Os from 'os';
import Path from 'path';
import { resolve } from 'path';

function maybeUseBat(bin: string) {
  return Os.platform().startsWith('win') ? `${bin}.bat` : bin;

@@ -15,7 +15,7 @@ function maybeUseBat(bin: string) {

const tempDir = Os.tmpdir();

export const BASE_PATH = Path.resolve(tempDir, 'kbn-es');
export const BASE_PATH = resolve(tempDir, 'kbn-es');

export const GRADLE_BIN = maybeUseBat('./gradlew');
export const ES_BIN = maybeUseBat('bin/elasticsearch');

@@ -23,3 +23,30 @@ export const ES_PLUGIN_BIN = maybeUseBat('bin/elasticsearch-plugin');
export const ES_CONFIG = 'config/elasticsearch.yml';

export const ES_KEYSTORE_BIN = maybeUseBat('./bin/elasticsearch-keystore');

export const ESS_OPERATOR_USERS_PATH = resolve(__dirname, './ess_resources/operator_users.yml');
export const ESS_SERVICE_TOKENS_PATH = resolve(__dirname, './ess_resources/service_tokens');

export const ESS_USERS_PATH = resolve(__dirname, './ess_resources/users');
export const ESS_USERS_ROLES_PATH = resolve(__dirname, './ess_resources/users_roles');

export const ESS_ROLES_PATH = resolve(__dirname, './ess_resources/roles.yml');
export const ESS_ROLE_MAPPING_PATH = resolve(__dirname, './ess_resources/role_mapping.yml');

export const ESS_SECRETS_PATH = resolve(__dirname, './ess_resources/secrets.json');

export const ESS_JWKS_PATH = resolve(__dirname, './ess_resources/jwks.json');

export const ESS_RESOURCES_PATHS = [
  ESS_OPERATOR_USERS_PATH,
  ESS_ROLE_MAPPING_PATH,
  ESS_ROLES_PATH,
  ESS_SERVICE_TOKENS_PATH,
  ESS_USERS_PATH,
  ESS_USERS_ROLES_PATH,
];

export const ESS_CONFIG_PATH = '/usr/share/elasticsearch/config/';

// Files need to be inside config for permissions reasons inside the container
export const ESS_FILES_PATH = `${ESS_CONFIG_PATH}files/`;
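Given `ESS_CONFIG_PATH` and `ESS_FILES_PATH` above, files passed to `kbn/es` with `-F` end up under the container's config directory with their repo-relative path preserved. A hedged sketch (the host path is a placeholder):

```bash
# Mount an extra file into the serverless ES containers; the -F path must be absolute.
node scripts/es serverless -F "$PWD/x-pack/test_serverless/some_fixture.json"
# With REPO_ROOT stripped, the file is mounted inside the containers at roughly:
#   /usr/share/elasticsearch/config/files/x-pack/test_serverless/some_fixture.json
```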
@ -12,18 +12,25 @@ import { stat } from 'fs/promises';
|
|||
|
||||
import {
|
||||
DOCKER_IMG,
|
||||
detectRunningNodes,
|
||||
maybeCreateDockerNetwork,
|
||||
maybePullDockerImage,
|
||||
resolveDockerCmd,
|
||||
resolveDockerImage,
|
||||
resolveEsArgs,
|
||||
resolvePort,
|
||||
runDockerContainer,
|
||||
runServerlessCluster,
|
||||
runServerlessEsNode,
|
||||
SERVERLESS_IMG,
|
||||
setupServerlessVolumes,
|
||||
stopServerlessCluster,
|
||||
teardownServerlessClusterSync,
|
||||
verifyDockerInstalled,
|
||||
} from './docker';
|
||||
import { ToolingLog, ToolingLogCollectingWriter } from '@kbn/tooling-log';
|
||||
import { ES_P12_PATH } from '@kbn/dev-utils';
|
||||
import { ESS_RESOURCES_PATHS } from '../paths';
|
||||
|
||||
jest.mock('execa');
|
||||
const execa = jest.requireMock('execa');
|
||||
|
@ -62,7 +69,7 @@ const volumeCmdTest = async (volumeCmd: string[]) => {
|
|||
|
||||
// extract only permission from mode
|
||||
// eslint-disable-next-line no-bitwise
|
||||
expect((await stat(serverlessObjectStorePath)).mode & 0o777).toBe(0o766);
|
||||
expect((await stat(serverlessObjectStorePath)).mode & 0o777).toBe(0o777);
|
||||
};
|
||||
|
||||
describe('resolveDockerImage()', () => {
|
||||
|
@ -103,6 +110,32 @@ describe('resolveDockerImage()', () => {
|
|||
});
|
||||
});
|
||||
|
||||
describe('resolvePort()', () => {
|
||||
test('should return default port when no options', () => {
|
||||
const port = resolvePort({});
|
||||
|
||||
expect(port).toMatchInlineSnapshot(`
|
||||
Array [
|
||||
"-p",
|
||||
"127.0.0.1:9200:9200",
|
||||
]
|
||||
`);
|
||||
});
|
||||
|
||||
test('should return custom port when passed in options', () => {
|
||||
const port = resolvePort({ port: 9220 });
|
||||
|
||||
expect(port).toMatchInlineSnapshot(`
|
||||
Array [
|
||||
"-p",
|
||||
"127.0.0.1:9220:9220",
|
||||
"--env",
|
||||
"http.port=9220",
|
||||
]
|
||||
`);
|
||||
});
|
||||
});
|
||||
|
||||
describe('verifyDockerInstalled()', () => {
|
||||
test('should call the correct Docker command and log the version', async () => {
|
||||
execa.mockImplementationOnce(() => Promise.resolve({ stdout: 'Docker Version 123' }));
|
||||
|
@ -190,6 +223,55 @@ describe('maybeCreateDockerNetwork()', () => {
|
|||
});
|
||||
});
|
||||
|
||||
describe('maybePullDockerImage()', () => {
|
||||
test('should pull the passed image', async () => {
|
||||
execa.mockImplementationOnce(() => Promise.resolve({ exitCode: 0 }));
|
||||
|
||||
await maybePullDockerImage(log, DOCKER_IMG);
|
||||
|
||||
expect(execa.mock.calls[0][0]).toEqual('docker');
|
||||
expect(execa.mock.calls[0][1]).toEqual(expect.arrayContaining(['pull', DOCKER_IMG]));
|
||||
});
|
||||
});
|
||||
|
||||
describe('detectRunningNodes()', () => {
|
||||
const nodes = ['es01', 'es02', 'es03'];
|
||||
|
||||
test('should not error if no nodes detected', async () => {
|
||||
execa.mockImplementationOnce(() => Promise.resolve({ stdout: '' }));
|
||||
|
||||
await detectRunningNodes(log, {});
|
||||
|
||||
expect(execa.mock.calls).toHaveLength(1);
|
||||
expect(execa.mock.calls[0][1]).toEqual(expect.arrayContaining(['ps', '--quiet', '--filter']));
|
||||
});
|
||||
|
||||
test('should kill nodes if detected and kill passed', async () => {
|
||||
execa.mockImplementationOnce(() =>
|
||||
Promise.resolve({
|
||||
stdout: nodes.join('\n'),
|
||||
})
|
||||
);
|
||||
|
||||
await detectRunningNodes(log, { kill: true });
|
||||
|
||||
expect(execa.mock.calls).toHaveLength(2);
|
||||
expect(execa.mock.calls[1][1]).toEqual(expect.arrayContaining(nodes.concat('kill')));
|
||||
});
|
||||
|
||||
test('should error if nodes detected and kill not passed', async () => {
|
||||
execa.mockImplementationOnce(() =>
|
||||
Promise.resolve({
|
||||
stdout: nodes.join('\n'),
|
||||
})
|
||||
);
|
||||
|
||||
await expect(detectRunningNodes(log, {})).rejects.toThrowErrorMatchingInlineSnapshot(
|
||||
`"ES has already been started, pass --kill to automatically stop the nodes on startup."`
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('resolveEsArgs()', () => {
|
||||
const defaultEsArgs: Array<[string, string]> = [
|
||||
['foo', 'bar'],
|
||||
|
@ -253,6 +335,39 @@ describe('resolveEsArgs()', () => {
|
|||
]
|
||||
`);
|
||||
});
|
||||
|
||||
test('should add SSL args and enable security when SSL is passed', () => {
|
||||
const esArgs = resolveEsArgs([...defaultEsArgs, ['xpack.security.enabled', 'false']], {
|
||||
ssl: true,
|
||||
});
|
||||
|
||||
expect(esArgs).toHaveLength(20);
|
||||
expect(esArgs).not.toEqual(expect.arrayContaining(['xpack.security.enabled=false']));
|
||||
expect(esArgs).toMatchInlineSnapshot(`
|
||||
Array [
|
||||
"--env",
|
||||
"foo=bar",
|
||||
"--env",
|
||||
"qux=zip",
|
||||
"--env",
|
||||
"xpack.security.enabled=true",
|
||||
"--env",
|
||||
"xpack.security.http.ssl.enabled=true",
|
||||
"--env",
|
||||
"xpack.security.http.ssl.keystore.path=/usr/share/elasticsearch/config/certs/elasticsearch.p12",
|
||||
"--env",
|
||||
"xpack.security.http.ssl.verification_mode=certificate",
|
||||
"--env",
|
||||
"xpack.security.transport.ssl.enabled=true",
|
||||
"--env",
|
||||
"xpack.security.transport.ssl.keystore.path=/usr/share/elasticsearch/config/certs/elasticsearch.p12",
|
||||
"--env",
|
||||
"xpack.security.transport.ssl.verification_mode=certificate",
|
||||
"--env",
|
||||
"xpack.security.operator_privileges.enabled=true",
|
||||
]
|
||||
`);
|
||||
});
|
||||
});
|
||||
|
||||
describe('setupServerlessVolumes()', () => {
|
||||
|
@ -292,6 +407,20 @@ describe('setupServerlessVolumes()', () => {
|
|||
volumeCmdTest(volumeCmd);
|
||||
expect(existsSync(`${serverlessObjectStorePath}/cluster_state/lease`)).toBe(false);
|
||||
});
|
||||
|
||||
test('should add SSL volumes when ssl is passed', async () => {
|
||||
mockFs(existingObjectStore);
|
||||
|
||||
const volumeCmd = await setupServerlessVolumes(log, { basePath: baseEsPath, ssl: true });
|
||||
|
||||
const requiredPaths = [`${baseEsPath}:/objectstore:z`, ES_P12_PATH, ...ESS_RESOURCES_PATHS];
|
||||
const pathsNotIncludedInCmd = requiredPaths.filter(
|
||||
(path) => !volumeCmd.some((cmd) => cmd.includes(path))
|
||||
);
|
||||
|
||||
expect(volumeCmd).toHaveLength(20);
|
||||
expect(pathsNotIncludedInCmd).toEqual([]);
|
||||
});
|
||||
});
|
||||
|
||||
describe('runServerlessEsNode()', () => {
|
||||
|
@ -333,8 +462,49 @@ describe('runServerlessCluster()', () => {
|
|||
|
||||
await runServerlessCluster(log, { basePath: baseEsPath });
|
||||
|
||||
// Verify Docker and network then run three nodes
|
||||
expect(execa.mock.calls).toHaveLength(5);
|
||||
// setupDocker execa calls then run three nodes and attach logger
|
||||
expect(execa.mock.calls).toHaveLength(8);
|
||||
});
|
||||
});
|
||||
|
||||
describe('stopServerlessCluster()', () => {
|
||||
test('should stop passed in nodes', async () => {
|
||||
const nodes = ['es01', 'es02', 'es03'];
|
||||
execa.mockImplementation(() => Promise.resolve({ stdout: '' }));
|
||||
|
||||
await stopServerlessCluster(log, nodes);
|
||||
|
||||
expect(execa.mock.calls[0][0]).toEqual('docker');
|
||||
expect(execa.mock.calls[0][1]).toEqual(
|
||||
expect.arrayContaining(['container', 'stop'].concat(nodes))
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('teardownServerlessClusterSync()', () => {
|
||||
const defaultOptions = { basePath: 'foo/bar' };
|
||||
|
||||
test('should kill running serverless nodes', () => {
|
||||
const nodes = ['es01', 'es02', 'es03'];
|
||||
execa.commandSync.mockImplementation(() => ({
|
||||
stdout: nodes.join('\n'),
|
||||
}));
|
||||
|
||||
teardownServerlessClusterSync(log, defaultOptions);
|
||||
|
||||
expect(execa.commandSync.mock.calls).toHaveLength(2);
|
||||
expect(execa.commandSync.mock.calls[0][0]).toEqual(expect.stringContaining(SERVERLESS_IMG));
|
||||
expect(execa.commandSync.mock.calls[1][0]).toEqual(`docker kill ${nodes.join(' ')}`);
|
||||
});
|
||||
|
||||
test('should not kill if no serverless nodes', () => {
|
||||
execa.commandSync.mockImplementation(() => ({
|
||||
stdout: '\n',
|
||||
}));
|
||||
|
||||
teardownServerlessClusterSync(log, defaultOptions);
|
||||
|
||||
expect(execa.commandSync.mock.calls).toHaveLength(1);
|
||||
});
|
||||
});
|
||||
|
||||
|
@ -364,7 +534,7 @@ describe('runDockerContainer()', () => {
|
|||
execa.mockImplementation(() => Promise.resolve({ stdout: '' }));
|
||||
|
||||
await expect(runDockerContainer(log, {})).resolves.toEqual({ stdout: '' });
|
||||
// Verify Docker and network then run container
|
||||
expect(execa.mock.calls).toHaveLength(3);
|
||||
// setupDocker execa calls then run container
|
||||
expect(execa.mock.calls).toHaveLength(5);
|
||||
});
|
||||
});
|
||||
|
|
|
@ -9,17 +9,29 @@ import chalk from 'chalk';
|
|||
import execa from 'execa';
|
||||
import fs from 'fs';
|
||||
import Fsp from 'fs/promises';
|
||||
import { resolve } from 'path';
|
||||
import { resolve, basename, join } from 'path';
|
||||
|
||||
import { ToolingLog } from '@kbn/tooling-log';
|
||||
import { kibanaPackageJson as pkg } from '@kbn/repo-info';
|
||||
import { kibanaPackageJson as pkg, REPO_ROOT } from '@kbn/repo-info';
|
||||
import { ES_P12_PASSWORD, ES_P12_PATH } from '@kbn/dev-utils';
|
||||
|
||||
import { createCliError } from '../errors';
|
||||
import { EsClusterExecOptions } from '../cluster_exec_options';
|
||||
import {
|
||||
ESS_RESOURCES_PATHS,
|
||||
ESS_SECRETS_PATH,
|
||||
ESS_JWKS_PATH,
|
||||
ESS_CONFIG_PATH,
|
||||
ESS_FILES_PATH,
|
||||
} from '../paths';
|
||||
|
||||
interface BaseOptions {
|
||||
tag?: string;
|
||||
image?: string;
|
||||
port?: number;
|
||||
ssl?: boolean;
|
||||
kill?: boolean;
|
||||
files?: string | string[];
|
||||
}
|
||||
|
||||
export interface DockerOptions extends EsClusterExecOptions, BaseOptions {
|
||||
|
@ -29,6 +41,8 @@ export interface DockerOptions extends EsClusterExecOptions, BaseOptions {
|
|||
export interface ServerlessOptions extends EsClusterExecOptions, BaseOptions {
|
||||
clean?: boolean;
|
||||
basePath: string;
|
||||
teardown?: boolean;
|
||||
background?: boolean;
|
||||
}
|
||||
|
||||
interface ServerlessEsNodeArgs {
|
||||
|
@ -38,6 +52,7 @@ interface ServerlessEsNodeArgs {
|
|||
params: string[];
|
||||
}
|
||||
|
||||
export const DEFAULT_PORT = 9200;
|
||||
const DOCKER_REGISTRY = 'docker.elastic.co';
|
||||
|
||||
const DOCKER_BASE_CMD = [
|
||||
|
@ -53,9 +68,6 @@ const DOCKER_BASE_CMD = [
|
|||
'--name',
|
||||
'es01',
|
||||
|
||||
'-p',
|
||||
'127.0.0.1:9200:9200',
|
||||
|
||||
'-p',
|
||||
'127.0.0.1:9300:9300',
|
||||
];
|
||||
|
@ -78,6 +90,8 @@ export const SERVERLESS_REPO = `${DOCKER_REGISTRY}/elasticsearch-ci/elasticsearc
|
|||
export const SERVERLESS_TAG = 'latest';
|
||||
export const SERVERLESS_IMG = `${SERVERLESS_REPO}:${SERVERLESS_TAG}`;
|
||||
|
||||
// See for default cluster settings
|
||||
// https://github.com/elastic/elasticsearch-serverless/blob/main/serverless-build-tools/src/main/kotlin/elasticsearch.serverless-run.gradle.kts
|
||||
const SHARED_SERVERLESS_PARAMS = [
|
||||
'run',
|
||||
|
||||
|
@ -85,9 +99,16 @@ const SHARED_SERVERLESS_PARAMS = [
|
|||
|
||||
'--detach',
|
||||
|
||||
'--interactive',
|
||||
|
||||
'--tty',
|
||||
|
||||
'--net',
|
||||
'elastic',
|
||||
|
||||
'--env',
|
||||
'path.repo=/objectstore',
|
||||
|
||||
'--env',
|
||||
'cluster.initial_master_nodes=es01,es02,es03',
|
||||
|
||||
|
@ -99,27 +120,65 @@ const SHARED_SERVERLESS_PARAMS = [
|
|||
|
||||
'--env',
|
||||
'stateless.object_store.bucket=stateless',
|
||||
|
||||
'--env',
|
||||
'path.repo=/objectstore',
|
||||
];
|
||||
|
||||
// only allow certain ES args to be overwrote by options
|
||||
const DEFAULT_SERVERLESS_ESARGS: Array<[string, string]> = [
|
||||
['ES_JAVA_OPTS', '-Xms1g -Xmx1g'],
|
||||
|
||||
['xpack.security.enabled', 'false'],
|
||||
['ES_LOG_STYLE', 'file'],
|
||||
|
||||
['cluster.name', 'stateless'],
|
||||
|
||||
['ingest.geoip.downloader.enabled', 'false'],
|
||||
|
||||
['xpack.ml.enabled', 'true'],
|
||||
|
||||
['xpack.security.enabled', 'false'],
|
||||
];
|
||||
|
||||
const DEFAULT_SSL_ESARGS: Array<[string, string]> = [
|
||||
['xpack.security.enabled', 'true'],
|
||||
|
||||
['xpack.security.http.ssl.enabled', 'true'],
|
||||
|
||||
['xpack.security.http.ssl.keystore.path', `${ESS_CONFIG_PATH}certs/elasticsearch.p12`],
|
||||
|
||||
['xpack.security.http.ssl.verification_mode', 'certificate'],
|
||||
|
||||
['xpack.security.transport.ssl.enabled', 'true'],
|
||||
|
||||
['xpack.security.transport.ssl.keystore.path', `${ESS_CONFIG_PATH}certs/elasticsearch.p12`],
|
||||
|
||||
['xpack.security.transport.ssl.verification_mode', 'certificate'],
|
||||
|
||||
['xpack.security.operator_privileges.enabled', 'true'],
|
||||
];
|
||||
|
||||
const SERVERLESS_SSL_ESARGS: Array<[string, string]> = [
|
||||
['xpack.security.authc.realms.jwt.jwt1.client_authentication.type', 'shared_secret'],
|
||||
|
||||
['xpack.security.authc.realms.jwt.jwt1.order', '-98'],
|
||||
|
||||
['xpack.security.authc.realms.jwt.jwt1.allowed_issuer', 'https://kibana.elastic.co/jwt/'],
|
||||
|
||||
['xpack.security.authc.realms.jwt.jwt1.allowed_audiences', 'elasticsearch'],
|
||||
|
||||
['xpack.security.authc.realms.jwt.jwt1.pkc_jwkset_path', `${ESS_CONFIG_PATH}secrets/jwks.json`],
|
||||
|
||||
['xpack.security.authc.realms.jwt.jwt1.claims.principal', 'sub'],
|
||||
];
|
||||
|
||||
const DOCKER_SSL_ESARGS: Array<[string, string]> = [
|
||||
['xpack.security.http.ssl.keystore.password', ES_P12_PASSWORD],
|
||||
|
||||
['xpack.security.transport.ssl.keystore.password', ES_P12_PASSWORD],
|
||||
];
|
||||
|
||||
const SERVERLESS_NODES: Array<Omit<ServerlessEsNodeArgs, 'image'>> = [
|
||||
{
|
||||
name: 'es01',
|
||||
params: [
|
||||
'-p',
|
||||
'127.0.0.1:9200:9200',
|
||||
|
||||
'-p',
|
||||
'127.0.0.1:9300:9300',
|
||||
|
||||
|
@ -127,9 +186,13 @@ const SERVERLESS_NODES: Array<Omit<ServerlessEsNodeArgs, 'image'>> = [
|
|||
'discovery.seed_hosts=es02,es03',
|
||||
|
||||
'--env',
|
||||
'node.roles=["master","index"]',
|
||||
'node.roles=["master","remote_cluster_client","ingest","index"]',
|
||||
],
|
||||
esArgs: [
|
||||
['xpack.searchable.snapshot.shared_cache.size', '16MB'],
|
||||
|
||||
['xpack.searchable.snapshot.shared_cache.region_size', '256K'],
|
||||
],
|
||||
esArgs: [['xpack.searchable.snapshot.shared_cache.size', '1gb']],
|
||||
},
|
||||
{
|
||||
name: 'es02',
|
||||
|
@ -144,9 +207,13 @@ const SERVERLESS_NODES: Array<Omit<ServerlessEsNodeArgs, 'image'>> = [
|
|||
'discovery.seed_hosts=es01,es03',
|
||||
|
||||
'--env',
|
||||
'node.roles=["master","search"]',
|
||||
'node.roles=["master","remote_cluster_client","search"]',
|
||||
],
|
||||
esArgs: [
|
||||
['xpack.searchable.snapshot.shared_cache.size', '16MB'],
|
||||
|
||||
['xpack.searchable.snapshot.shared_cache.region_size', '256K'],
|
||||
],
|
||||
esArgs: [['xpack.searchable.snapshot.shared_cache.size', '1gb']],
|
||||
},
|
||||
{
|
||||
name: 'es03',
|
||||
|
@ -161,7 +228,7 @@ const SERVERLESS_NODES: Array<Omit<ServerlessEsNodeArgs, 'image'>> = [
|
|||
'discovery.seed_hosts=es01,es02',
|
||||
|
||||
'--env',
|
||||
'node.roles=["master"]',
|
||||
'node.roles=["master","remote_cluster_client","ml","transform"]',
|
||||
],
|
||||
},
|
||||
];
|
||||
|
@ -190,6 +257,22 @@ export function resolveDockerImage({
|
|||
return defaultImg;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine the port to bind the Serverless index node or Docker node to
|
||||
*/
|
||||
export function resolvePort(options: ServerlessOptions | DockerOptions) {
|
||||
if (options.port) {
|
||||
return [
|
||||
'-p',
|
||||
`127.0.0.1:${options.port}:${options.port}`,
|
||||
'--env',
|
||||
`http.port=${options.port}`,
|
||||
];
|
||||
}
|
||||
|
||||
return ['-p', `127.0.0.1:${DEFAULT_PORT}:${DEFAULT_PORT}`];
|
||||
}
|
||||
|
||||
/**
|
||||
* Verify that Docker is installed locally
|
||||
*/
|
||||
|
@ -227,12 +310,66 @@ export async function maybeCreateDockerNetwork(log: ToolingLog) {
|
|||
log.indent(-4);
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* Pull a Docker image if needed. Ensures latest image.
|
||||
* Stops serverless from pulling the same image in each node's promise and
|
||||
* gives better control of log output, instead of falling back to docker run.
|
||||
*/
|
||||
export async function maybePullDockerImage(log: ToolingLog, image: string) {
|
||||
log.info(chalk.bold(`Checking for image: ${image}`));
|
||||
|
||||
await execa('docker', ['pull', image], {
|
||||
// inherit is required to show Docker output
|
||||
stdio: ['ignore', 'inherit', 'inherit'],
|
||||
}).catch(({ message }) => {
|
||||
throw createCliError(message);
|
||||
});
|
||||
}
|
||||
|
||||
export async function detectRunningNodes(
|
||||
log: ToolingLog,
|
||||
options: ServerlessOptions | DockerOptions
|
||||
) {
|
||||
const namesCmd = SERVERLESS_NODES.reduce<string[]>((acc, { name }) => {
|
||||
acc.push('--filter', `name=${name}`);
|
||||
|
||||
return acc;
|
||||
}, []);
|
||||
|
||||
const { stdout } = await execa('docker', ['ps', '--quiet'].concat(namesCmd));
|
||||
const runningNodes = stdout.split(/\r?\n/).filter((s) => s);
|
||||
|
||||
if (runningNodes.length) {
|
||||
if (options.kill) {
|
||||
log.info(chalk.bold('Killing running ES Nodes.'));
|
||||
await execa('docker', ['kill'].concat(runningNodes));
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
throw createCliError(
|
||||
'ES has already been started, pass --kill to automatically stop the nodes on startup.'
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Common setup for Docker and Serverless containers
|
||||
*/
|
||||
async function setupDocker(log: ToolingLog) {
|
||||
async function setupDocker({
|
||||
log,
|
||||
image,
|
||||
options,
|
||||
}: {
|
||||
log: ToolingLog;
|
||||
image: string;
|
||||
options: ServerlessOptions | DockerOptions;
|
||||
}) {
|
||||
await verifyDockerInstalled(log);
|
||||
await detectRunningNodes(log, options);
|
||||
await maybeCreateDockerNetwork(log);
|
||||
await maybePullDockerImage(log, image);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -242,10 +379,17 @@ export function resolveEsArgs(
defaultEsArgs: Array<[string, string]>,
options: ServerlessOptions | DockerOptions
) {
const { esArgs: customEsArgs, password, ssl } = options;
const esArgs = new Map(defaultEsArgs);

if (options.esArgs) {
const args = typeof options.esArgs === 'string' ? [options.esArgs] : options.esArgs;
if (ssl) {
DEFAULT_SSL_ESARGS.forEach((arg) => {
esArgs.set(arg[0], arg[1]);
});
}

if (customEsArgs) {
const args = typeof customEsArgs === 'string' ? [customEsArgs] : customEsArgs;

args.forEach((arg) => {
const [key, ...value] = arg.split('=');

@@ -253,29 +397,43 @@ export function resolveEsArgs(
});
}

if (options.password) {
esArgs.set('ELASTIC_PASSWORD', options.password);
if (password) {
esArgs.set('ELASTIC_PASSWORD', password);
}

return Array.from(esArgs).flatMap((e) => ['--env', e.join('=')]);
}

function getESp12Volume() {
return ['--volume', `${ES_P12_PATH}:${ESS_CONFIG_PATH}certs/elasticsearch.p12`];
}
/**
 * Removes REPO_ROOT from hostPath. Keep the rest to avoid filename collisions.
 * Returns the path where a file will be mounted inside the ES or ESS container.
 * /root/kibana/package/foo/bar.json => /usr/share/elasticsearch/files/package/foo/bar.json
 */
export function getDockerFileMountPath(hostPath: string) {
return join(ESS_FILES_PATH, hostPath.replace(REPO_ROOT, ''));
}

/**
 * Setup local volumes for Serverless ES
 */
export async function setupServerlessVolumes(log: ToolingLog, options: ServerlessOptions) {
const volumePath = resolve(options.basePath, 'stateless');
const { basePath, clean, ssl, files } = options;
const objectStorePath = resolve(basePath, 'stateless');

log.info(chalk.bold(`Checking for local Serverless ES object store at ${volumePath}`));
log.info(chalk.bold(`Checking for local serverless ES object store at ${objectStorePath}`));
log.indent(4);

if (options.clean && fs.existsSync(volumePath)) {
if (clean && fs.existsSync(objectStorePath)) {
log.info('Cleaning existing object store.');
await Fsp.rm(volumePath, { recursive: true, force: true });
await Fsp.rm(objectStorePath, { recursive: true, force: true });
}

if (options.clean || !fs.existsSync(volumePath)) {
await Fsp.mkdir(volumePath, { recursive: true }).then(() =>
if (clean || !fs.existsSync(objectStorePath)) {
await Fsp.mkdir(objectStorePath, { recursive: true }).then(() =>
log.info('Created new object store.')
);
} else {

@@ -283,13 +441,45 @@ export async function setupServerlessVolumes(log: ToolingLog, options: ServerlessOptions) {
}

// Permissions are set separately from mkdir due to default umask
await Fsp.chmod(volumePath, 0o766).then(() =>
log.info('Setup object store permissions (chmod 766).')
);
await Fsp.chmod(objectStorePath, 0o777).then(() => {
log.info('Setup object store permissions (chmod 777).');
});

log.indent(-4);

return ['--volume', `${options.basePath}:/objectstore:z`];
const volumeCmds = ['--volume', `${basePath}:/objectstore:z`];

if (files) {
const _files = typeof files === 'string' ? [files] : files;
const fileCmds = _files.reduce<string[]>((acc, filePath) => {
acc.push('--volume', `${filePath}:${getDockerFileMountPath(filePath)}:z`);

return acc;
}, []);

volumeCmds.push(...fileCmds);
}

if (ssl) {
const essResources = ESS_RESOURCES_PATHS.reduce<string[]>((acc, path) => {
acc.push('--volume', `${path}:${ESS_CONFIG_PATH}${basename(path)}`);

return acc;
}, []);

volumeCmds.push(
...getESp12Volume(),
...essResources,
'--volume',
`${ESS_SECRETS_PATH}:${ESS_CONFIG_PATH}secrets/secrets.json:z`,
'--volume',
`${ESS_JWKS_PATH}:${ESS_CONFIG_PATH}secrets/jwks.json:z`
);
}

return volumeCmds;
}
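Roughly, for a serverless run with ssl and one extra file mounted, the returned flags look like the following sketch (paths are illustrative; the real values come from basePath, getDockerFileMountPath, ESS_RESOURCES_PATHS and the secrets/JWKS constants):

const exampleVolumeCmds = [
  '--volume', '/tmp/kbn-es/base:/objectstore:z',
  '--volume', '/root/kibana/some/roles.yml:/usr/share/elasticsearch/files/some/roles.yml:z',
  // when ssl is set: the elasticsearch.p12 cert, each ESS resource file, secrets.json and jwks.json
];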
/**
@@ -316,7 +506,7 @@ export async function runServerlessEsNode(
image
);

log.info(chalk.bold(`Running Serverless ES node: ${name}`));
log.info(chalk.bold(`Running serverless ES node: ${name}`));
log.indent(4, () => log.info(chalk.dim(`docker ${dockerCmd.join(' ')}`)));

const { stdout } = await execa('docker', dockerCmd);
@@ -336,18 +526,25 @@ export async function runServerlessEsNode(
 * Runs an ES Serverless Cluster through Docker
 */
export async function runServerlessCluster(log: ToolingLog, options: ServerlessOptions) {
await setupDocker(log);
const image = getServerlessImage(options);
await setupDocker({ log, image, options });

const volumeCmd = await setupServerlessVolumes(log, options);
const image = getServerlessImage(options);

const nodeNames = await Promise.all(
SERVERLESS_NODES.map(async (node) => {
SERVERLESS_NODES.map(async (node, i) => {
await runServerlessEsNode(log, {
...node,
image,
params: node.params.concat(
resolveEsArgs(DEFAULT_SERVERLESS_ESARGS.concat(node.esArgs ?? []), options),
resolveEsArgs(
DEFAULT_SERVERLESS_ESARGS.concat(
node.esArgs ?? [],
options.ssl ? SERVERLESS_SSL_ESARGS : []
),
options
),
i === 0 ? resolvePort(options) : [],
volumeCmd
),
});

@@ -358,6 +555,42 @@ export async function runServerlessCluster(log: ToolingLog, options: ServerlessOptions) {
log.success(`Serverless ES cluster running.
Stop the cluster: ${chalk.bold(`docker container stop ${nodeNames.join(' ')}`)}
`);

if (!options.background) {
// The ESS cluster has to be started detached, so we attach a logger afterwards for output
await execa('docker', ['logs', '-f', SERVERLESS_NODES[0].name], {
// inherit is required to show Docker output and Java console output for pw, enrollment token, etc
stdio: ['ignore', 'inherit', 'inherit'],
});
}

return nodeNames;
}
/**
 * Stop a serverless ES cluster by node names
 */
export async function stopServerlessCluster(log: ToolingLog, nodes: string[]) {
log.info('Stopping serverless ES cluster.');

await execa('docker', ['container', 'stop'].concat(nodes));
}

/**
 * Kill any serverless ES nodes which are running.
 */
export function teardownServerlessClusterSync(log: ToolingLog, options: ServerlessOptions) {
const { stdout } = execa.commandSync(
`docker ps --filter status=running --filter ancestor=${getServerlessImage(options)} --quiet`
);
// Filter empty strings
const runningNodes = stdout.split(/\r?\n/).filter((s) => s);

if (runningNodes.length) {
log.info('Killing running serverless ES nodes.');

execa.commandSync(`docker kill ${runningNodes.join(' ')}`);
}
}
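A rough sketch of how a caller could honour the new teardown option (hypothetical wiring; the real hook lives in the kbn/es CLI): the function is synchronous on purpose, so it is safe to run inside a process exit handler.

if (options.teardown) {
  process.on('exit', () => teardownServerlessClusterSync(log, options));
}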
/**
@@ -370,29 +603,35 @@ function getDockerImage(options: DockerOptions) {
/**
 * Resolve the full command to run Elasticsearch Docker container
 */
export function resolveDockerCmd(options: DockerOptions) {
export function resolveDockerCmd(options: DockerOptions, image: string = DOCKER_IMG) {
if (options.dockerCmd) {
return options.dockerCmd.split(' ');
}

return DOCKER_BASE_CMD.concat(
resolveEsArgs(DEFAULT_DOCKER_ESARGS, options),
getDockerImage(options)
resolveEsArgs(DEFAULT_DOCKER_ESARGS.concat(options.ssl ? DOCKER_SSL_ESARGS : []), options),
resolvePort(options),
options.ssl ? getESp12Volume() : [],
image
);
}

/**
 * Runs an Elasticsearch Docker Container
 */
export async function runDockerContainer(log: ToolingLog, options: DockerOptions) {
await setupDocker(log);
let image;

const dockerCmd = resolveDockerCmd(options);
if (!options.dockerCmd) {
image = getDockerImage(options);
await setupDocker({ log, image, options });
}

const dockerCmd = resolveDockerCmd(options, image);

log.info(chalk.dim(`docker ${dockerCmd.join(' ')}`));
return await execa('docker', dockerCmd, {
// inherit is required to show Docker pull output and Java console output for pw, enrollment token, etc
// inherit is required to show Docker output and Java console output for pw, enrollment token, etc
stdio: ['ignore', 'inherit', 'inherit'],
});
}
10  packages/kbn-es/src/utils/ess_file_realm.ts  Normal file
@@ -0,0 +1,10 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

export const ELASTIC_SERVERLESS_SUPERUSER = 'elastic_serverless';
export const ELASTIC_SERVERLESS_SUPERUSER_PASSWORD = 'changeme';
@@ -17,3 +17,4 @@ export { buildSnapshot } from './build_snapshot';
export { archiveForPlatform } from './build_snapshot';
export * from './parse_timeout_to_ms';
export * from './docker';
export * from './ess_file_realm';

@@ -38,6 +38,7 @@ export {
kibanaTestUser,
adminTestUser,
systemIndicesSuperuser,
kibanaTestSuperuserServerless,
} from './src/kbn';

// @internal

@@ -58,3 +59,5 @@ export * from './src/kbn_archiver_cli';
export * from './src/kbn_client';

export * from './src/find_test_plugin_paths';

export { getDockerFileMountPath } from '@kbn/es';
@@ -18,12 +18,17 @@ export interface UrlParts {
password?: string;
}

interface UserAuth {
username: string;
password: string;
}

export const kbnTestConfig = new (class KbnTestConfig {
getPort() {
return this.getUrlParts().port;
}

getUrlParts(): UrlParts {
getUrlParts(user: UserAuth = kibanaTestUser): UrlParts {
// allow setting one complete TEST_KIBANA_URL for ES like https://elastic:changeme@example.com:9200
if (process.env.TEST_KIBANA_URL) {
const testKibanaUrl = url.parse(process.env.TEST_KIBANA_URL);

@@ -37,8 +42,8 @@ export const kbnTestConfig = new (class KbnTestConfig {
};
}

const username = process.env.TEST_KIBANA_USERNAME || kibanaTestUser.username;
const password = process.env.TEST_KIBANA_PASSWORD || kibanaTestUser.password;
const username = process.env.TEST_KIBANA_USERNAME || user.username;
const password = process.env.TEST_KIBANA_PASSWORD || user.password;
return {
protocol: process.env.TEST_KIBANA_PROTOCOL || 'http',
hostname: process.env.TEST_KIBANA_HOSTNAME || 'localhost',
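A short illustration of the new optional user parameter (hypothetical caller; assumes kbnTestConfig and kibanaTestSuperuserServerless are both re-exported from @kbn/test): serverless FTR configs can resolve connection details for the file-realm superuser instead of the default test user.

import { kbnTestConfig, kibanaTestSuperuserServerless } from '@kbn/test';

const urlParts = kbnTestConfig.getUrlParts(kibanaTestSuperuserServerless);
// username/password in urlParts now default to the serverless superuser
// unless TEST_KIBANA_USERNAME / TEST_KIBANA_PASSWORD are set.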
@@ -143,6 +143,14 @@ export interface CreateTestEsClusterOptions {
 * this caller to react appropriately. If this is not passed then an uncatchable exception will be thrown
 */
onEarlyExit?: (msg: string) => void;
/**
 * Is this a serverless project
 */
serverless?: boolean;
/**
 * Files to mount inside ES containers
 */
files?: string[];
}

export function createTestEsCluster<

@@ -164,6 +172,7 @@ export function createTestEsCluster<
ssl,
transportPort,
onEarlyExit,
files,
} = options;

const clusterName = `${CI_PARALLEL_PROCESS_PREFIX}${customClusterName}`;

@@ -218,6 +227,18 @@ export function createTestEsCluster<
installPath = (await firstNode.installSource(config)).installPath;
} else if (esFrom === 'snapshot') {
installPath = (await firstNode.installSnapshot(config)).installPath;
} else if (esFrom === 'serverless') {
return await firstNode.runServerless({
basePath,
esArgs: customEsArgs,
port,
clean: true,
teardown: true,
ssl: true,
background: true,
files,
kill: true, // likely don't need this but avoids any issues where the ESS cluster wasn't cleaned up
});
} else if (Path.isAbsolute(esFrom)) {
installPath = esFrom;
} else {
@@ -210,6 +210,7 @@ export const schema = Joi.object()
scheme: /https?/,
}),
}),
files: Joi.array().items(Joi.string()),
})
.default(),

@@ -46,6 +46,8 @@ function getEsConfig({
: config.get('servers.elasticsearch.password');

const dataArchive: string | undefined = config.get('esTestCluster.dataArchive');
const serverless: boolean = config.get('serverless');
const files: string[] | undefined = config.get('esTestCluster.files');

return {
ssl,

@@ -58,6 +60,8 @@ function getEsConfig({
password,
dataArchive,
ccsConfig,
serverless,
files,
};
}

@@ -140,6 +144,8 @@ async function startEsNode({
],
transportPort: config.transportPort,
onEarlyExit,
serverless: config.serverless,
files: config.files,
});

await cluster.start();
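To tie these config reads together, a hedged sketch of the relevant bits of an FTR config (field names follow the reads above; the roles file path is a placeholder):

export default async () => ({
  serverless: true,
  esTestCluster: {
    from: 'serverless',
    files: [require.resolve('./custom_roles.yml')], // mounted into the containers via getDockerFileMountPath
  },
});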
@@ -41,7 +41,7 @@ describe('parse runTest flags', () => {
<absolute path>/foo,
],
"dryRun": false,
"esFrom": "snapshot",
"esFrom": undefined,
"esVersion": <EsVersion 9.9.9>,
"grep": undefined,
"installDir": undefined,

@@ -108,7 +108,7 @@ describe('parse runTest flags', () => {
it('validates esFrom', () => {
expect(() => test({ esFrom: 'foo' })).toThrowErrorMatchingInlineSnapshot(
`"invalid --esFrom, expected one of \\"snapshot\\", \\"source\\""`
`"invalid --esFrom, expected one of \\"snapshot\\", \\"source\\", \\"serverless\\""`
);
});

@@ -36,7 +36,7 @@ export const FLAG_OPTIONS: FlagOptions = {
help: `
--config Define a FTR config that should be executed. Can be specified multiple times
--journey Define a Journey that should be executed. Can be specified multiple times
--esFrom Build Elasticsearch from source or run from snapshot. Default: $TEST_ES_FROM or "snapshot"
--esFrom Build Elasticsearch from source or run snapshot or serverless. Default: $TEST_ES_FROM or "snapshot"
--include-tag Tags that suites must include to be run, can be included multiple times
--exclude-tag Tags that suites must NOT include to be run, can be included multiple times
--include Files that must included to be run, can be included multiple times

@@ -74,7 +74,7 @@ export function parseFlags(flags: FlagsReader) {
logsDir: flags.boolean('logToFile')
? Path.resolve(REPO_ROOT, 'data/ftr_servers_logs', uuidV4())
: undefined,
esFrom: flags.enum('esFrom', ['snapshot', 'source']) ?? 'snapshot',
esFrom: flags.enum('esFrom', ['snapshot', 'source', 'serverless']),
installDir: flags.path('kibana-install-dir'),
grep: flags.string('grep'),
suiteTags: {

@@ -42,6 +42,9 @@ export async function runTests(log: ToolingLog, options: RunTestsOptions) {
dryRun: options.dryRun,
grep: options.grep,
},
esTestCluster: {
from: options.esFrom,
},
kbnTestServer: {
installDir: options.installDir,
},
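As a usage note (illustrative command; the config path is only an example), the extra enum value means a suite can now be pointed at an ES serverless container straight from the CLI:

node scripts/functional_tests --esFrom serverless --config x-pack/test_serverless/functional/test_suites/common/config.ts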
@@ -23,7 +23,7 @@ export const FLAG_OPTIONS: FlagOptions = {
help: `
--config Define a FTR config that should be executed. Can be specified multiple times
--journey Define a Journey that should be executed. Can be specified multiple times
--esFrom Build Elasticsearch from source or run from snapshot. Default: $TEST_ES_FROM or "snapshot"
--esFrom Build Elasticsearch from source or run snapshot or serverless. Default: $TEST_ES_FROM or "snapshot"
--kibana-install-dir Run Kibana from existing install directory instead of from source
--logToFile Write the log output from Kibana/ES to files instead of to stdout
`,

@@ -40,7 +40,7 @@ export function parseFlags(flags: FlagsReader) {
return {
config: configs[0],
esFrom: flags.enum('esFrom', ['source', 'snapshot']),
esFrom: flags.enum('esFrom', ['source', 'snapshot', 'serverless']),
esVersion: EsVersion.getDefault(),
installDir: flags.string('kibana-install-dir'),
logsDir: flags.boolean('logToFile')

@@ -11,4 +11,5 @@ export {
kibanaServerTestUser,
adminTestUser,
systemIndicesSuperuser,
kibanaTestSuperuserServerless,
} from './users';
@@ -6,7 +6,11 @@
 * Side Public License, v 1.
 */

import { SYSTEM_INDICES_SUPERUSER } from '@kbn/es';
import {
SYSTEM_INDICES_SUPERUSER,
ELASTIC_SERVERLESS_SUPERUSER,
ELASTIC_SERVERLESS_SUPERUSER_PASSWORD,
} from '@kbn/es';

const env = process.env;

@@ -32,3 +36,8 @@ export const systemIndicesSuperuser = {
username: SYSTEM_INDICES_SUPERUSER,
password: env.TEST_ES_PASS || 'changeme',
};

export const kibanaTestSuperuserServerless = {
username: ELASTIC_SERVERLESS_SUPERUSER,
password: ELASTIC_SERVERLESS_SUPERUSER_PASSWORD,
};
@@ -15,6 +15,7 @@ import { isKibanaDistributable } from '@kbn/repo-info';
import { readKeystore } from '../keystore/read_keystore';
import { compileConfigStack } from './compile_config_stack';
import { getConfigFromFiles } from '@kbn/config';
import { kibanaDevServiceAccount } from '@kbn/dev-utils';

const DEV_MODE_PATH = '@kbn/cli-dev-mode';
const DEV_MODE_SUPPORTED = canRequire(DEV_MODE_PATH);

@@ -68,6 +69,10 @@ export function applyConfigOverrides(rawConfig, opts, extraCliOptions) {
delete extraCliOptions.env;

if (opts.dev) {
if (opts.serverless) {
set('elasticsearch.serviceAccountToken', kibanaDevServiceAccount.token);
}

if (!has('elasticsearch.serviceAccountToken') && opts.devCredentials !== false) {
if (!has('elasticsearch.username')) {
set('elasticsearch.username', 'kibana_system');
@ -0,0 +1,191 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0; you may not use this file except in compliance with the Elastic License
|
||||
* 2.0.
|
||||
*/
|
||||
|
||||
import _ from 'lodash';
|
||||
|
||||
import { EsVersion, readConfigFile } from '@kbn/test';
|
||||
import type { ToolingLog } from '@kbn/tooling-log';
|
||||
import { getLocalhostRealIp } from '../endpoint/common/localhost_services';
|
||||
import type { parseTestFileConfig } from './utils';
|
||||
|
||||
export const getFTRConfig = ({
|
||||
log,
|
||||
esPort,
|
||||
kibanaPort,
|
||||
fleetServerPort,
|
||||
ftrConfigFilePath,
|
||||
specFilePath,
|
||||
specFileFTRConfig,
|
||||
isOpen,
|
||||
}: {
|
||||
log: ToolingLog;
|
||||
esPort: number;
|
||||
kibanaPort: number;
|
||||
fleetServerPort: number;
|
||||
ftrConfigFilePath: string;
|
||||
specFilePath: string;
|
||||
specFileFTRConfig: ReturnType<typeof parseTestFileConfig>;
|
||||
isOpen: boolean;
|
||||
}) =>
|
||||
readConfigFile(
|
||||
log,
|
||||
EsVersion.getDefault(),
|
||||
ftrConfigFilePath,
|
||||
{
|
||||
servers: {
|
||||
elasticsearch: {
|
||||
port: esPort,
|
||||
},
|
||||
kibana: {
|
||||
port: kibanaPort,
|
||||
},
|
||||
fleetserver: {
|
||||
port: fleetServerPort,
|
||||
},
|
||||
},
|
||||
// CAUTION: Do not override here kbnTestServer.serverArgs
|
||||
// or important configs like ssl key and certificate will be lost.
|
||||
// Please do it in the section bellow on extendedSettings
|
||||
//
|
||||
// kbnTestServer: {
|
||||
// serverArgs: [
|
||||
// ...
|
||||
// ],
|
||||
// },
|
||||
},
|
||||
(vars) => {
|
||||
const hostRealIp = getLocalhostRealIp();
|
||||
|
||||
const hasFleetServerArgs = _.some(
|
||||
vars.kbnTestServer.serverArgs,
|
||||
(value) =>
|
||||
value.includes('--xpack.fleet.agents.fleet_server.hosts') ||
|
||||
value.includes('--xpack.fleet.agents.elasticsearch.host')
|
||||
);
|
||||
|
||||
vars.kbnTestServer.serverArgs = _.filter(
|
||||
vars.kbnTestServer.serverArgs,
|
||||
(value) =>
|
||||
!(
|
||||
value.includes('--elasticsearch.hosts') ||
|
||||
value.includes('--xpack.fleet.agents.fleet_server.hosts') ||
|
||||
value.includes('--xpack.fleet.agents.elasticsearch.host') ||
|
||||
value.includes('--server.port')
|
||||
)
|
||||
);
|
||||
|
||||
// NOTE: extending server args here as settingOverrides above is removing some important SSL configs
|
||||
// like key and certificate
|
||||
vars.kbnTestServer.serverArgs.push(
|
||||
`--server.port=${kibanaPort}`,
|
||||
`--elasticsearch.hosts=http://localhost:${esPort}`
|
||||
);
|
||||
|
||||
// apply right protocol on hosts
|
||||
vars.kbnTestServer.serverArgs = _.map(vars.kbnTestServer.serverArgs, (value) => {
|
||||
if (
|
||||
vars.servers.elasticsearch.protocol === 'https' &&
|
||||
value.includes('--elasticsearch.hosts=http')
|
||||
) {
|
||||
return value.replace('http', 'https');
|
||||
}
|
||||
|
||||
if (
|
||||
vars.servers.kibana.protocol === 'https' &&
|
||||
(value.includes('--elasticsearch.hosts=http') ||
|
||||
value.includes('--server.publicBaseUrl=http'))
|
||||
) {
|
||||
return value.replace('http', 'https');
|
||||
}
|
||||
|
||||
return value;
|
||||
});
|
||||
|
||||
if (
|
||||
specFileFTRConfig?.enableExperimental?.length &&
|
||||
_.some(vars.kbnTestServer.serverArgs, (value) =>
|
||||
value.includes('--xpack.securitySolution.enableExperimental')
|
||||
)
|
||||
) {
|
||||
vars.kbnTestServer.serverArgs = _.filter(
|
||||
vars.kbnTestServer.serverArgs,
|
||||
(value) => !value.includes('--xpack.securitySolution.enableExperimental')
|
||||
);
|
||||
vars.kbnTestServer.serverArgs.push(
|
||||
`--xpack.securitySolution.enableExperimental=${JSON.stringify(
|
||||
specFileFTRConfig?.enableExperimental
|
||||
)}`
|
||||
);
|
||||
}
|
||||
|
||||
if (specFileFTRConfig?.license) {
|
||||
if (vars.serverless) {
|
||||
log.warning(
|
||||
`'ftrConfig.license' ignored. Value does not apply to kibana when running in serverless.\nFile: ${specFilePath}`
|
||||
);
|
||||
} else {
|
||||
vars.esTestCluster.license = specFileFTRConfig.license;
|
||||
}
|
||||
}
|
||||
|
||||
if (hasFleetServerArgs) {
|
||||
vars.kbnTestServer.serverArgs.push(
|
||||
`--xpack.fleet.agents.fleet_server.hosts=["https://${hostRealIp}:${fleetServerPort}"]`
|
||||
);
|
||||
vars.kbnTestServer.serverArgs.push(
|
||||
`--xpack.fleet.agents.elasticsearch.host=http://${hostRealIp}:${esPort}`
|
||||
);
|
||||
|
||||
if (vars.serverless) {
|
||||
vars.kbnTestServer.serverArgs.push(`--xpack.fleet.internal.fleetServerStandalone=false`);
|
||||
}
|
||||
}
|
||||
|
||||
// Serverless Specific
|
||||
if (vars.serverless) {
|
||||
log.info(`Serverless mode detected`);
|
||||
|
||||
vars.kbnTestServer.serverArgs.push(
|
||||
`--elasticsearch.hosts=https://localhost:${esPort}`,
|
||||
`--server.publicBaseUrl=https://localhost:${kibanaPort}`
|
||||
);
|
||||
vars.esTestCluster.serverArgs.push(
|
||||
`xpack.security.authc.realms.saml.cloud-saml-kibana.sp.entity_id=http://host.docker.internal:${kibanaPort}`,
|
||||
`xpack.security.authc.realms.saml.cloud-saml-kibana.sp.logout=http://host.docker.internal:${kibanaPort}/logout`,
|
||||
`xpack.security.authc.realms.saml.cloud-saml-kibana.sp.acs=http://host.docker.internal:${kibanaPort}/api/security/saml/callback`
|
||||
);
|
||||
} else {
|
||||
vars.kbnTestServer.serverArgs.push(
|
||||
`--elasticsearch.hosts=http://localhost:${esPort}`,
|
||||
`--server.publicBaseUrl=http://localhost:${kibanaPort}`
|
||||
);
|
||||
}
|
||||
|
||||
if (specFileFTRConfig?.productTypes) {
|
||||
if (vars.serverless) {
|
||||
vars.kbnTestServer.serverArgs.push(
|
||||
`--xpack.securitySolutionServerless.productTypes=${JSON.stringify([
|
||||
...specFileFTRConfig.productTypes,
|
||||
// Why spread it twice?
|
||||
// The `serverless.security.yml` file by default includes two product types as of this change.
|
||||
// Because it's an array, we need to ensure that existing values are "removed" and the ones
|
||||
// defined here are added. To do that, we duplicate the `productTypes` passed so that all array
|
||||
// elements in that YAML file are updated. The Security serverless plugin has code in place to
|
||||
// dedupe.
|
||||
...specFileFTRConfig.productTypes,
|
||||
])}`
|
||||
);
|
||||
} else {
|
||||
log.warning(
|
||||
`'ftrConfig.productTypes' ignored. Value applies only when running kibana in serverless.\nFile: ${specFilePath}`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
return vars;
|
||||
}
|
||||
);
|
|
@ -17,13 +17,7 @@ import { findChangedFiles } from 'find-cypress-specs';
|
|||
import path from 'path';
|
||||
import grep from '@cypress/grep/src/plugin';
|
||||
|
||||
import {
|
||||
EsVersion,
|
||||
FunctionalTestRunner,
|
||||
readConfigFile,
|
||||
runElasticsearch,
|
||||
runKibanaServer,
|
||||
} from '@kbn/test';
|
||||
import { EsVersion, FunctionalTestRunner, runElasticsearch, runKibanaServer } from '@kbn/test';
|
||||
|
||||
import {
|
||||
Lifecycle,
|
||||
|
@ -34,8 +28,8 @@ import {
|
|||
import { createFailError } from '@kbn/dev-cli-errors';
|
||||
import pRetry from 'p-retry';
|
||||
import { renderSummaryTable } from './print_run';
|
||||
import { getLocalhostRealIp } from '../endpoint/common/localhost_services';
|
||||
import { isSkipped, parseTestFileConfig } from './utils';
|
||||
import { getFTRConfig } from './get_ftr_config';
|
||||
|
||||
/**
|
||||
* Retrieve test files using a glob pattern.
|
||||
|
@ -69,34 +63,35 @@ const retrieveIntegrations = (integrationsPaths: string[]) => {
|
|||
export const cli = () => {
|
||||
run(
|
||||
async () => {
|
||||
const { argv } = yargs(process.argv.slice(2)).coerce('env', (arg: string) =>
|
||||
arg.split(',').reduce((acc, curr) => {
|
||||
const [key, value] = curr.split('=');
|
||||
if (key === 'burn') {
|
||||
acc[key] = parseInt(value, 10);
|
||||
} else {
|
||||
acc[key] = value;
|
||||
}
|
||||
return acc;
|
||||
}, {} as Record<string, string | number>)
|
||||
);
|
||||
const { argv } = yargs(process.argv.slice(2))
|
||||
.coerce('spec', (arg) => (_.isArray(arg) ? [_.last(arg)] : [arg]))
|
||||
.coerce('env', (arg: string) =>
|
||||
arg.split(',').reduce((acc, curr) => {
|
||||
const [key, value] = curr.split('=');
|
||||
if (key === 'burn') {
|
||||
acc[key] = parseInt(value, 10);
|
||||
} else {
|
||||
acc[key] = value;
|
||||
}
|
||||
return acc;
|
||||
}, {} as Record<string, string | number>)
|
||||
);
|
||||
|
||||
const isOpen = argv._[0] === 'open';
|
||||
const cypressConfigFilePath = require.resolve(
|
||||
`../../${_.isArray(argv.configFile) ? _.last(argv.configFile) : argv.configFile}`
|
||||
) as string;
|
||||
const cypressConfigFile = await import(cypressConfigFilePath);
|
||||
const spec: string | undefined = argv?.spec as string;
|
||||
const grepSpecPattern = grep({
|
||||
...cypressConfigFile,
|
||||
specPattern: spec ?? cypressConfigFile.e2e.specPattern,
|
||||
specPattern: argv.spec ?? cypressConfigFile.e2e.specPattern,
|
||||
excludeSpecPattern: [],
|
||||
}).specPattern;
|
||||
|
||||
let files = retrieveIntegrations(
|
||||
_.isArray(grepSpecPattern)
|
||||
? grepSpecPattern
|
||||
: globby.sync(spec ? [spec] : cypressConfigFile.e2e.specPattern)
|
||||
: globby.sync(argv.spec ?? cypressConfigFile.e2e.specPattern)
|
||||
);
|
||||
|
||||
if (argv.changedSpecsOnly) {
|
||||
|
@ -177,8 +172,6 @@ export const cli = () => {
|
|||
_.pull(fleetServerPorts, fleetServerPort);
|
||||
};
|
||||
|
||||
const hostRealIp = getLocalhostRealIp();
|
||||
|
||||
await pMap(
|
||||
files,
|
||||
async (filePath) => {
|
||||
|
@ -197,121 +190,22 @@ export const cli = () => {
|
|||
const esPort: number = getEsPort();
|
||||
const kibanaPort: number = getKibanaPort();
|
||||
const fleetServerPort: number = getFleetServerPort();
|
||||
const configFromTestFile = parseTestFileConfig(filePath);
|
||||
|
||||
const config = await readConfigFile(
|
||||
log,
|
||||
EsVersion.getDefault(),
|
||||
path.resolve(
|
||||
_.isArray(argv.ftrConfigFile) ? _.last(argv.ftrConfigFile) : argv.ftrConfigFile
|
||||
),
|
||||
{
|
||||
servers: {
|
||||
elasticsearch: {
|
||||
port: esPort,
|
||||
},
|
||||
kibana: {
|
||||
port: kibanaPort,
|
||||
},
|
||||
fleetserver: {
|
||||
port: fleetServerPort,
|
||||
},
|
||||
},
|
||||
kbnTestServer: {
|
||||
serverArgs: [
|
||||
`--server.port=${kibanaPort}`,
|
||||
`--elasticsearch.hosts=http://localhost:${esPort}`,
|
||||
],
|
||||
},
|
||||
},
|
||||
(vars) => {
|
||||
const hasFleetServerArgs = _.some(
|
||||
vars.kbnTestServer.serverArgs,
|
||||
(value) =>
|
||||
value.includes('--xpack.fleet.agents.fleet_server.hosts') ||
|
||||
value.includes('--xpack.fleet.agents.elasticsearch.host')
|
||||
);
|
||||
|
||||
vars.kbnTestServer.serverArgs = _.filter(
|
||||
vars.kbnTestServer.serverArgs,
|
||||
(value) =>
|
||||
!(
|
||||
value.includes('--elasticsearch.hosts=http://localhost:9220') ||
|
||||
value.includes('--xpack.fleet.agents.fleet_server.hosts') ||
|
||||
value.includes('--xpack.fleet.agents.elasticsearch.host')
|
||||
)
|
||||
);
|
||||
|
||||
if (
|
||||
configFromTestFile?.enableExperimental?.length &&
|
||||
_.some(vars.kbnTestServer.serverArgs, (value) =>
|
||||
value.includes('--xpack.securitySolution.enableExperimental')
|
||||
)
|
||||
) {
|
||||
vars.kbnTestServer.serverArgs = _.filter(
|
||||
vars.kbnTestServer.serverArgs,
|
||||
(value) => !value.includes('--xpack.securitySolution.enableExperimental')
|
||||
);
|
||||
vars.kbnTestServer.serverArgs.push(
|
||||
`--xpack.securitySolution.enableExperimental=${JSON.stringify(
|
||||
configFromTestFile?.enableExperimental
|
||||
)}`
|
||||
);
|
||||
}
|
||||
|
||||
if (configFromTestFile?.license) {
|
||||
if (vars.serverless) {
|
||||
log.warning(
|
||||
`'ftrConfig.license' ignored. Value does not apply to kibana when running in serverless.\nFile: ${filePath}`
|
||||
);
|
||||
} else {
|
||||
vars.esTestCluster.license = configFromTestFile.license;
|
||||
}
|
||||
}
|
||||
|
||||
if (hasFleetServerArgs) {
|
||||
vars.kbnTestServer.serverArgs.push(
|
||||
`--xpack.fleet.agents.fleet_server.hosts=["https://${hostRealIp}:${fleetServerPort}"]`
|
||||
);
|
||||
vars.kbnTestServer.serverArgs.push(
|
||||
`--xpack.fleet.agents.elasticsearch.host=http://${hostRealIp}:${esPort}`
|
||||
);
|
||||
|
||||
if (vars.serverless) {
|
||||
vars.kbnTestServer.serverArgs.push(
|
||||
`--xpack.fleet.internal.fleetServerStandalone=false`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Serverless Specific
|
||||
if (vars.serverless) {
|
||||
log.info(`Serverless mode detected`);
|
||||
|
||||
if (configFromTestFile?.productTypes) {
|
||||
vars.kbnTestServer.serverArgs.push(
|
||||
`--xpack.securitySolutionServerless.productTypes=${JSON.stringify([
|
||||
...configFromTestFile.productTypes,
|
||||
// Why spread it twice?
|
||||
// The `serverless.security.yml` file by default includes two product types as of this change.
|
||||
// Because it's an array, we need to ensure that existing values are "removed" and the ones
|
||||
// defined here are added. To do that, we duplicate the `productTypes` passed so that all array
|
||||
// elements in that YAML file are updated. The Security serverless plugin has code in place to
|
||||
// dedupe.
|
||||
...configFromTestFile.productTypes,
|
||||
])}`
|
||||
);
|
||||
}
|
||||
} else if (configFromTestFile?.productTypes) {
|
||||
log.warning(
|
||||
`'ftrConfig.productTypes' ignored. Value applies only when running kibana in serverless.\nFile: ${filePath}`
|
||||
);
|
||||
}
|
||||
|
||||
return vars;
|
||||
}
|
||||
const specFileFTRConfig = parseTestFileConfig(filePath);
|
||||
const ftrConfigFilePath = path.resolve(
|
||||
_.isArray(argv.ftrConfigFile) ? _.last(argv.ftrConfigFile) : argv.ftrConfigFile
|
||||
);
|
||||
|
||||
const config = await getFTRConfig({
|
||||
log,
|
||||
esPort,
|
||||
kibanaPort,
|
||||
fleetServerPort,
|
||||
ftrConfigFilePath,
|
||||
specFilePath: filePath,
|
||||
specFileFTRConfig,
|
||||
isOpen,
|
||||
});
|
||||
|
||||
log.info(`
|
||||
----------------------------------------------
|
||||
Cypress FTR setup for file: ${filePath}:
|
||||
|
@ -344,26 +238,22 @@ ${JSON.stringify(config.getAll(), null, 2)}
|
|||
config,
|
||||
log,
|
||||
name: `ftr-${esPort}`,
|
||||
esFrom: 'snapshot',
|
||||
esFrom: config.get('esTestCluster')?.from || 'snapshot',
|
||||
onEarlyExit,
|
||||
}),
|
||||
{ retries: 2, forever: false }
|
||||
);
|
||||
|
||||
await pRetry(
|
||||
async () =>
|
||||
runKibanaServer({
|
||||
procs,
|
||||
config,
|
||||
installDir: options?.installDir,
|
||||
extraKbnOpts:
|
||||
options?.installDir || options?.ci || !isOpen
|
||||
? []
|
||||
: ['--dev', '--no-dev-config', '--no-dev-credentials'],
|
||||
onEarlyExit,
|
||||
}),
|
||||
{ retries: 2, forever: false }
|
||||
);
|
||||
await runKibanaServer({
|
||||
procs,
|
||||
config,
|
||||
installDir: options?.installDir,
|
||||
extraKbnOpts:
|
||||
options?.installDir || options?.ci || !isOpen
|
||||
? []
|
||||
: ['--dev', '--no-dev-config', '--no-dev-credentials'],
|
||||
onEarlyExit,
|
||||
});
|
||||
|
||||
await providers.loadAll();
|
||||
|
||||
|
@ -432,6 +322,8 @@ ${JSON.stringify(config.getAll(), null, 2)}
|
|||
KIBANA_USERNAME: config.get('servers.kibana.username'),
|
||||
KIBANA_PASSWORD: config.get('servers.kibana.password'),
|
||||
|
||||
IS_SERVERLESS: config.get('serverless'),
|
||||
|
||||
...argv.env,
|
||||
};
|
||||
|
||||
|
|
|
@ -25,33 +25,37 @@ import { GET_TIMELINE_HEADER } from '../../screens/timeline';
|
|||
const alertRunTimeField = 'field.name.alert.page';
|
||||
const timelineRuntimeField = 'field.name.timeline';
|
||||
|
||||
describe('Create DataView runtime field', { tags: ['@ess', '@serverless'] }, () => {
|
||||
before(() => {
|
||||
deleteRuntimeField('security-solution-default', alertRunTimeField);
|
||||
deleteRuntimeField('security-solution-default', timelineRuntimeField);
|
||||
});
|
||||
describe(
|
||||
'Create DataView runtime field',
|
||||
{ tags: ['@ess', '@serverless', '@brokenInServerless'] },
|
||||
() => {
|
||||
before(() => {
|
||||
deleteRuntimeField('security-solution-default', alertRunTimeField);
|
||||
deleteRuntimeField('security-solution-default', timelineRuntimeField);
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
login();
|
||||
});
|
||||
beforeEach(() => {
|
||||
login();
|
||||
});
|
||||
|
||||
it('adds field to alert table', () => {
|
||||
visit(ALERTS_URL);
|
||||
createRule(getNewRule());
|
||||
refreshPage();
|
||||
waitForAlertsToPopulate();
|
||||
openAlertsFieldBrowser();
|
||||
createField(alertRunTimeField);
|
||||
cy.get(GET_DATA_GRID_HEADER(alertRunTimeField)).should('exist');
|
||||
});
|
||||
it('adds field to alert table', () => {
|
||||
visit(ALERTS_URL);
|
||||
createRule(getNewRule());
|
||||
refreshPage();
|
||||
waitForAlertsToPopulate();
|
||||
openAlertsFieldBrowser();
|
||||
createField(alertRunTimeField);
|
||||
cy.get(GET_DATA_GRID_HEADER(alertRunTimeField)).should('exist');
|
||||
});
|
||||
|
||||
it('adds field to timeline', () => {
|
||||
visit(HOSTS_URL);
|
||||
openTimelineUsingToggle();
|
||||
populateTimeline();
|
||||
openTimelineFieldsBrowser();
|
||||
it('adds field to timeline', () => {
|
||||
visit(HOSTS_URL);
|
||||
openTimelineUsingToggle();
|
||||
populateTimeline();
|
||||
openTimelineFieldsBrowser();
|
||||
|
||||
createField(timelineRuntimeField);
|
||||
cy.get(GET_TIMELINE_HEADER(timelineRuntimeField)).should('exist');
|
||||
});
|
||||
});
|
||||
createField(timelineRuntimeField);
|
||||
cy.get(GET_TIMELINE_HEADER(timelineRuntimeField)).should('exist');
|
||||
});
|
||||
}
|
||||
);
|
||||
|
|
|
@ -40,7 +40,8 @@ describe('Sourcerer', () => {
|
|||
cy.task('esArchiverResetKibana');
|
||||
dataViews.forEach((dataView: string) => postDataView(dataView));
|
||||
});
|
||||
describe('permissions', { tags: '@ess' }, () => {
|
||||
|
||||
describe('permissions', { tags: ['@ess', '@brokenInServerless'] }, () => {
|
||||
before(() => {
|
||||
createUsersAndRoles(usersToCreate, rolesToCreate);
|
||||
});
|
||||
|
|
|
@ -0,0 +1,167 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0; you may not use this file except in compliance with the Elastic License
|
||||
* 2.0.
|
||||
*/
|
||||
|
||||
import {
|
||||
DEFAULT_ALERTS_INDEX,
|
||||
DEFAULT_INDEX_PATTERN,
|
||||
} from '@kbn/security-solution-plugin/common/constants';
|
||||
|
||||
import { login, visit } from '../../tasks/login';
|
||||
|
||||
import { TIMELINES_URL } from '../../urls/navigation';
|
||||
import {
|
||||
clickAlertCheckbox,
|
||||
deselectSourcererOptions,
|
||||
isDataViewSelection,
|
||||
isKibanaDataViewOption,
|
||||
isNotSourcererOption,
|
||||
isNotSourcererSelection,
|
||||
isSourcererOptions,
|
||||
isSourcererSelection,
|
||||
openAdvancedSettings,
|
||||
openDataViewSelection,
|
||||
openSourcerer,
|
||||
refreshUntilAlertsIndexExists,
|
||||
resetSourcerer,
|
||||
saveSourcerer,
|
||||
} from '../../tasks/sourcerer';
|
||||
import { openTimelineUsingToggle } from '../../tasks/security_main';
|
||||
import { SOURCERER } from '../../screens/sourcerer';
|
||||
import { createTimeline } from '../../tasks/api_calls/timelines';
|
||||
import { getTimeline, getTimelineModifiedSourcerer } from '../../objects/timeline';
|
||||
import { closeTimeline, openTimelineById } from '../../tasks/timeline';
|
||||
|
||||
const siemDataViewTitle = 'Security Default Data View';
|
||||
const dataViews = ['auditbeat-*,fakebeat-*', 'auditbeat-*,*beat*,siem-read*,.kibana*,fakebeat-*'];
|
||||
|
||||
describe('Timeline scope', { tags: '@brokenInServerless' }, () => {
|
||||
beforeEach(() => {
|
||||
cy.clearLocalStorage();
|
||||
login();
|
||||
visit(TIMELINES_URL);
|
||||
});
|
||||
|
||||
it('correctly loads SIEM data view', () => {
|
||||
openTimelineUsingToggle();
|
||||
openSourcerer('timeline');
|
||||
isDataViewSelection(siemDataViewTitle);
|
||||
openAdvancedSettings();
|
||||
isSourcererSelection(`auditbeat-*`);
|
||||
isSourcererSelection(`${DEFAULT_ALERTS_INDEX}-default`);
|
||||
isSourcererOptions(DEFAULT_INDEX_PATTERN.filter((pattern) => pattern !== 'auditbeat-*'));
|
||||
isNotSourcererOption(`${DEFAULT_ALERTS_INDEX}-default`);
|
||||
});
|
||||
|
||||
describe('Modified badge', () => {
|
||||
it('Selecting new data view does not add a modified badge', () => {
|
||||
openTimelineUsingToggle();
|
||||
cy.get(SOURCERER.badgeModified).should(`not.exist`);
|
||||
openSourcerer('timeline');
|
||||
cy.get(SOURCERER.badgeModifiedOption).should(`not.exist`);
|
||||
openDataViewSelection();
|
||||
isKibanaDataViewOption(dataViews);
|
||||
cy.get(SOURCERER.selectListDefaultOption).should(`contain`, siemDataViewTitle);
|
||||
cy.get(SOURCERER.selectListOption).contains(dataViews[1]).click();
|
||||
isDataViewSelection(dataViews[1]);
|
||||
saveSourcerer();
|
||||
cy.get(SOURCERER.badgeModified).should(`not.exist`);
|
||||
openSourcerer('timeline');
|
||||
cy.get(SOURCERER.badgeModifiedOption).should(`not.exist`);
|
||||
});
|
||||
|
||||
it('shows modified badge when index patterns change and removes when reset', () => {
|
||||
openTimelineUsingToggle();
|
||||
openSourcerer('timeline');
|
||||
openDataViewSelection();
|
||||
cy.get(SOURCERER.selectListOption).contains(dataViews[1]).click();
|
||||
isDataViewSelection(dataViews[1]);
|
||||
openAdvancedSettings();
|
||||
const patterns = dataViews[1].split(',');
|
||||
deselectSourcererOptions([patterns[0]]);
|
||||
saveSourcerer();
|
||||
cy.get(SOURCERER.badgeModified).should(`exist`);
|
||||
openSourcerer('timeline');
|
||||
cy.get(SOURCERER.badgeModifiedOption).should(`exist`);
|
||||
resetSourcerer();
|
||||
saveSourcerer();
|
||||
cy.get(SOURCERER.badgeModified).should(`not.exist`);
|
||||
openSourcerer('timeline');
|
||||
cy.get(SOURCERER.badgeModifiedOption).should(`not.exist`);
|
||||
isDataViewSelection(siemDataViewTitle);
|
||||
});
|
||||
});
|
||||
describe('Alerts checkbox', () => {
|
||||
before(() => {
|
||||
login();
|
||||
createTimeline(getTimeline()).then((response) =>
|
||||
cy.wrap(response.body.data.persistTimeline.timeline.savedObjectId).as('timelineId')
|
||||
);
|
||||
createTimeline(getTimelineModifiedSourcerer()).then((response) =>
|
||||
cy.wrap(response.body.data.persistTimeline.timeline.savedObjectId).as('auditbeatTimelineId')
|
||||
);
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
login();
|
||||
visit(TIMELINES_URL);
|
||||
refreshUntilAlertsIndexExists();
|
||||
});
|
||||
|
||||
it('Modifies timeline to alerts only, and switches to different saved timeline without issue', function () {
|
||||
openTimelineById(this.timelineId).then(() => {
|
||||
cy.get(SOURCERER.badgeAlerts).should(`not.exist`);
|
||||
cy.get(SOURCERER.badgeModified).should(`not.exist`);
|
||||
openSourcerer('timeline');
|
||||
clickAlertCheckbox();
|
||||
saveSourcerer();
|
||||
cy.get(SOURCERER.badgeAlerts).should(`exist`);
|
||||
cy.get(SOURCERER.badgeModified).should(`not.exist`);
|
||||
closeTimeline();
|
||||
|
||||
openTimelineById(this.auditbeatTimelineId).then(() => {
|
||||
cy.get(SOURCERER.badgeModified).should(`exist`);
|
||||
cy.get(SOURCERER.badgeAlerts).should(`not.exist`);
|
||||
openSourcerer('timeline');
|
||||
openAdvancedSettings();
|
||||
isSourcererSelection(`auditbeat-*`);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
const defaultPatterns = [`auditbeat-*`, `${DEFAULT_ALERTS_INDEX}-default`];
|
||||
it('alerts checkbox behaves as expected', () => {
|
||||
isDataViewSelection(siemDataViewTitle);
|
||||
defaultPatterns.forEach((pattern) => isSourcererSelection(pattern));
|
||||
openDataViewSelection();
|
||||
cy.get(SOURCERER.selectListOption).contains(dataViews[1]).click();
|
||||
isDataViewSelection(dataViews[1]);
|
||||
dataViews[1]
|
||||
.split(',')
|
||||
.filter((pattern) => pattern !== 'fakebeat-*' && pattern !== 'siem-read*')
|
||||
.forEach((pattern) => isSourcererSelection(pattern));
|
||||
|
||||
clickAlertCheckbox();
|
||||
isNotSourcererSelection(`auditbeat-*`);
|
||||
isSourcererSelection(`${DEFAULT_ALERTS_INDEX}-default`);
|
||||
cy.get(SOURCERER.alertCheckbox).uncheck({ force: true });
|
||||
defaultPatterns.forEach((pattern) => isSourcererSelection(pattern));
|
||||
});
|
||||
|
||||
it('shows alerts badge when index patterns change and removes when reset', () => {
|
||||
clickAlertCheckbox();
|
||||
saveSourcerer();
|
||||
cy.get(SOURCERER.badgeAlerts).should(`exist`);
|
||||
openSourcerer('timeline');
|
||||
cy.get(SOURCERER.badgeAlertsOption).should(`exist`);
|
||||
resetSourcerer();
|
||||
saveSourcerer();
|
||||
cy.get(SOURCERER.badgeAlerts).should(`not.exist`);
|
||||
openSourcerer('timeline');
|
||||
cy.get(SOURCERER.badgeAlertsOption).should(`not.exist`);
|
||||
});
|
||||
});
|
||||
});
|
|
@ -24,7 +24,7 @@ import {
|
|||
UNSELECTED_ALERT_TAG,
|
||||
} from '../../screens/alerts';
|
||||
|
||||
describe('Alert tagging', { tags: ['@ess', '@serverless'] }, () => {
|
||||
describe('Alert tagging', { tags: ['@ess', '@serverless', '@brokenInServerless'] }, () => {
|
||||
before(() => {
|
||||
cleanKibana();
|
||||
cy.task('esArchiverResetKibana');
|
||||
|
|
|
@ -24,46 +24,50 @@ import {
|
|||
} from '../../screens/search_bar';
|
||||
import { TOASTER } from '../../screens/alerts_detection_rules';
|
||||
|
||||
describe('Histogram legend hover actions', { tags: ['@ess', '@serverless'] }, () => {
|
||||
const ruleConfigs = getNewRule();
|
||||
describe(
|
||||
'Histogram legend hover actions',
|
||||
{ tags: ['@ess', '@serverless', '@brokenInServerless'] },
|
||||
() => {
|
||||
const ruleConfigs = getNewRule();
|
||||
|
||||
before(() => {
|
||||
cleanKibana();
|
||||
});
|
||||
before(() => {
|
||||
cleanKibana();
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
login();
|
||||
createRule(getNewRule({ rule_id: 'new custom rule' }));
|
||||
visit(ALERTS_URL);
|
||||
selectAlertsHistogram();
|
||||
});
|
||||
beforeEach(() => {
|
||||
login();
|
||||
createRule(getNewRule({ rule_id: 'new custom rule' }));
|
||||
visit(ALERTS_URL);
|
||||
selectAlertsHistogram();
|
||||
});
|
||||
|
||||
it('Filter in/out should add a filter to KQL bar', function () {
|
||||
const expectedNumberOfAlerts = 2;
|
||||
clickAlertsHistogramLegend();
|
||||
clickAlertsHistogramLegendFilterFor(ruleConfigs.name);
|
||||
cy.get(GLOBAL_SEARCH_BAR_FILTER_ITEM).should(
|
||||
'have.text',
|
||||
`kibana.alert.rule.name: ${ruleConfigs.name}`
|
||||
);
|
||||
cy.get(ALERTS_COUNT).should('have.text', `${expectedNumberOfAlerts} alerts`);
|
||||
it('Filter in/out should add a filter to KQL bar', function () {
|
||||
const expectedNumberOfAlerts = 2;
|
||||
clickAlertsHistogramLegend();
|
||||
clickAlertsHistogramLegendFilterFor(ruleConfigs.name);
|
||||
cy.get(GLOBAL_SEARCH_BAR_FILTER_ITEM).should(
|
||||
'have.text',
|
||||
`kibana.alert.rule.name: ${ruleConfigs.name}`
|
||||
);
|
||||
cy.get(ALERTS_COUNT).should('have.text', `${expectedNumberOfAlerts} alerts`);
|
||||
|
||||
clickAlertsHistogramLegend();
|
||||
clickAlertsHistogramLegendFilterOut(ruleConfigs.name);
|
||||
cy.get(GLOBAL_SEARCH_BAR_FILTER_ITEM).should(
|
||||
'have.text',
|
||||
`NOT kibana.alert.rule.name: ${ruleConfigs.name}`
|
||||
);
|
||||
cy.get(ALERTS_COUNT).should('not.exist');
|
||||
clickAlertsHistogramLegend();
|
||||
clickAlertsHistogramLegendFilterOut(ruleConfigs.name);
|
||||
cy.get(GLOBAL_SEARCH_BAR_FILTER_ITEM).should(
|
||||
'have.text',
|
||||
`NOT kibana.alert.rule.name: ${ruleConfigs.name}`
|
||||
);
|
||||
cy.get(ALERTS_COUNT).should('not.exist');
|
||||
|
||||
cy.get(GLOBAL_SEARCH_BAR_FILTER_ITEM_DELETE).click();
|
||||
cy.get(GLOBAL_SEARCH_BAR_FILTER_ITEM).should('not.exist');
|
||||
});
|
||||
cy.get(GLOBAL_SEARCH_BAR_FILTER_ITEM_DELETE).click();
|
||||
cy.get(GLOBAL_SEARCH_BAR_FILTER_ITEM).should('not.exist');
|
||||
});
|
||||
|
||||
it('Add To Timeline', function () {
|
||||
clickAlertsHistogramLegend();
|
||||
clickAlertsHistogramLegendAddToTimeline(ruleConfigs.name);
|
||||
it('Add To Timeline', function () {
|
||||
clickAlertsHistogramLegend();
|
||||
clickAlertsHistogramLegendAddToTimeline(ruleConfigs.name);
|
||||
|
||||
cy.get(TOASTER).should('have.text', `Added ${ruleConfigs.name} to timeline`);
|
||||
});
|
||||
});
|
||||
cy.get(TOASTER).should('have.text', `Added ${ruleConfigs.name} to timeline`);
|
||||
});
|
||||
}
|
||||
);
|
||||
|
|
|
@ -28,9 +28,10 @@ import { openJsonView, openThreatIndicatorDetails } from '../../tasks/alerts_det
|
|||
import { DETECTIONS_RULE_MANAGEMENT_URL } from '../../urls/navigation';
|
||||
import { addsFieldsToTimeline } from '../../tasks/rule_details';
|
||||
|
||||
describe('CTI Enrichment', { tags: ['@ess', '@serverless'] }, () => {
|
||||
describe('CTI Enrichment', { tags: ['@ess', '@serverless', '@brokenInServerless'] }, () => {
|
||||
before(() => {
|
||||
cleanKibana();
|
||||
// illegal_argument_exception: unknown setting [index.lifecycle.rollover_alias]
|
||||
cy.task('esArchiverLoad', { archiveName: 'threat_indicator' });
|
||||
cy.task('esArchiverLoad', { archiveName: 'suspicious_source_event' });
|
||||
login();
|
||||
|
|
|
@ -30,7 +30,7 @@ import { login, visit } from '../../tasks/login';
|
|||
|
||||
import { ALERTS_URL } from '../../urls/navigation';
|
||||
|
||||
describe('Enrichment', { tags: ['@ess', '@serverless'] }, () => {
|
||||
describe('Enrichment', { tags: ['@ess', '@serverless', '@brokenInServerless'] }, () => {
|
||||
before(() => {
|
||||
cleanKibana();
|
||||
cy.task('esArchiverLoad', { archiveName: 'risk_users' });
|
||||
|
|
|
@ -14,53 +14,57 @@ import { TIMELINE_QUERY, TIMELINE_VIEW_IN_ANALYZER } from '../../screens/timelin
|
|||
import { selectAlertsHistogram } from '../../tasks/alerts';
|
||||
import { createTimeline } from '../../tasks/timelines';
|
||||
|
||||
describe('Ransomware Detection Alerts', { tags: ['@ess', '@serverless'] }, () => {
|
||||
before(() => {
|
||||
cy.task('esArchiverLoad', {
|
||||
archiveName: 'ransomware_detection',
|
||||
useCreate: true,
|
||||
docsOnly: true,
|
||||
});
|
||||
});
|
||||
|
||||
describe('Ransomware display in Alerts Section', () => {
|
||||
beforeEach(() => {
|
||||
login();
|
||||
visit(ALERTS_URL);
|
||||
waitForAlertsToPopulate();
|
||||
});
|
||||
|
||||
describe('Alerts table', () => {
|
||||
it('shows Ransomware Alerts', () => {
|
||||
cy.get(ALERT_RULE_NAME).should('have.text', 'Ransomware Detection Alert');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Trend Chart', () => {
|
||||
beforeEach(() => {
|
||||
selectAlertsHistogram();
|
||||
});
|
||||
|
||||
it('shows Ransomware Detection Alert in the trend chart', () => {
|
||||
cy.get(ALERTS_HISTOGRAM_SERIES).should('have.text', 'Ransomware Detection Alert');
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('Ransomware in Timelines', () => {
|
||||
describe(
|
||||
'Ransomware Detection Alerts',
|
||||
{ tags: ['@ess', '@serverless', '@brokenInServerless'] },
|
||||
() => {
|
||||
before(() => {
|
||||
login();
|
||||
visit(TIMELINES_URL);
|
||||
createTimeline();
|
||||
cy.task('esArchiverLoad', {
|
||||
archiveName: 'ransomware_detection',
|
||||
useCreate: true,
|
||||
docsOnly: true,
|
||||
});
|
||||
});
|
||||
|
||||
it('Renders ransomware entries in timelines table', () => {
|
||||
cy.get(TIMELINE_QUERY).type('event.code: "ransomware"{enter}');
|
||||
describe('Ransomware display in Alerts Section', () => {
|
||||
beforeEach(() => {
|
||||
login();
|
||||
visit(ALERTS_URL);
|
||||
waitForAlertsToPopulate();
|
||||
});
|
||||
|
||||
// Wait for grid to load, it should have an analyzer icon
|
||||
cy.get(TIMELINE_VIEW_IN_ANALYZER).should('exist');
|
||||
describe('Alerts table', () => {
|
||||
it('shows Ransomware Alerts', () => {
|
||||
cy.get(ALERT_RULE_NAME).should('have.text', 'Ransomware Detection Alert');
|
||||
});
|
||||
});
|
||||
|
||||
cy.get(MESSAGE).should('have.text', 'Ransomware Detection Alert');
|
||||
describe('Trend Chart', () => {
|
||||
beforeEach(() => {
|
||||
selectAlertsHistogram();
|
||||
});
|
||||
|
||||
it('shows Ransomware Detection Alert in the trend chart', () => {
|
||||
cy.get(ALERTS_HISTOGRAM_SERIES).should('have.text', 'Ransomware Detection Alert');
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('Ransomware in Timelines', () => {
|
||||
before(() => {
|
||||
login();
|
||||
visit(TIMELINES_URL);
|
||||
createTimeline();
|
||||
});
|
||||
|
||||
it('Renders ransomware entries in timelines table', () => {
|
||||
cy.get(TIMELINE_QUERY).type('event.code: "ransomware"{enter}');
|
||||
|
||||
// Wait for grid to load, it should have an analyzer icon
|
||||
cy.get(TIMELINE_VIEW_IN_ANALYZER).should('exist');
|
||||
|
||||
cy.get(MESSAGE).should('have.text', 'Ransomware Detection Alert');
|
||||
});
|
||||
});
|
||||
}
|
||||
);
|
||||
|
|
|
@ -15,59 +15,63 @@ import { selectAlertsHistogram } from '../../tasks/alerts';
|
|||
import { createTimeline } from '../../tasks/timelines';
|
||||
import { cleanKibana } from '../../tasks/common';
|
||||
|
||||
describe('Ransomware Prevention Alerts', { tags: ['@ess', '@serverless'] }, () => {
|
||||
before(() => {
|
||||
cleanKibana();
|
||||
cy.task('esArchiverLoad', {
|
||||
archiveName: 'ransomware_prevention',
|
||||
useCreate: true,
|
||||
docsOnly: true,
|
||||
});
|
||||
});
|
||||
|
||||
after(() => {
|
||||
cy.task('esArchiverUnload', 'ransomware_prevention');
|
||||
});
|
||||
|
||||
describe('Ransomware display in Alerts Section', () => {
|
||||
beforeEach(() => {
|
||||
login();
|
||||
visit(ALERTS_URL);
|
||||
waitForAlertsToPopulate();
|
||||
});
|
||||
|
||||
describe('Alerts table', () => {
|
||||
it('shows Ransomware Alerts', () => {
|
||||
cy.get(ALERT_RULE_NAME).should('have.text', 'Ransomware Prevention Alert');
|
||||
describe(
|
||||
'Ransomware Prevention Alerts',
|
||||
{ tags: ['@ess', '@serverless', '@brokenInServerless'] },
|
||||
() => {
|
||||
before(() => {
|
||||
cleanKibana();
|
||||
cy.task('esArchiverLoad', {
|
||||
archiveName: 'ransomware_prevention',
|
||||
useCreate: true,
|
||||
docsOnly: true,
|
||||
});
|
||||
});
|
||||
|
||||
describe('Trend Chart', () => {
|
||||
after(() => {
|
||||
cy.task('esArchiverUnload', 'ransomware_prevention');
|
||||
});
|
||||
|
||||
describe('Ransomware display in Alerts Section', () => {
|
||||
beforeEach(() => {
|
||||
selectAlertsHistogram();
|
||||
login();
|
||||
visit(ALERTS_URL);
|
||||
waitForAlertsToPopulate();
|
||||
});
|
||||
|
||||
it('shows Ransomware Prevention Alert in the trend chart', () => {
|
||||
cy.get(ALERTS_HISTOGRAM_SERIES).should('have.text', 'Ransomware Prevention Alert');
|
||||
describe('Alerts table', () => {
|
||||
it('shows Ransomware Alerts', () => {
|
||||
cy.get(ALERT_RULE_NAME).should('have.text', 'Ransomware Prevention Alert');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Trend Chart', () => {
|
||||
beforeEach(() => {
|
||||
selectAlertsHistogram();
|
||||
});
|
||||
|
||||
it('shows Ransomware Prevention Alert in the trend chart', () => {
|
||||
cy.get(ALERTS_HISTOGRAM_SERIES).should('have.text', 'Ransomware Prevention Alert');
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('Ransomware in Timelines', () => {
|
||||
beforeEach(() => {
|
||||
login();
|
||||
visit(TIMELINES_URL);
|
||||
describe('Ransomware in Timelines', () => {
|
||||
beforeEach(() => {
|
||||
login();
|
||||
visit(TIMELINES_URL);
|
||||
|
||||
createTimeline();
|
||||
createTimeline();
|
||||
});
|
||||
|
||||
it('Renders ransomware entries in timelines table', () => {
|
||||
cy.get(TIMELINE_QUERY).type('event.code: "ransomware"{enter}');
|
||||
|
||||
// Wait for grid to load, it should have an analyzer icon
|
||||
cy.get(TIMELINE_VIEW_IN_ANALYZER).should('exist');
|
||||
|
||||
cy.get(MESSAGE).should('have.text', 'Ransomware Prevention Alert');
|
||||
});
|
||||
});
|
||||
|
||||
it('Renders ransomware entries in timelines table', () => {
|
||||
cy.get(TIMELINE_QUERY).type('event.code: "ransomware"{enter}');
|
||||
|
||||
// Wait for grid to load, it should have an analyzer icon
|
||||
cy.get(TIMELINE_VIEW_IN_ANALYZER).should('exist');
|
||||
|
||||
cy.get(MESSAGE).should('have.text', 'Ransomware Prevention Alert');
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
);
|
||||
|
|
|
@ -24,7 +24,7 @@ const loadDetectionsPage = (role: ROLES) => {
|
|||
waitForAlertsToPopulate();
|
||||
};
|
||||
|
||||
describe('Alerts timeline', { tags: '@ess' }, () => {
|
||||
describe('Alerts timeline', { tags: ['@ess', '@serverless', '@brokenInServerless'] }, () => {
|
||||
before(() => {
|
||||
// First we login as a privileged user to create alerts.
|
||||
cleanKibana();
|
||||
|
@ -34,7 +34,7 @@ describe('Alerts timeline', { tags: '@ess' }, () => {
|
|||
waitForAlertsToPopulate();
|
||||
});
|
||||
|
||||
context('Privileges: read only', { tags: '@ess' }, () => {
|
||||
context('Privileges: read only', () => {
|
||||
beforeEach(() => {
|
||||
loadDetectionsPage(ROLES.reader);
|
||||
});
|
||||
|
@ -52,7 +52,7 @@ describe('Alerts timeline', { tags: '@ess' }, () => {
|
|||
});
|
||||
});
|
||||
|
||||
context('Privileges: can crud', { tags: '@ess' }, () => {
|
||||
context('Privileges: can crud', () => {
|
||||
beforeEach(() => {
|
||||
loadDetectionsPage(ROLES.platform_engineer);
|
||||
cy.get(LOADING_INDICATOR).should('not.exist');
|
||||
|
|
|
@@ -53,7 +53,7 @@ import { loginWithUser, visit, visitWithoutDateRange } from '../../../tasks/logi

import { CASES_URL, OVERVIEW_URL } from '../../../urls/navigation';

describe('Cases', { tags: ['@ess', '@serverless'] }, () => {
describe('Cases', { tags: ['@ess', '@serverless', '@brokenInServerless'] }, () => {
before(() => {
cleanKibana();
createTimeline(getCase1().timeline).then((response) =>

@@ -48,7 +48,7 @@ const testCase: TestCaseWithoutTimeline = {
owner: 'securitySolution',
};

describe('Cases privileges', { tags: '@ess' }, () => {
describe('Cases privileges', { tags: ['@ess', '@serverless', '@brokenInServerless'] }, () => {
before(() => {
cleanKibana();
createUsersAndRoles(usersToCreate, rolesToCreate);
@@ -55,7 +55,7 @@ describe('Enable risk scores', { tags: ['@ess', '@serverless'] }, () => {
cy.get(ENABLE_HOST_RISK_SCORE_BUTTON).should('exist');
});

it('should install host risk score successfully', () => {
it('should install host risk score successfully', { tags: ['@brokenInServerless'] }, () => {
interceptInstallRiskScoreModule();
clickEnableRiskScore(RiskScoreEntity.host);
waitForInstallRiskScoreModule();

@@ -89,7 +89,7 @@ describe('Enable risk scores', { tags: ['@ess', '@serverless'] }, () => {
cy.get(ENABLE_USER_RISK_SCORE_BUTTON).should('exist');
});

it('should install user risk score successfully', () => {
it('should install user risk score successfully', { tags: ['@brokenInServerless'] }, () => {
interceptInstallRiskScoreModule();
clickEnableRiskScore(RiskScoreEntity.user);
waitForInstallRiskScoreModule();
@@ -59,31 +59,39 @@ describe('Upgrade risk scores', { tags: ['@ess', '@serverless'] }, () => {
cy.get(UPGRADE_USER_RISK_SCORE_BUTTON).should('be.visible');
});

it('should show a confirmation modal for upgrading host risk score and display a link to host risk score Elastic doc', () => {
clickUpgradeRiskScore(RiskScoreEntity.host);
cy.get(UPGRADE_CONFIRMATION_MODAL(RiskScoreEntity.host)).should('exist');
it(
'should show a confirmation modal for upgrading host risk score and display a link to host risk score Elastic doc',
{ tags: ['@brokenInServerless'] },
() => {
clickUpgradeRiskScore(RiskScoreEntity.host);
cy.get(UPGRADE_CONFIRMATION_MODAL(RiskScoreEntity.host)).should('exist');

cy.get(UPGRADE_CANCELLATION_BUTTON)
.get(`${UPGRADE_CONFIRMATION_MODAL(RiskScoreEntity.host)} a`)
.then((link) => {
expect(link.prop('href')).to.eql(
`https://www.elastic.co/guide/en/security/current/${RiskScoreEntity.host}-risk-score.html`
);
});
});
cy.get(UPGRADE_CANCELLATION_BUTTON)
.get(`${UPGRADE_CONFIRMATION_MODAL(RiskScoreEntity.host)} a`)
.then((link) => {
expect(link.prop('href')).to.eql(
`https://www.elastic.co/guide/en/security/current/${RiskScoreEntity.host}-risk-score.html`
);
});
}
);

it('should show a confirmation modal for upgrading user risk score and display a link to user risk score Elastic doc', () => {
clickUpgradeRiskScore(RiskScoreEntity.user);
cy.get(UPGRADE_CONFIRMATION_MODAL(RiskScoreEntity.user)).should('exist');
it(
'should show a confirmation modal for upgrading user risk score and display a link to user risk score Elastic doc',
{ tags: ['@brokenInServerless'] },
() => {
clickUpgradeRiskScore(RiskScoreEntity.user);
cy.get(UPGRADE_CONFIRMATION_MODAL(RiskScoreEntity.user)).should('exist');

cy.get(UPGRADE_CANCELLATION_BUTTON)
.get(`${UPGRADE_CONFIRMATION_MODAL(RiskScoreEntity.user)} a`)
.then((link) => {
expect(link.prop('href')).to.eql(
`https://www.elastic.co/guide/en/security/current/${RiskScoreEntity.user}-risk-score.html`
);
});
});
cy.get(UPGRADE_CANCELLATION_BUTTON)
.get(`${UPGRADE_CONFIRMATION_MODAL(RiskScoreEntity.user)} a`)
.then((link) => {
expect(link.prop('href')).to.eql(
`https://www.elastic.co/guide/en/security/current/${RiskScoreEntity.user}-risk-score.html`
);
});
}
);
});

const versions: Array<'8.3' | '8.4'> = ['8.3', '8.4'];
@@ -10,9 +10,10 @@ import { login, visitHostDetailsPage } from '../../../tasks/login';
import { cleanKibana, waitForTableToLoad } from '../../../tasks/common';
import { TABLE_CELL, TABLE_ROWS } from '../../../screens/alerts_details';

describe('risk tab', { tags: ['@ess', '@serverless'] }, () => {
describe('risk tab', { tags: ['@ess', '@serverless', '@brokenInServerless'] }, () => {
before(() => {
cleanKibana();
// illegal_argument_exception: unknown setting [index.lifecycle.rollover_alias]
cy.task('esArchiverLoad', { archiveName: 'risk_hosts' });
});
@@ -53,7 +53,8 @@ describe('risk tab', { tags: ['@ess', '@brokenInServerless'] }, () => {
removeCriticalFilterAndCloseRiskTableFilter();
});

it('should be able to change items count per page', () => {
// Flaky
it.skip('should be able to change items count per page', () => {
selectFiveItemsPerPageOption();

cy.get(HOST_BY_RISK_TABLE_HOSTNAME_CELL).should('have.length', 5);
@@ -12,9 +12,10 @@ import { cleanKibana } from '../../../tasks/common';
import { TABLE_CELL } from '../../../screens/alerts_details';
import { kqlSearch } from '../../../tasks/security_header';

describe('All hosts table', { tags: ['@ess', '@serverless'] }, () => {
describe('All hosts table', { tags: ['@ess', '@serverless', '@brokenInServerless'] }, () => {
before(() => {
cleanKibana();
// illegal_argument_exception: unknown setting [index.lifecycle.name]
cy.task('esArchiverLoad', { archiveName: 'risk_hosts' });
});
@@ -22,8 +22,9 @@ import { selectDataView } from '../../tasks/sourcerer';

const DATA_VIEW = 'auditbeat-*';

describe('Inspect Explore pages', { tags: ['@ess', '@serverless'] }, () => {
describe('Inspect Explore pages', { tags: ['@ess', '@serverless', '@brokenInServerless'] }, () => {
before(() => {
// illegal_argument_exception: unknown setting [index.lifecycle.name]
cy.task('esArchiverLoad', { archiveName: 'risk_users' });
cy.task('esArchiverLoad', { archiveName: 'risk_hosts' });
@@ -15,33 +15,37 @@ import { waitForAlertsToPopulate } from '../../../tasks/create_new_rule';
import { login, visit } from '../../../tasks/login';
import { ALERTS_URL } from '../../../urls/navigation';

describe('Alerts Table Action column', { tags: ['@ess', '@serverless'] }, () => {
before(() => {
cleanKibana();
cy.task('esArchiverLoad', {
archiveName: 'process_ancestry',
useCreate: true,
docsOnly: true,
describe(
'Alerts Table Action column',
{ tags: ['@ess', '@serverless', '@brokenInServerless'] },
() => {
before(() => {
cleanKibana();
cy.task('esArchiverLoad', {
archiveName: 'process_ancestry',
useCreate: true,
docsOnly: true,
});
});
});

beforeEach(() => {
login();
visit(ALERTS_URL);
waitForAlertsToPopulate();
});
beforeEach(() => {
login();
visit(ALERTS_URL);
waitForAlertsToPopulate();
});

after(() => {
cy.task('esArchiverUnload', 'process_ancestry');
});
after(() => {
cy.task('esArchiverUnload', 'process_ancestry');
});

it('should have session viewer button visible & open session viewer on click', () => {
openSessionViewerFromAlertTable();
cy.get(OVERLAY_CONTAINER).should('be.visible');
});
it('should have session viewer button visible & open session viewer on click', () => {
openSessionViewerFromAlertTable();
cy.get(OVERLAY_CONTAINER).should('be.visible');
});

it('should have analyzer button visible & open analyzer on click', () => {
openAnalyzerForFirstAlertInTimeline();
cy.get(OVERLAY_CONTAINER).should('be.visible');
});
});
it('should have analyzer button visible & open analyzer on click', () => {
openAnalyzerForFirstAlertInTimeline();
cy.get(OVERLAY_CONTAINER).should('be.visible');
});
}
);
@@ -36,7 +36,7 @@ import { ALERT_SUMMARY_SEVERITY_DONUT_CHART } from '../../../screens/alerts';
import { getLocalstorageEntryAsObject } from '../../../helpers/common';
import { goToRuleDetails } from '../../../tasks/alerts_detection_rules';

describe('Alert details flyout', { tags: ['@ess', '@serverless'] }, () => {
describe('Alert details flyout', { tags: ['@ess', '@serverless', '@brokenInServerless'] }, () => {
describe('Basic functions', () => {
beforeEach(() => {
cleanKibana();
@@ -5,7 +5,6 @@
* 2.0.
*/

import { scrollWithinDocumentDetailsExpandableFlyoutRightSection } from '../../../../tasks/expandable_flyout/alert_details_right_panel_json_tab';
import { openJsonTab } from '../../../../tasks/expandable_flyout/alert_details_right_panel';
import { expandFirstAlertExpandableFlyout } from '../../../../tasks/expandable_flyout/common';
import { DOCUMENT_DETAILS_FLYOUT_JSON_TAB_CONTENT } from '../../../../screens/expandable_flyout/alert_details_right_panel_json_tab';

@@ -31,10 +30,7 @@ describe(
});

it('should display the json component', () => {
// the json component is rendered within a dom element with overflow, so Cypress isn't finding it
// this next line is a hack that vertically scrolls down to ensure Cypress finds it
scrollWithinDocumentDetailsExpandableFlyoutRightSection(0, 7000);
cy.get(DOCUMENT_DETAILS_FLYOUT_JSON_TAB_CONTENT).should('be.visible');
cy.get(DOCUMENT_DETAILS_FLYOUT_JSON_TAB_CONTENT).should('exist');
});
}
);
@@ -27,76 +27,80 @@ import {
} from '../../../screens/alerts_details';
import { verifyInsightCount } from '../../../tasks/alerts_details';

describe('Investigate in timeline', { tags: ['@ess', '@serverless'] }, () => {
before(() => {
cleanKibana();
createRule(getNewRule());
});

describe('From alerts table', () => {
beforeEach(() => {
login();
visit(ALERTS_URL);
waitForAlertsToPopulate();
describe(
'Investigate in timeline',
{ tags: ['@ess', '@serverless', '@brokenInServerless'] },
() => {
before(() => {
cleanKibana();
createRule(getNewRule());
});

it('should open new timeline from alerts table', () => {
investigateFirstAlertInTimeline();
cy.get(PROVIDER_BADGE)
.first()
.invoke('text')
.then((eventId) => {
cy.get(PROVIDER_BADGE).filter(':visible').should('have.text', eventId);
describe('From alerts table', () => {
beforeEach(() => {
login();
visit(ALERTS_URL);
waitForAlertsToPopulate();
});

it('should open new timeline from alerts table', () => {
investigateFirstAlertInTimeline();
cy.get(PROVIDER_BADGE)
.first()
.invoke('text')
.then((eventId) => {
cy.get(PROVIDER_BADGE).filter(':visible').should('have.text', eventId);
});
});
});

describe('From alerts details flyout', () => {
beforeEach(() => {
login();
disableExpandableFlyout();
visit(ALERTS_URL);
waitForAlertsToPopulate();
expandFirstAlert();
});

it('should open a new timeline from a prevalence field', () => {
// Only one alert matches the exact process args in this case
const alertCount = 1;

// Click on the last button that lets us investigate in timeline.
// We expect this to be the `process.args` row.
cy.get(ALERT_FLYOUT)
.find(SUMMARY_VIEW_INVESTIGATE_IN_TIMELINE_BUTTON)
.last()
.should('have.text', alertCount)
.click();

// Make sure a new timeline is created and opened
cy.get(TIMELINE_TITLE).should('have.text', 'Untitled timeline');

// The alert count in this timeline should match the count shown on the alert flyout
cy.get(QUERY_TAB_BUTTON).should('contain.text', alertCount);

// The correct filter is applied to the timeline query
cy.get(FILTER_BADGE).should(
'have.text',
' {"bool":{"must":[{"term":{"process.args":"-zsh"}},{"term":{"process.args":"unique"}}]}}'
);
});

it('should open a new timeline from an insights module', () => {
verifyInsightCount({
tableSelector: INSIGHTS_RELATED_ALERTS_BY_SESSION,
investigateSelector: INSIGHTS_INVESTIGATE_IN_TIMELINE_BUTTON,
});
});
});
});
describe('From alerts details flyout', () => {
beforeEach(() => {
login();
disableExpandableFlyout();
visit(ALERTS_URL);
waitForAlertsToPopulate();
expandFirstAlert();
});

it('should open a new timeline from a prevalence field', () => {
// Only one alert matches the exact process args in this case
const alertCount = 1;

// Click on the last button that lets us investigate in timeline.
// We expect this to be the `process.args` row.
cy.get(ALERT_FLYOUT)
.find(SUMMARY_VIEW_INVESTIGATE_IN_TIMELINE_BUTTON)
.last()
.should('have.text', alertCount)
.click();

// Make sure a new timeline is created and opened
cy.get(TIMELINE_TITLE).should('have.text', 'Untitled timeline');

// The alert count in this timeline should match the count shown on the alert flyout
cy.get(QUERY_TAB_BUTTON).should('contain.text', alertCount);

// The correct filter is applied to the timeline query
cy.get(FILTER_BADGE).should(
'have.text',
' {"bool":{"must":[{"term":{"process.args":"-zsh"}},{"term":{"process.args":"unique"}}]}}'
);
});

it('should open a new timeline from an insights module', () => {
verifyInsightCount({
tableSelector: INSIGHTS_RELATED_ALERTS_BY_SESSION,
investigateSelector: INSIGHTS_INVESTIGATE_IN_TIMELINE_BUTTON,
it('should open a new timeline with alert ids from the process ancestry', () => {
verifyInsightCount({
tableSelector: INSIGHTS_RELATED_ALERTS_BY_ANCESTRY,
investigateSelector: INSIGHTS_INVESTIGATE_ANCESTRY_ALERTS_IN_TIMELINE_BUTTON,
});
});
});

it('should open a new timeline with alert ids from the process ancestry', () => {
verifyInsightCount({
tableSelector: INSIGHTS_RELATED_ALERTS_BY_ANCESTRY,
investigateSelector: INSIGHTS_INVESTIGATE_ANCESTRY_ALERTS_IN_TIMELINE_BUTTON,
});
});
});
});
}
);
@@ -17,29 +17,33 @@ import { waitForAlertsToPopulate } from '../../../tasks/create_new_rule';
import { login, visit } from '../../../tasks/login';
import { ALERTS_URL } from '../../../urls/navigation';

describe('Analyze events view for alerts', { tags: ['@ess', '@serverless'] }, () => {
before(() => {
cleanKibana();
createRule(getNewRule());
});
describe(
'Analyze events view for alerts',
{ tags: ['@ess', '@serverless', '@brokenInServerless'] },
() => {
before(() => {
cleanKibana();
createRule(getNewRule());
});

beforeEach(() => {
login();
visit(ALERTS_URL);
waitForAlertsToPopulate();
});
beforeEach(() => {
login();
visit(ALERTS_URL);
waitForAlertsToPopulate();
});

it('should render when button is clicked', () => {
openAnalyzerForFirstAlertInTimeline();
cy.get(ANALYZER_NODE).first().should('be.visible');
});
it('should render when button is clicked', () => {
openAnalyzerForFirstAlertInTimeline();
cy.get(ANALYZER_NODE).first().should('be.visible');
});

it('should display a toast indicating the date range of found events when a time range has 0 events in it', () => {
const dateContainingZeroEvents = 'Jul 27, 2022 @ 00:00:00.000';
setStartDate(dateContainingZeroEvents);
waitForAlertsToPopulate();
openAnalyzerForFirstAlertInTimeline();
cy.get(TOASTER).should('be.visible');
cy.get(ANALYZER_NODE).first().should('be.visible');
});
});
it('should display a toast indicating the date range of found events when a time range has 0 events in it', () => {
const dateContainingZeroEvents = 'Jul 27, 2022 @ 00:00:00.000';
setStartDate(dateContainingZeroEvents);
waitForAlertsToPopulate();
openAnalyzerForFirstAlertInTimeline();
cy.get(TOASTER).should('be.visible');
cy.get(ANALYZER_NODE).first().should('be.visible');
});
}
);
@@ -33,7 +33,7 @@ describe('Bulk Investigate in Timeline', { tags: ['@ess', '@serverless'] }, () =
cy.task('esArchiverUnload', 'bulk_process');
});

context('Alerts', () => {
context('Alerts', { tags: ['@brokenInServerless'] }, () => {
before(() => {
createRule(getNewRule());
});
@@ -52,7 +52,7 @@ describe(
);
});
});
it('Filter out', () => {
it('Filter out', { tags: ['@brokenInServerless'] }, () => {
cy.get(GET_DISCOVER_DATA_GRID_CELL(TIMESTAMP_COLUMN_NAME, 0)).then((sub) => {
const selectedTimestamp = sub.text();
cy.get(GET_DISCOVER_DATA_GRID_CELL(TIMESTAMP_COLUMN_NAME, 0)).realHover();
@@ -30,8 +30,9 @@ describe('CTI Link Panel', { tags: ['@ess', '@serverless'] }, () => {
.and('match', /app\/integrations\/browse\/threat_intel/);
});

describe('enabled threat intel module', () => {
describe('enabled threat intel module', { tags: ['@brokenInServerless'] }, () => {
before(() => {
// illegal_argument_exception: unknown setting [index.lifecycle.name]
cy.task('esArchiverLoad', { archiveName: 'threat_indicator' });
});
@@ -5,10 +5,12 @@
* 2.0.
*/

import Fs from 'fs';
import * as Url from 'url';
import { EsArchiver } from '@kbn/es-archiver';
import { KbnClient } from '@kbn/test';
import { Client, HttpConnection } from '@elastic/elasticsearch';
import { createEsClientForTesting, KbnClient, systemIndicesSuperuser } from '@kbn/test';
import { ToolingLog } from '@kbn/tooling-log';
import { CA_CERT_PATH } from '@kbn/dev-utils';

export const esArchiver = (
on: Cypress.PluginEvents,

@@ -16,14 +18,20 @@ export const esArchiver = (
): EsArchiver => {
const log = new ToolingLog({ level: 'verbose', writeTo: process.stdout });

const client = new Client({
node: config.env.ELASTICSEARCH_URL,
Connection: HttpConnection,
const isServerless = config.env.IS_SERVERLESS;

const client = createEsClientForTesting({
esUrl: Url.format(config.env.ELASTICSEARCH_URL),
// Use system indices user so tests can write to system indices
authOverride: !isServerless ? systemIndicesSuperuser : undefined,
});

const kbnClient = new KbnClient({
log,
url: config.env.CYPRESS_BASE_URL as string,
...(config.env.ELASTICSEARCH_URL.includes('https')
? { certificateAuthorities: [Fs.readFileSync(CA_CERT_PATH)] }
: {}),
});

const esArchiverInstance = new EsArchiver({
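For context, the `EsArchiver` instance built above is what backs the `cy.task('esArchiverLoad', ...)` and `cy.task('esArchiverUnload', ...)` calls seen throughout the specs in this diff. A minimal sketch of that task wiring, assuming the task names, option shapes, and archive root implied by those calls (this block is illustrative, not code from the PR):

```ts
// Sketch only: ARCHIVE_ROOT is an assumed location, and the option handling
// mirrors the { archiveName, useCreate, docsOnly } objects passed by the specs.
const ARCHIVE_ROOT = 'x-pack/test/security_solution_cypress/es_archives'; // assumed path

on('task', {
  // Load a named archive into the test cluster before a suite runs.
  esArchiverLoad: async ({
    archiveName,
    useCreate = false,
    docsOnly = false,
  }: {
    archiveName: string;
    useCreate?: boolean;
    docsOnly?: boolean;
  }) => esArchiverInstance.load(`${ARCHIVE_ROOT}/${archiveName}`, { useCreate, docsOnly }),

  // Remove whatever a previously loaded archive created.
  esArchiverUnload: async (archiveName: string) =>
    esArchiverInstance.unload(`${ARCHIVE_ROOT}/${archiveName}`),
});
```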
@@ -1,15 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/

import { DOCUMENT_DETAILS_FLYOUT_RIGHT_PANEL_CONTENT } from '../../screens/expandable_flyout/alert_details_right_panel_json_tab';

/**
* Scroll to x-y positions within the right section of the document details expandable flyout
* // TODO revisit this as it seems very fragile: the first element found is the timeline flyout, which isn't visible but still exist in the DOM
*/
export const scrollWithinDocumentDetailsExpandableFlyoutRightSection = (x: number, y: number) =>
cy.get(DOCUMENT_DETAILS_FLYOUT_RIGHT_PANEL_CONTENT).last().scrollTo(x, y);
@@ -43,6 +43,7 @@
"@kbn/fleet-plugin",
"@kbn/cases-components",
"@kbn/security-solution-plugin",
"@kbn/dev-utils",
"@kbn/expandable-flyout",
]
}
@@ -35,7 +35,8 @@ export default function ({ getService }: FtrProviderContext) {
const esClient = getService('es');
const esDeleteAllIndices = getService('esDeleteAllIndices');

describe('Alerting rules', () => {
// Issue: https://github.com/elastic/kibana/issues/165145
describe.skip('Alerting rules', () => {
const RULE_TYPE_ID = '.es-query';
const ALERT_ACTION_INDEX = 'alert-action-es-query';
let actionId: string;
@@ -134,16 +134,17 @@ export default function ({ getService }: FtrProviderContext) {
// expect success because we're using the internal header
expect(body).toEqual({
authentication_provider: { name: '__http__', type: 'http' },
authentication_realm: { name: 'reserved', type: 'reserved' },
authentication_realm: { name: 'file1', type: 'file' },
authentication_type: 'realm',
elastic_cloud_user: false,
email: null,
enabled: true,
full_name: null,
lookup_realm: { name: 'reserved', type: 'reserved' },
metadata: { _reserved: true },
lookup_realm: { name: 'file1', type: 'file' },
metadata: {},
operator: true,
roles: ['superuser'],
username: 'elastic',
username: 'elastic_serverless',
});
expect(status).toBe(200);
});
@@ -67,7 +67,8 @@ async function uploadSourcemap(apmApiClient: any) {
export default function ({ getService }: APMFtrContextProvider) {
const apmApiClient = getService('apmApiClient');

describe('apm feature flags', () => {
// Issue: https://github.com/elastic/kibana/issues/165138
describe.skip('apm feature flags', () => {
describe('fleet migrations', () => {
it('rejects requests to save apm server schema', async () => {
try {
@@ -112,7 +112,7 @@ export const deleteMappings = async (es: Client): Promise<void> => {
});
};

export const defaultUser = { email: null, full_name: null, username: 'elastic' };
export const defaultUser = { email: null, full_name: null, username: 'elastic_serverless' };
/**
* A null filled user will occur when the security plugin is disabled
*/
@@ -20,7 +20,9 @@ export default function ({ getService }: FtrProviderContext) {
const dataViewApi = getService('dataViewApi');
const logger = getService('log');

describe('Threshold rule - AVG - PCT - FIRED', () => {
// Blocked API: index_not_found_exception: no such index [.alerts-observability.threshold.alerts-default]
// Issue: https://github.com/elastic/kibana/issues/165138
describe.skip('Threshold rule - AVG - PCT - FIRED', () => {
const THRESHOLD_RULE_ALERT_INDEX = '.alerts-observability.threshold.alerts-default';
const ALERT_ACTION_INDEX = 'alert-action-threshold';
const DATA_VIEW_ID = 'data-view-id';
@@ -17,7 +17,9 @@ export default function ({ getService }: FtrProviderContext) {
const alertingApi = getService('alertingApi');
const dataViewApi = getService('dataViewApi');

describe('Threshold rule - AVG - PCT - NoData', () => {
// Blocked API: index_not_found_exception: no such index [.alerts-observability.threshold.alerts-default]
// Issue: https://github.com/elastic/kibana/issues/165138
describe.skip('Threshold rule - AVG - PCT - NoData', () => {
const THRESHOLD_RULE_ALERT_INDEX = '.alerts-observability.threshold.alerts-default';
const ALERT_ACTION_INDEX = 'alert-action-threshold';
const DATA_VIEW_ID = 'data-view-id-no-data';
@@ -26,7 +26,8 @@ export default function ({ getService }: FtrProviderContext) {
const alertingApi = getService('alertingApi');
const dataViewApi = getService('dataViewApi');

describe('Threshold rule - CUSTOM_EQ - AVG - BYTES - FIRED', () => {
// Issue: https://github.com/elastic/kibana/issues/165138
describe.skip('Threshold rule - CUSTOM_EQ - AVG - BYTES - FIRED', () => {
const THRESHOLD_RULE_ALERT_INDEX = '.alerts-observability.threshold.alerts-default';
const ALERT_ACTION_INDEX = 'alert-action-threshold';
const DATA_VIEW_ID = 'data-view-id';
@@ -20,7 +20,8 @@ export default function ({ getService }: FtrProviderContext) {
const alertingApi = getService('alertingApi');
const dataViewApi = getService('dataViewApi');

describe('Threshold rule - DOCUMENTS_COUNT - FIRED', () => {
// Issue: https://github.com/elastic/kibana/issues/165138
describe.skip('Threshold rule - DOCUMENTS_COUNT - FIRED', () => {
const THRESHOLD_RULE_ALERT_INDEX = '.alerts-observability.threshold.alerts-default';
const ALERT_ACTION_INDEX = 'alert-action-threshold';
const DATA_VIEW_ID = 'data-view-id';
@@ -30,7 +30,8 @@ export default function ({ getService }: FtrProviderContext) {
let alertId: string;
let startedAt: string;

describe('Threshold rule - GROUP_BY - FIRED', () => {
// Issue: https://github.com/elastic/kibana/issues/165138
describe.skip('Threshold rule - GROUP_BY - FIRED', () => {
const THRESHOLD_RULE_ALERT_INDEX = '.alerts-observability.threshold.alerts-default';
const ALERT_ACTION_INDEX = 'alert-action-threshold';
const DATA_VIEW_ID = 'data-view-id';
@@ -112,7 +112,7 @@ export const deleteMappings = async (es: Client): Promise<void> => {
});
};

export const defaultUser = { email: null, full_name: null, username: 'elastic' };
export const defaultUser = { email: null, full_name: null, username: 'elastic_serverless' };
/**
* A null filled user will occur when the security plugin is disabled
*/
@@ -6,14 +6,14 @@
*/

import { defineCypressConfig } from '@kbn/cypress-config';
import { kbnTestConfig } from '@kbn/test';
import { kbnTestConfig, kibanaTestSuperuserServerless } from '@kbn/test';

import Url from 'url';

const kibanaUrlWithoutAuth = Url.format({
protocol: kbnTestConfig.getUrlParts().protocol,
hostname: kbnTestConfig.getUrlParts().hostname,
port: kbnTestConfig.getUrlParts().port,
protocol: 'https',
hostname: kbnTestConfig.getUrlParts(kibanaTestSuperuserServerless).hostname,
port: kbnTestConfig.getUrlParts(kibanaTestSuperuserServerless).port,
});

export default defineCypressConfig({

@@ -35,13 +35,13 @@ export default defineCypressConfig({
runMode: 1,
},
e2e: {
baseUrl: 'http://localhost:5620',
baseUrl: 'https://localhost:5620',
supportFile: './support/e2e.ts',
specPattern: './e2e/**/*.cy.ts',
},
env: {
username: kbnTestConfig.getUrlParts().username,
password: kbnTestConfig.getUrlParts().password,
username: kbnTestConfig.getUrlParts(kibanaTestSuperuserServerless).username,
password: kbnTestConfig.getUrlParts(kibanaTestSuperuserServerless).password,
kibanaUrlWithoutAuth,
},
});
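Not part of the diff, but as a rough illustration of how a support file can consume the values this config exposes: the serverless superuser credentials and the unauthenticated base URL come back out through `Cypress.env()`, so a basic-auth login against the now-https endpoint might look like the sketch below. The endpoint and payload shape are assumptions based on Kibana's basic login API, not code from this PR.

```ts
// Sketch only: assumes Kibana's /internal/security/login basic-auth payload.
const login = () =>
  cy.request({
    method: 'POST',
    url: `${Cypress.env('kibanaUrlWithoutAuth')}/internal/security/login`,
    headers: { 'kbn-xsrf': 'cypress' },
    body: {
      providerType: 'basic',
      providerName: 'basic',
      currentURL: '/',
      params: {
        username: Cypress.env('username'),
        password: Cypress.env('password'),
      },
    },
  });
```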
@@ -83,7 +83,9 @@ export default function ({ getService, getPageObjects }: FtrProviderContext) {
}
});

it('should display an empty prompt for no integrations', async () => {
// Skip: failing assertion
// Issue: https://github.com/elastic/kibana/issues/165138
it.skip('should display an empty prompt for no integrations', async () => {
const { integrations } = await PageObjects.discoverLogExplorer.getIntegrations();
expect(integrations.length).to.be(0);
@@ -18,7 +18,9 @@ export default ({ getPageObject, getService }: FtrProviderContext) => {
const cases = getService('cases');
const find = getService('find');

describe('persistable attachment', () => {
// Failing
// Issue: https://github.com/elastic/kibana/issues/165135
describe.skip('persistable attachment', () => {
describe('lens visualization', () => {
before(async () => {
await esArchiver.loadIfNeeded('x-pack/test/functional/es_archives/logstash_functional');
@@ -7,15 +7,27 @@

import { resolve } from 'path';
import { format as formatUrl } from 'url';
import Fs from 'fs';

import { REPO_ROOT } from '@kbn/repo-info';
import { esTestConfig, kbnTestConfig, kibanaServerTestUser } from '@kbn/test';
import {
esTestConfig,
kbnTestConfig,
kibanaTestSuperuserServerless,
getDockerFileMountPath,
} from '@kbn/test';
import { CA_CERT_PATH, KBN_CERT_PATH, KBN_KEY_PATH, kibanaDevServiceAccount } from '@kbn/dev-utils';
import { commonFunctionalServices } from '@kbn/ftr-common-functional-services';
import { services } from './services';

export default async () => {
const servers = {
kibana: kbnTestConfig.getUrlParts(),
elasticsearch: esTestConfig.getUrlParts(),
kibana: {
...kbnTestConfig.getUrlParts(kibanaTestSuperuserServerless),
protocol: 'https',
certificateAuthorities: [Fs.readFileSync(CA_CERT_PATH)],
},
elasticsearch: { ...esTestConfig.getUrlParts(), protocol: 'https' },
};

// "Fake" SAML provider
@@ -32,37 +44,39 @@ export default async () => {

return {
servers,

browser: {
acceptInsecureCerts: true,
},
esTestCluster: {
license: 'trial',
from: 'snapshot',
from: 'serverless',
files: [idpPath, jwksPath],
serverArgs: [
'xpack.security.authc.realms.file.file1.order=-100',

'xpack.security.authc.realms.jwt.jwt1.order=-98',
`xpack.security.authc.realms.jwt.jwt1.token_type=access_token`,
'xpack.security.authc.realms.jwt.jwt1.client_authentication.type=shared_secret',
`xpack.security.authc.realms.jwt.jwt1.client_authentication.shared_secret=my_super_secret`,
`xpack.security.authc.realms.jwt.jwt1.allowed_issuer=https://kibana.elastic.co/jwt/`,
`xpack.security.authc.realms.jwt.jwt1.allowed_subjects=elastic-agent`,
'xpack.security.authc.realms.jwt.jwt1.allowed_audiences=elasticsearch',
`xpack.security.authc.realms.jwt.jwt1.allowed_signature_algorithms=[RS256]`,
`xpack.security.authc.realms.jwt.jwt1.claims.principal=sub`,
`xpack.security.authc.realms.jwt.jwt1.pkc_jwkset_path=${jwksPath}`,
`xpack.security.authc.realms.jwt.jwt1.pkc_jwkset_path=${getDockerFileMountPath(jwksPath)}`,

// TODO: We should set this flag to `false` as soon as we fully migrate tests to SAML and file realms.
`xpack.security.authc.realms.native.native1.enabled=true`,
`xpack.security.authc.realms.native.native1.enabled=false`,
`xpack.security.authc.realms.native.native1.order=-97`,

'xpack.security.authc.token.enabled=true',
'xpack.security.authc.realms.saml.cloud-saml-kibana.order=101',
`xpack.security.authc.realms.saml.cloud-saml-kibana.idp.metadata.path=${idpPath}`,
`xpack.security.authc.realms.saml.cloud-saml-kibana.idp.metadata.path=${getDockerFileMountPath(
idpPath
)}`,
'xpack.security.authc.realms.saml.cloud-saml-kibana.idp.entity_id=http://www.elastic.co/saml1',
`xpack.security.authc.realms.saml.cloud-saml-kibana.sp.entity_id=http://localhost:${servers.kibana.port}`,
`xpack.security.authc.realms.saml.cloud-saml-kibana.sp.logout=http://localhost:${servers.kibana.port}/logout`,
`xpack.security.authc.realms.saml.cloud-saml-kibana.sp.acs=http://localhost:${servers.kibana.port}/api/security/saml/callback`,
'xpack.security.authc.realms.saml.cloud-saml-kibana.attributes.principal=urn:oid:0.0.7',
],
ssl: true, // not needed as for serverless ssl is always on but added it anyway
},

kbnTestServer: {
@@ -72,6 +86,10 @@ export default async () => {
},
sourceArgs: ['--no-base-path', '--env.name=development'],
serverArgs: [
'--server.ssl.enabled=true',
`--server.ssl.key=${KBN_KEY_PATH}`,
`--server.ssl.certificate=${KBN_CERT_PATH}`,
`--server.ssl.certificateAuthorities=${CA_CERT_PATH}`,
`--server.restrictInternalApis=true`,
`--server.port=${servers.kibana.port}`,
'--status.allowAnonymous=true',
@@ -84,8 +102,8 @@ export default async () => {
Object.entries(servers.elasticsearch).filter(([key]) => key.toLowerCase() !== 'auth')
)
)}`,
`--elasticsearch.username=${kibanaServerTestUser.username}`,
`--elasticsearch.password=${kibanaServerTestUser.password}`,
`--elasticsearch.serviceAccountToken=${kibanaDevServiceAccount.token}`,
`--elasticsearch.ssl.certificateAuthorities=${CA_CERT_PATH}`,
'--telemetry.sendUsageTo=staging',
`--logging.appenders.deprecation=${JSON.stringify({
type: 'console',
@@ -122,6 +140,7 @@ export default async () => {

services: {
...commonFunctionalServices,
...services,
},

// overriding default timeouts from packages/kbn-test/src/functional_test_runner/lib/config/schema.ts
@@ -5,4 +5,8 @@
* 2.0.
*/

export const services = {};
import { SupertestProvider, SupertestWithoutAuthProvider } from './supertest';
export const services = {
supertest: SupertestProvider,
supertestWithoutAuth: SupertestWithoutAuthProvider,
};
x-pack/test_serverless/shared/services/supertest.ts (new file, 29 lines)
@@ -0,0 +1,29 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/

import { format as formatUrl } from 'url';
import supertest from 'supertest';
import { FtrProviderContext } from '../../functional/ftr_provider_context';

export function SupertestProvider({ getService }: FtrProviderContext) {
const config = getService('config');
const kbnUrl = formatUrl(config.get('servers.kibana'));
const cAuthorities = config.get('servers.kibana').certificateAuthorities;

return supertest.agent(kbnUrl, { ca: cAuthorities });
}

export function SupertestWithoutAuthProvider({ getService }: FtrProviderContext) {
const config = getService('config');
const kbnUrl = formatUrl({
...config.get('servers.kibana'),
auth: false,
});
const cAuthorities = config.get('servers.kibana').certificateAuthorities;

return supertest.agent(kbnUrl, { ca: cAuthorities });
}
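A minimal sketch of how an FTR test could consume the new `supertest` service; the endpoint, header, and relative import path are illustrative assumptions, not part of this change:

```ts
// Illustrative usage only; the agent already carries the CA configured on servers.kibana.
import { FtrProviderContext } from '../../functional/ftr_provider_context';

export default function ({ getService }: FtrProviderContext) {
  const supertest = getService('supertest');

  describe('Kibana status over TLS', () => {
    it('responds on the SSL-enabled endpoint', async () => {
      await supertest.get('/api/status').set('kbn-xsrf', 'true').expect(200);
    });
  });
}
```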
@@ -50,6 +50,7 @@
"@kbn/security-api-integration-helpers",
"@kbn/data-view-field-editor-plugin",
"@kbn/data-plugin",
"@kbn/dev-utils",
"@kbn/bfetch-plugin",
]
}