Mirror of https://github.com/elastic/kibana.git
Add support for Docker and Serverless to kbn/es (#161927)
Closes #159260

## Summary

Adds support for running ES through Docker and Serverless in `@kbn/es`

### Checklist

Delete any items that are not applicable to this PR.

- [x] [Documentation](https://www.elastic.co/guide/en/kibana/master/development-documentation.html) was added for features that require explanation or tutorials
- [x] [Unit or functional tests](https://www.elastic.co/guide/en/kibana/master/development-tests.html) were updated or added to match the most common scenarios

---------

Co-authored-by: kibanamachine <42973632+kibanamachine@users.noreply.github.com>
Co-authored-by: Tiago Costa <tiago.costa@elastic.co>
This commit is contained in:
parent
4268b88c8e
commit
7d1b1bed6c
10 changed files with 951 additions and 4 deletions
@ -7,11 +7,13 @@ date: 2022-05-24
 tags: ['kibana', 'dev', 'contributor', 'operations', 'es']
 ---

-> A command line utility for running elasticsearch from snapshot, source, archive or even building snapshot artifacts.
+> A command line utility for running elasticsearch from snapshot, source, archive, docker, serverless or even building snapshot artifacts.

 ## Getting started

 If running elasticsearch from source, elasticsearch needs to be cloned to a sibling directory of Kibana.

+If running elasticsearch serverless or a docker container, docker is required to be installed locally. Installation instructions can be found [here](https://www.docker.com/).
+
 To run, go to the Kibana root and run `node scripts/es --help` to get the latest command line options.

 The script attempts to preserve the existing interfaces used by Elasticsearch CLI. This includes passing through options with the `-E` argument and the `ES_JAVA_OPTS` environment variable for Java options.

@ -28,6 +30,16 @@ Run from source with a configured data directory
 node scripts/es source --Epath.data=/home/me/es_data
 ```

+Run serverless with a specific image tag
+```
+node scripts/es serverless --tag git-fec36430fba2-x86_64
+```
+
+Run an official Docker release
+```
+node scripts/es docker --tag 8.8.2
+```
+
 ## API

 ### run
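As a rough illustration of the `-E` passthrough mentioned in the README text above, the sketch below mirrors how key=value settings are merged over defaults and emitted as Docker `--env` flags (see `resolveEsArgs` in `packages/kbn-es/src/utils/docker.ts` further down in this diff). The function name and sample values here are illustrative only, not part of the change.

```ts
// Illustrative sketch: mirrors the esArgs merging done by resolveEsArgs.
function toDockerEnvFlags(
  defaults: Array<[string, string]>,
  overrides: string[] // settings passed via -E, e.g. ['path.data=/tmp/es']
): string[] {
  const merged = new Map(defaults);

  for (const override of overrides) {
    const [key, ...value] = override.split('=');
    merged.set(key.trim(), value.join('=').trim());
  }

  // Each setting becomes an `--env key=value` pair on the docker command line.
  return Array.from(merged).flatMap(([key, value]) => ['--env', `${key}=${value}`]);
}

// toDockerEnvFlags([['discovery.type', 'single-node']], ['xpack.security.enabled=false'])
// => ['--env', 'discovery.type=single-node', '--env', 'xpack.security.enabled=false']
```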
packages/kbn-es/src/cli_commands/docker.ts (new file, 67 lines)
@ -0,0 +1,67 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import dedent from 'dedent';
import getopts from 'getopts';
import { ToolingLog } from '@kbn/tooling-log';
import { getTimeReporter } from '@kbn/ci-stats-reporter';

import { Cluster } from '../cluster';
import { DOCKER_IMG, DOCKER_REPO, DOCKER_TAG } from '../utils';
import { Command } from './types';

export const docker: Command = {
  description: 'Run an Elasticsearch Docker image',
  usage: 'es docker [<args>]',
  help: (defaults: Record<string, any> = {}) => {
    const { password } = defaults;

    return dedent`
    Options:

      --tag       Image tag of ES to run from ${DOCKER_REPO} [default: ${DOCKER_TAG}]
      --image     Full path to image of ES to run, has precedence over tag. [default: ${DOCKER_IMG}]
      --password  Sets password for elastic user [default: ${password}]
      -E          Additional key=value settings to pass to Elasticsearch
      -D          Override Docker command

    Examples:

      es docker --tag master-SNAPSHOT-amd64
      es docker --image docker.elastic.co/repo:tag
      es docker -D 'start es01'
    `;
  },
  run: async (defaults = {}) => {
    const runStartTime = Date.now();
    const log = new ToolingLog({
      level: 'info',
      writeTo: process.stdout,
    });
    const reportTime = getTimeReporter(log, 'scripts/es docker');

    const argv = process.argv.slice(2);
    const options = getopts(argv, {
      alias: {
        esArgs: 'E',
        dockerCmd: 'D',
      },

      string: ['tag', 'image', 'D'],

      default: defaults,
    });

    const cluster = new Cluster();
    await cluster.runDocker({
      reportTime,
      startTime: runStartTime,
      ...options,
    });
  },
};
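A minimal standalone sketch of how the `getopts` aliases in the command above shape the options object handed to `cluster.runDocker()`; the argv values are made up for illustration and the exact parsed shape may differ slightly.

```ts
import getopts from 'getopts';

// e.g. `node scripts/es docker --tag 8.8.2 -E path.data=/tmp/es -D 'start es01'`
const argv = ['--tag', '8.8.2', '-E', 'path.data=/tmp/es', '-D', 'start es01'];

const options = getopts(argv, {
  alias: { esArgs: 'E', dockerCmd: 'D' },
  string: ['tag', 'image', 'D'],
});

// Roughly: { tag: '8.8.2', esArgs: 'path.data=/tmp/es', dockerCmd: 'start es01', ... }
console.log(options);
```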
@ -10,10 +10,14 @@ import { snapshot } from './snapshot';
 import { source } from './source';
 import { archive } from './archive';
 import { buildSnapshots } from './build_snapshots';
+import { docker } from './docker';
+import { serverless } from './serverless';

 export const commands = {
   snapshot,
   source,
   archive,
   build_snapshots: buildSnapshots,
+  docker,
+  serverless,
 };
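The `scripts/es` entry point that consumes this registry is not part of this diff; a hypothetical dispatcher over the `commands` map might look like the sketch below (import path and error handling are assumptions).

```ts
import { commands } from './cli_commands'; // assumed path, for illustration only

export async function dispatch(name: string, defaults: Record<string, any> = {}) {
  const command = commands[name as keyof typeof commands];

  if (!command) {
    throw new Error(`Unknown command: ${name}. Available: ${Object.keys(commands).join(', ')}`);
  }

  // Each entry implements the shared Command shape: description, usage, help() and run().
  await command.run(defaults);
}

// dispatch('serverless') ends up in Cluster.runServerless(); dispatch('docker') in Cluster.runDocker().
```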
packages/kbn-es/src/cli_commands/serverless.ts (new file, 64 lines)
@ -0,0 +1,64 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import dedent from 'dedent';
import getopts from 'getopts';
import { ToolingLog } from '@kbn/tooling-log';
import { getTimeReporter } from '@kbn/ci-stats-reporter';

import { Cluster } from '../cluster';
import { SERVERLESS_REPO, SERVERLESS_TAG, SERVERLESS_IMG } from '../utils';
import { Command } from './types';

export const serverless: Command = {
  description: 'Run Serverless Elasticsearch through Docker',
  usage: 'es serverless [<args>]',
  help: (defaults: Record<string, any> = {}) => {
    return dedent`
    Options:

      --tag     Image tag of ES Serverless to run from ${SERVERLESS_REPO} [default: ${SERVERLESS_TAG}]
      --image   Full path of ES Serverless image to run, has precedence over tag. [default: ${SERVERLESS_IMG}]
      --clean   Remove existing file system object store before running
      -E        Additional key=value settings to pass to Elasticsearch

    Examples:

      es serverless --tag git-fec36430fba2-x86_64
      es serverless --image docker.elastic.co/repo:tag
    `;
  },
  run: async (defaults = {}) => {
    const runStartTime = Date.now();
    const log = new ToolingLog({
      level: 'info',
      writeTo: process.stdout,
    });
    const reportTime = getTimeReporter(log, 'scripts/es serverless');

    const argv = process.argv.slice(2);
    const options = getopts(argv, {
      alias: {
        basePath: 'base-path',
        esArgs: 'E',
      },

      string: ['tag', 'image'],
      boolean: ['clean'],

      default: defaults,
    });

    const cluster = new Cluster();
    await cluster.runServerless({
      reportTime,
      startTime: runStartTime,
      ...options,
    });
  },
};
@ -21,6 +21,8 @@ const {
   extractConfigFiles,
   NativeRealm,
   parseTimeoutToMs,
+  runServerlessCluster,
+  runDockerContainer,
 } = require('./utils');
 const { createCliError } = require('./errors');
 const { promisify } = require('util');

@ -31,6 +33,8 @@ const { CA_CERT_PATH, ES_NOPASSWORD_P12_PATH, extract } = require('@kbn/dev-util
 const DEFAULT_READY_TIMEOUT = parseTimeoutToMs('1m');

 /** @typedef {import('./cluster_exec_options').EsClusterExecOptions} ExecOptions */
+/** @typedef {import('./utils').DockerOptions} DockerOptions */
+/** @typedef {import('./utils').ServerlessOptions} ServerlessOptions */

 // listen to data on stream until map returns anything but undefined
 const first = (stream, map) =>

@ -467,7 +471,7 @@ exports.Cluster = class Cluster {
       if (stdioTarget) {
         stdioTarget.write(chunk);
       } else {
-        this._log.error(chalk.red());
+        this._log.error(chalk.red(chunk.trim()));
       }
     });

@ -483,7 +487,7 @@ exports.Cluster = class Cluster {
       });
     }

-    // observe the exit code of the process and reflect in _outcome promies
+    // observe the exit code of the process and reflect in _outcome promises
     const exitCode = new Promise((resolve) => this._process.once('exit', resolve));
     this._outcome = exitCode.then((code) => {
       if (this._stopCalled) {

@ -558,4 +562,30 @@ exports.Cluster = class Cluster {
     }
     return esJavaOpts.trim();
   }

+  /**
+   * Run an Elasticsearch Serverless Docker cluster
+   *
+   * @param {ServerlessOptions} options
+   */
+  async runServerless(options = {}) {
+    if (this._process || this._outcome) {
+      throw new Error('ES has already been started');
+    }
+
+    await runServerlessCluster(this._log, options);
+  }
+
+  /**
+   * Run an Elasticsearch Docker container
+   *
+   * @param {DockerOptions} options
+   */
+  async runDocker(options = {}) {
+    if (this._process || this._outcome) {
+      throw new Error('ES has already been started');
+    }
+
+    this._process = await runDockerContainer(this._log, options);
+  }
 };
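For context, a hypothetical programmatic use of the two new `Cluster` methods; the import path and option values below are assumptions for illustration, not defaults from this change.

```ts
// Sketch only: paths and option values are examples.
import { Cluster } from './cluster';

async function startLocalEs() {
  const cluster = new Cluster();

  // Serverless: three Docker nodes sharing a file-system object store under basePath.
  await cluster.runServerless({ basePath: `${process.cwd()}/.es`, clean: true });

  // Or a single stateful container instead (either method throws if ES was already started):
  // await cluster.runDocker({ tag: '8.8.2' });
}
```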
@ -10,7 +10,7 @@ export interface EsClusterExecOptions {
   skipNativeRealmSetup?: boolean;
   reportTime?: (...args: any[]) => void;
   startTime?: number;
-  esArgs?: string[];
+  esArgs?: string[] | string;
   esJavaOpts?: string;
   password?: string;
   skipReadyCheck?: boolean;
packages/kbn-es/src/utils/docker.test.ts (new file, 370 lines)
@ -0,0 +1,370 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import mockFs from 'mock-fs';

import { existsSync } from 'fs';
import { stat } from 'fs/promises';

import {
  DOCKER_IMG,
  maybeCreateDockerNetwork,
  resolveDockerCmd,
  resolveDockerImage,
  resolveEsArgs,
  runDockerContainer,
  runServerlessCluster,
  runServerlessEsNode,
  SERVERLESS_IMG,
  setupServerlessVolumes,
  verifyDockerInstalled,
} from './docker';
import { ToolingLog, ToolingLogCollectingWriter } from '@kbn/tooling-log';

jest.mock('execa');
const execa = jest.requireMock('execa');

const log = new ToolingLog();
const logWriter = new ToolingLogCollectingWriter();
log.setWriters([logWriter]);

const KIBANA_ROOT = process.cwd();
const baseEsPath = `${KIBANA_ROOT}/.es`;
const serverlessDir = 'stateless';
const serverlessObjectStorePath = `${baseEsPath}/${serverlessDir}`;

beforeEach(() => {
  jest.resetAllMocks();
  log.indent(-log.getIndent());
  logWriter.messages.length = 0;

  // jest relies on the filesystem to get sourcemaps when using console.log
  // which breaks with the mocked FS, see https://github.com/tschaub/mock-fs/issues/234
  // hijacking logging to process.stdout as a workaround for this suite.
  jest.spyOn(console, 'log').mockImplementation((...args) => {
    process.stdout.write(args + '\n');
  });
});

afterEach(() => {
  mockFs.restore();
  // restore the console.log behavior
  jest.clearAllMocks();
});

const volumeCmdTest = async (volumeCmd: string[]) => {
  expect(volumeCmd).toHaveLength(2);
  expect(volumeCmd).toEqual(expect.arrayContaining(['--volume', `${baseEsPath}:/objectstore:z`]));

  // extract only permission from mode
  // eslint-disable-next-line no-bitwise
  expect((await stat(serverlessObjectStorePath)).mode & 0o777).toBe(0o766);
};

describe('resolveDockerImage()', () => {
  const defaultRepo = 'another/repo';
  const defaultImg = 'default/reg/repo:tag';
  const tag = '8.8.2';

  test('should return default image when no options', () => {
    const image = resolveDockerImage({ repo: defaultRepo, defaultImg });

    expect(image).toEqual(defaultImg);
  });

  test('should return tag with default repo when tag is passed', () => {
    const image = resolveDockerImage({ repo: defaultRepo, tag, defaultImg });

    expect(image).toMatchInlineSnapshot(`"another/repo:8.8.2"`);
  });

  test('should return image when tag is also passed', () => {
    const image = resolveDockerImage({ repo: defaultRepo, tag, image: DOCKER_IMG, defaultImg });

    expect(image).toEqual(DOCKER_IMG);
  });

  test('should error when invalid registry is passed', () => {
    expect(() =>
      resolveDockerImage({
        repo: defaultRepo,
        tag,
        image: 'another.registry.co/es/es:latest',
        defaultImg,
      })
    ).toThrowErrorMatchingInlineSnapshot(`
      "Only verified images from docker.elastic.co are currently allowed.
      If you require this functionality in @kbn/es please contact the Kibana Operations Team."
    `);
  });
});

describe('verifyDockerInstalled()', () => {
  test('should call the correct Docker command and log the version', async () => {
    execa.mockImplementationOnce(() => Promise.resolve({ stdout: 'Docker Version 123' }));

    await verifyDockerInstalled(log);

    expect(execa.mock.calls).toMatchInlineSnapshot(`
      Array [
        Array [
          "docker",
          Array [
            "--version",
          ],
        ],
      ]
    `);

    expect(logWriter.messages).toMatchInlineSnapshot(`
      Array [
        " [34minfo[39m [1mVerifying Docker is installed.[22m",
        "   │ [34minfo[39m Docker Version 123",
      ]
    `);
  });

  test('should reject when Docker is not installed', async () => {
    execa.mockImplementationOnce(() => Promise.reject({ message: 'Hello World' }));

    await expect(verifyDockerInstalled(log)).rejects.toThrowErrorMatchingInlineSnapshot(`
      "Docker not found locally. Install it from: https://www.docker.com

      Hello World"
    `);
  });
});

describe('maybeCreateDockerNetwork()', () => {
  test('should call the correct Docker command and create the network if needed', async () => {
    execa.mockImplementationOnce(() => Promise.resolve({ exitCode: 0 }));

    await maybeCreateDockerNetwork(log);

    expect(execa.mock.calls).toMatchInlineSnapshot(`
      Array [
        Array [
          "docker",
          Array [
            "network",
            "create",
            "elastic",
          ],
        ],
      ]
    `);

    expect(logWriter.messages).toMatchInlineSnapshot(`
      Array [
        " [34minfo[39m [1mChecking status of elastic Docker network.[22m",
        "   │ [34minfo[39m Created new network.",
      ]
    `);
  });

  test('should use an existing network', async () => {
    execa.mockImplementationOnce(() =>
      Promise.reject({ message: 'network with name elastic already exists' })
    );

    await maybeCreateDockerNetwork(log);

    expect(logWriter.messages).toMatchInlineSnapshot(`
      Array [
        " [34minfo[39m [1mChecking status of elastic Docker network.[22m",
        "   │ [34minfo[39m Using existing network.",
      ]
    `);
  });

  test('should reject for any other Docker error', async () => {
    execa.mockImplementationOnce(() => Promise.reject({ message: 'some error' }));

    await expect(maybeCreateDockerNetwork(log)).rejects.toThrowErrorMatchingInlineSnapshot(
      `"some error"`
    );
  });
});

describe('resolveEsArgs()', () => {
  const defaultEsArgs: Array<[string, string]> = [
    ['foo', 'bar'],
    ['qux', 'zip'],
  ];

  test('should return default args when no options', () => {
    const esArgs = resolveEsArgs(defaultEsArgs, {});

    expect(esArgs).toHaveLength(4);
    expect(esArgs).toMatchInlineSnapshot(`
      Array [
        "--env",
        "foo=bar",
        "--env",
        "qux=zip",
      ]
    `);
  });

  test('should override default args when options is a string', () => {
    const esArgs = resolveEsArgs(defaultEsArgs, { esArgs: 'foo=true' });

    expect(esArgs).toHaveLength(4);
    expect(esArgs).toMatchInlineSnapshot(`
      Array [
        "--env",
        "foo=true",
        "--env",
        "qux=zip",
      ]
    `);
  });

  test('should override default args when options is an array', () => {
    const esArgs = resolveEsArgs(defaultEsArgs, { esArgs: ['foo=false', 'qux=true'] });

    expect(esArgs).toHaveLength(4);
    expect(esArgs).toMatchInlineSnapshot(`
      Array [
        "--env",
        "foo=false",
        "--env",
        "qux=true",
      ]
    `);
  });

  test('should override defaults args and handle password option', () => {
    const esArgs = resolveEsArgs(defaultEsArgs, { esArgs: 'foo=false', password: 'hello' });

    expect(esArgs).toHaveLength(6);
    expect(esArgs).toMatchInlineSnapshot(`
      Array [
        "--env",
        "foo=false",
        "--env",
        "qux=zip",
        "--env",
        "ELASTIC_PASSWORD=hello",
      ]
    `);
  });
});

describe('setupServerlessVolumes()', () => {
  const existingObjectStore = {
    [baseEsPath]: {
      [serverlessDir]: {
        cluster_state: { 0: {}, 1: {}, lease: 'hello world' },
      },
    },
  };

  test('should create stateless directory and return volume docker command', async () => {
    mockFs({
      [baseEsPath]: {},
    });

    const volumeCmd = await setupServerlessVolumes(log, { basePath: baseEsPath });

    volumeCmdTest(volumeCmd);
    expect(existsSync(serverlessObjectStorePath)).toBe(true);
  });

  test('should use an existing object store', async () => {
    mockFs(existingObjectStore);

    const volumeCmd = await setupServerlessVolumes(log, { basePath: baseEsPath });

    volumeCmdTest(volumeCmd);
    expect(existsSync(`${serverlessObjectStorePath}/cluster_state/lease`)).toBe(true);
  });

  test('should remove an existing object store when clean is passed', async () => {
    mockFs(existingObjectStore);

    const volumeCmd = await setupServerlessVolumes(log, { basePath: baseEsPath, clean: true });

    volumeCmdTest(volumeCmd);
    expect(existsSync(`${serverlessObjectStorePath}/cluster_state/lease`)).toBe(false);
  });
});

describe('runServerlessEsNode()', () => {
  const node = {
    params: ['--env', 'foo=bar', '--volume', 'foo/bar'],
    name: 'es01',
    image: SERVERLESS_IMG,
  };

  test('should call the correct Docker command', async () => {
    execa.mockImplementationOnce(() => Promise.resolve({ stdout: 'containerId1234' }));

    await runServerlessEsNode(log, node);

    expect(execa.mock.calls[0][0]).toEqual('docker');
    expect(execa.mock.calls[0][1]).toEqual(
      expect.arrayContaining([
        SERVERLESS_IMG,
        ...node.params,
        '--name',
        node.name,
        '--env',
        `node.name=${node.name}`,
        'run',
        '--detach',
        '--net',
        'elastic',
      ])
    );
  });
});

describe('runServerlessCluster()', () => {
  test('should start 3 serverless nodes', async () => {
    mockFs({
      [baseEsPath]: {},
    });
    execa.mockImplementation(() => Promise.resolve({ stdout: '' }));

    await runServerlessCluster(log, { basePath: baseEsPath });

    // Verify Docker and network then run three nodes
    expect(execa.mock.calls).toHaveLength(5);
  });
});

describe('resolveDockerCmd()', () => {
  test('should return default command when no options', () => {
    const dockerCmd = resolveDockerCmd({});

    expect(dockerCmd).toEqual(expect.arrayContaining(['run', DOCKER_IMG]));
  });

  test('should return custom command when passed', () => {
    const dockerCmd = resolveDockerCmd({ dockerCmd: 'start -a es01' });

    expect(dockerCmd).toHaveLength(3);
    expect(dockerCmd).toMatchInlineSnapshot(`
      Array [
        "start",
        "-a",
        "es01",
      ]
    `);
  });
});

describe('runDockerContainer()', () => {
  test('should resolve', async () => {
    execa.mockImplementation(() => Promise.resolve({ stdout: '' }));

    await expect(runDockerContainer(log, {})).resolves.toEqual({ stdout: '' });
    // Verify Docker and network then run container
    expect(execa.mock.calls).toHaveLength(3);
  });
});
packages/kbn-es/src/utils/docker.ts (new file, 398 lines)
@ -0,0 +1,398 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import chalk from 'chalk';
import execa from 'execa';
import fs from 'fs';
import Fsp from 'fs/promises';
import { resolve } from 'path';

import { ToolingLog } from '@kbn/tooling-log';
import { kibanaPackageJson as pkg } from '@kbn/repo-info';

import { createCliError } from '../errors';
import { EsClusterExecOptions } from '../cluster_exec_options';

interface BaseOptions {
  tag?: string;
  image?: string;
}

export interface DockerOptions extends EsClusterExecOptions, BaseOptions {
  dockerCmd?: string;
}

export interface ServerlessOptions extends EsClusterExecOptions, BaseOptions {
  clean?: boolean;
  basePath: string;
}

interface ServerlessEsNodeArgs {
  esArgs?: Array<[string, string]>;
  image: string;
  name: string;
  params: string[];
}

const DOCKER_REGISTRY = 'docker.elastic.co';

const DOCKER_BASE_CMD = [
  'run',

  '--rm',

  '-t',

  '--net',
  'elastic',

  '--name',
  'es01',

  '-p',
  '127.0.0.1:9200:9200',

  '-p',
  '127.0.0.1:9300:9300',
];

const DEFAULT_DOCKER_ESARGS: Array<[string, string]> = [
  ['ES_JAVA_OPTS', '-Xms1536m -Xmx1536m'],

  ['ES_LOG_STYLE', 'file'],

  ['discovery.type', 'single-node'],

  ['xpack.security.enabled', 'false'],
];

export const DOCKER_REPO = `${DOCKER_REGISTRY}/elasticsearch/elasticsearch`;
export const DOCKER_TAG = `${pkg.version}-SNAPSHOT`;
export const DOCKER_IMG = `${DOCKER_REPO}:${DOCKER_TAG}`;

export const SERVERLESS_REPO = `${DOCKER_REGISTRY}/elasticsearch-ci/elasticsearch-serverless`;
export const SERVERLESS_TAG = 'latest';
export const SERVERLESS_IMG = `${SERVERLESS_REPO}:${SERVERLESS_TAG}`;

const SHARED_SERVERLESS_PARAMS = [
  'run',

  '--rm',

  '--detach',

  '--net',
  'elastic',

  '--env',
  'cluster.initial_master_nodes=es01,es02,es03',

  '--env',
  'stateless.enabled=true',

  '--env',
  'stateless.object_store.type=fs',

  '--env',
  'stateless.object_store.bucket=stateless',

  '--env',
  'path.repo=/objectstore',
];

// only allow certain ES args to be overwritten by options
const DEFAULT_SERVERLESS_ESARGS: Array<[string, string]> = [
  ['ES_JAVA_OPTS', '-Xms1g -Xmx1g'],

  ['xpack.security.enabled', 'false'],

  ['cluster.name', 'stateless'],
];

const SERVERLESS_NODES: Array<Omit<ServerlessEsNodeArgs, 'image'>> = [
  {
    name: 'es01',
    params: [
      '-p',
      '127.0.0.1:9200:9200',

      '-p',
      '127.0.0.1:9300:9300',

      '--env',
      'discovery.seed_hosts=es02,es03',

      '--env',
      'node.roles=["master","index"]',
    ],
    esArgs: [['xpack.searchable.snapshot.shared_cache.size', '1gb']],
  },
  {
    name: 'es02',
    params: [
      '-p',
      '127.0.0.1:9202:9202',

      '-p',
      '127.0.0.1:9302:9302',

      '--env',
      'discovery.seed_hosts=es01,es03',

      '--env',
      'node.roles=["master","search"]',
    ],
    esArgs: [['xpack.searchable.snapshot.shared_cache.size', '1gb']],
  },
  {
    name: 'es03',
    params: [
      '-p',
      '127.0.0.1:9203:9203',

      '-p',
      '127.0.0.1:9303:9303',

      '--env',
      'discovery.seed_hosts=es01,es02',

      '--env',
      'node.roles=["master"]',
    ],
  },
];

/**
 * Determine the Docker image from CLI options and defaults
 */
export function resolveDockerImage({
  tag,
  image,
  repo,
  defaultImg,
}: (ServerlessOptions | DockerOptions) & { repo: string; defaultImg: string }) {
  if (image) {
    if (!image.includes(DOCKER_REGISTRY)) {
      throw createCliError(
        `Only verified images from ${DOCKER_REGISTRY} are currently allowed.\nIf you require this functionality in @kbn/es please contact the Kibana Operations Team.`
      );
    }

    return image;
  } else if (tag) {
    return `${repo}:${tag}`;
  }

  return defaultImg;
}

/**
 * Verify that Docker is installed locally
 */
export async function verifyDockerInstalled(log: ToolingLog) {
  log.info(chalk.bold('Verifying Docker is installed.'));

  const { stdout } = await execa('docker', ['--version']).catch(({ message }) => {
    throw createCliError(
      `Docker not found locally. Install it from: https://www.docker.com\n\n${message}`
    );
  });

  log.indent(4, () => log.info(stdout));
}

/**
 * Setup elastic Docker network if needed
 */
export async function maybeCreateDockerNetwork(log: ToolingLog) {
  log.info(chalk.bold('Checking status of elastic Docker network.'));
  log.indent(4);

  const process = await execa('docker', ['network', 'create', 'elastic']).catch(({ message }) => {
    if (message.includes('network with name elastic already exists')) {
      log.info('Using existing network.');
    } else {
      throw createCliError(message);
    }
  });

  if (process?.exitCode === 0) {
    log.info('Created new network.');
  }

  log.indent(-4);
}

/**
 * Common setup for Docker and Serverless containers
 */
async function setupDocker(log: ToolingLog) {
  await verifyDockerInstalled(log);
  await maybeCreateDockerNetwork(log);
}

/**
 * Override default esArgs with options.esArgs
 */
export function resolveEsArgs(
  defaultEsArgs: Array<[string, string]>,
  options: ServerlessOptions | DockerOptions
) {
  const esArgs = new Map(defaultEsArgs);

  if (options.esArgs) {
    const args = typeof options.esArgs === 'string' ? [options.esArgs] : options.esArgs;

    args.forEach((arg) => {
      const [key, ...value] = arg.split('=');
      esArgs.set(key.trim(), value.join('=').trim());
    });
  }

  if (options.password) {
    esArgs.set('ELASTIC_PASSWORD', options.password);
  }

  return Array.from(esArgs).flatMap((e) => ['--env', e.join('=')]);
}

/**
 * Setup local volumes for Serverless ES
 */
export async function setupServerlessVolumes(log: ToolingLog, options: ServerlessOptions) {
  const volumePath = resolve(options.basePath, 'stateless');

  log.info(chalk.bold(`Checking for local Serverless ES object store at ${volumePath}`));
  log.indent(4);

  if (options.clean && fs.existsSync(volumePath)) {
    log.info('Cleaning existing object store.');
    await Fsp.rm(volumePath, { recursive: true, force: true });
  }

  if (options.clean || !fs.existsSync(volumePath)) {
    await Fsp.mkdir(volumePath, { recursive: true }).then(() =>
      log.info('Created new object store.')
    );
  } else {
    log.info('Using existing object store.');
  }

  // Permissions are set separately from mkdir due to default umask
  await Fsp.chmod(volumePath, 0o766).then(() =>
    log.info('Setup object store permissions (chmod 766).')
  );

  log.indent(-4);

  return ['--volume', `${options.basePath}:/objectstore:z`];
}

/**
 * Resolve the Serverless ES image based on defaults and CLI options
 */
function getServerlessImage(options: ServerlessOptions) {
  return resolveDockerImage({
    ...options,
    repo: SERVERLESS_REPO,
    defaultImg: SERVERLESS_IMG,
  });
}

/**
 * Run a single node in the ES Serverless cluster
 */
export async function runServerlessEsNode(
  log: ToolingLog,
  { params, name, image }: ServerlessEsNodeArgs
) {
  const dockerCmd = SHARED_SERVERLESS_PARAMS.concat(
    params,
    ['--name', name, '--env', `node.name=${name}`],
    image
  );

  log.info(chalk.bold(`Running Serverless ES node: ${name}`));
  log.indent(4, () => log.info(chalk.dim(`docker ${dockerCmd.join(' ')}`)));

  const { stdout } = await execa('docker', dockerCmd);

  log.indent(4, () =>
    log.info(`${name} is running.
  Container Name: ${name}
  Container Id:   ${stdout}

  View logs:      ${chalk.bold(`docker logs -f ${name}`)}
  Shell access:   ${chalk.bold(`docker exec -it ${name} /bin/bash`)}
`)
  );
}

/**
 * Runs an ES Serverless Cluster through Docker
 */
export async function runServerlessCluster(log: ToolingLog, options: ServerlessOptions) {
  await setupDocker(log);

  const volumeCmd = await setupServerlessVolumes(log, options);
  const image = getServerlessImage(options);

  const nodeNames = await Promise.all(
    SERVERLESS_NODES.map(async (node) => {
      await runServerlessEsNode(log, {
        ...node,
        image,
        params: node.params.concat(
          resolveEsArgs(DEFAULT_SERVERLESS_ESARGS.concat(node.esArgs ?? []), options),
          volumeCmd
        ),
      });
      return node.name;
    })
  );

  log.success(`Serverless ES cluster running.
  Stop the cluster: ${chalk.bold(`docker container stop ${nodeNames.join(' ')}`)}
`);
}

/**
 * Resolve the Elasticsearch image based on defaults and CLI options
 */
function getDockerImage(options: DockerOptions) {
  return resolveDockerImage({ ...options, repo: DOCKER_REPO, defaultImg: DOCKER_IMG });
}

/**
 * Resolve the full command to run Elasticsearch Docker container
 */
export function resolveDockerCmd(options: DockerOptions) {
  if (options.dockerCmd) {
    return options.dockerCmd.split(' ');
  }

  return DOCKER_BASE_CMD.concat(
    resolveEsArgs(DEFAULT_DOCKER_ESARGS, options),
    getDockerImage(options)
  );
}

/**
 * Runs an Elasticsearch Docker Container
 */
export async function runDockerContainer(log: ToolingLog, options: DockerOptions) {
  await setupDocker(log);

  const dockerCmd = resolveDockerCmd(options);

  log.info(chalk.dim(`docker ${dockerCmd.join(' ')}`));
  return await execa('docker', dockerCmd, {
    // inherit is required to show Docker pull output and Java console output for pw, enrollment token, etc
    stdio: ['ignore', 'inherit', 'inherit'],
  });
}
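To make the composition concrete, this is roughly the docker invocation that `resolveDockerCmd({})` assembles from `DOCKER_BASE_CMD`, `DEFAULT_DOCKER_ESARGS` and the default image above; the `<version>` placeholder stands in for `kibanaPackageJson.version` and is illustrative only.

```ts
// Approximate shape of resolveDockerCmd({}) with no overrides.
const exampleDockerCmd = [
  'run', '--rm', '-t',
  '--net', 'elastic',
  '--name', 'es01',
  '-p', '127.0.0.1:9200:9200',
  '-p', '127.0.0.1:9300:9300',
  '--env', 'ES_JAVA_OPTS=-Xms1536m -Xmx1536m',
  '--env', 'ES_LOG_STYLE=file',
  '--env', 'discovery.type=single-node',
  '--env', 'xpack.security.enabled=false',
  'docker.elastic.co/elasticsearch/elasticsearch:<version>-SNAPSHOT',
];

console.log(`docker ${exampleDockerCmd.join(' ')}`);
```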
@ -16,3 +16,4 @@ export { NativeRealm, SYSTEM_INDICES_SUPERUSER } from './native_realm';
 export { buildSnapshot } from './build_snapshot';
 export { archiveForPlatform } from './build_snapshot';
 export * from './parse_timeout_to_ms';
+export * from './docker';
@ -16,5 +16,6 @@
     "@kbn/dev-proc-runner",
     "@kbn/ci-stats-reporter",
     "@kbn/jest-serializers",
+    "@kbn/repo-info",
   ]
 }