kibana/x-pack/platform/packages/shared/kbn-profiler-cli/index.ts
Dario Gieselaar d1493b98e7
@kbn/profiler-cli: collect and display CPU profiles (#216356)
# @kbn/profiler-cli

Profile Kibana while it's running, and open the CPU profile in
Speedscope.

## Usage

Run a command either by prefixing it with the profiler script:
`node scripts/profile.js -- $command`

Or by piping it in:
`$command | node scripts/profile.js`

You can also just run it until SIGINT:

`node scripts/profile.js`

Or with a timeout:

`node scripts/profile.js --timeout=10000`
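
The script also accepts a `--heap` flag (documented in the help text of the source below) to collect a heap snapshot instead of a CPU profile; for example, combined with a timeout:

`node scripts/profile.js --heap --timeout=10000`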

## Examples

### Commands

You can copy a request as curl from the browser's network tab and place it
after the `--` separator:

`node scripts/profile.js --connections=10 --amount=50 -- curl ...`
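
For instance, a pasted request might end up looking like this (the endpoint and header below are placeholders, not taken from a real request):

`node scripts/profile.js --connections=10 --amount=50 -- curl -s 'http://localhost:5601/internal/some_endpoint' -H 'Authorization: ...'`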

You can also use stdin for this, for example:

`pbpaste | node scripts/profile.js`

When using stdin, keep in mind that there is some lag between starting the
script and attaching the profiler, so the profiler might miss the first
second or so of the running process.

You can also use any other command, like `autocannon`, `sleep` or
`xargs`.
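
For example, to drive load with `autocannon` (assuming it is installed; the flags and URL below are only an illustration):

`node scripts/profile.js -- autocannon -c 10 -d 20 http://localhost:5601/api/status`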

### SIGINT

By default, the profiler will run until the process exits: `node
scripts/profile.js`. This is useful when you have a long-running process in
a separate terminal and want to collect a profile over a longer time period.
Be aware that this might cause memory issues, because the profile can get
huge. When you press Cmd+C, the profiler exits gracefully: it first writes
the profile to disk and then opens Speedscope.
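
If Kibana is not listening on one of the default ports (5601 or 5603), point the profiler at it with `--port` (or pass `--pid` directly); the port below is just an example:

`node scripts/profile.js --port=5605`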

---------

Co-authored-by: kibanamachine <42973632+kibanamachine@users.noreply.github.com>
2025-04-02 08:47:33 +02:00


/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0; you may not use this file except in compliance with the Elastic License
 * 2.0.
 */

import { run } from '@kbn/dev-cli-runner';
import { compact, once, uniq } from 'lodash';
import { getKibanaProcessId } from './src/get_kibana_process_id';
import { runCommand } from './src/run_command';
import { runUntilSigInt } from './src/run_until_sigint';
import { getProfiler } from './src/get_profiler';
import { untilStdinCompletes } from './src/until_stdin_completes';

export function cli() {
  run(
    async ({ flags, log, addCleanupTask }) => {
      // Use an explicit --pid if given, otherwise look up the Kibana process by port
      const pid = flags.pid
        ? Number(flags.pid)
        : await getKibanaProcessId({
            ports: uniq(compact([Number(flags.port), 5603, 5601])),
          });

      const controller = new AbortController();

      if (flags.timeout) {
        setTimeout(() => {
          controller.abort();
        }, Number(flags.timeout));
      }

      // SIGUSR1 tells the Node.js process to start listening for inspector connections
      process.kill(pid, 'SIGUSR1');

      const stop = once(await getProfiler({ log, type: flags.heap ? 'heap' : 'cpu' }));

      addCleanupTask(() => {
        // exit-hook, which is used by addCleanupTask,
        // only allows for synchronous exits, and 3.x
        // are on ESM which we currently can't use. so
        // we do a really gross thing where we make
        // process.exit a noop for a bit until the
        // profile has been collected and opened
        const exit = process.exit.bind(process);
        // @ts-expect-error
        process.exit = () => {};
        stop()
          .then(() => {
            exit(0);
          })
          .catch((error) => {
            log.error(error);
            exit(1);
          });
      });

      // Profile for as long as the load lasts: piped stdin, an explicit
      // command after `--`, or simply until SIGINT / timeout
      if (!process.stdin.isTTY) {
        await untilStdinCompletes();
      } else if (flags._.length) {
        const connections = Number(flags.c || flags.connections || 1);
        const amount = Number(flags.a || flags.amount || 1);

        const command = flags._;

        log.info(`Executing "${command}" ${amount} times, ${connections} at a time`);

        await runCommand({
          command,
          connections,
          amount,
          signal: controller.signal,
        });
      } else {
        if (flags.timeout) {
          log.info(`Awaiting timeout of ${flags.timeout}ms`);
        } else {
          log.info(`Awaiting SIGINT (Cmd+C)...`);
        }

        await runUntilSigInt({
          log,
          signal: controller.signal,
        });
      }

      await stop();
    },
    {
      flags: {
        string: ['port', 'pid', 't', 'timeout', 'c', 'connections', 'a', 'amount'],
        boolean: ['heap'],
        help: `
          Usage: node scripts/profiler.js <args> <command>
            --port              Port on which Kibana is running. Falls back to 5603 & 5601.
            --pid               Process ID to hook into it. Takes precedence over \`port\`.
            --timeout           Run commands until timeout (in milliseconds)
            --c, --connections  Number of commands that can be run in parallel.
            --a, --amount       Amount of times the command should be run
            --heap              Collect a heap snapshot
        `,
        allowUnexpected: false,
      },
    }
  );
}