[8.0] Remove legacy logging (#112305)
* remove kbn-legacy-logging package
* remove legacy service
* remove legacy appender
* remove LegacyObjectToConfigAdapter
* fix types
* remove @hapi/good / @hapi/good-squeeze / @hapi/podium
* remove `default` appender validation for `root` logger
* remove old config key from kibana-docker
* fix FTR config
* fix dev server
* remove reference from readme
* fix unit test
* clean CLI args and remove quiet option
* fix type
* fix status test config
* remove from test config
* fix snapshot
* use another regexp
* update generated doc
* fix createRootWithSettings
* fix some integration tests
* another IT fix
* yet another IT fix
* (will be reverted) add assertion for CI failure
* Revert "(will be reverted) add assertion for CI failure"
This reverts commit 78d5560f9e.
* switch back to json layout for test
* remove legacy logging config deprecations
* address some review comments
* update documentation
* update kibana.yml config examples
* add config example for `metrics.ops`
Co-authored-by: Tyler Smalley <tyler.smalley@elastic.co>
parent 158b396ae1
commit a4b74bd398

94 changed files with 108 additions and 4882 deletions
.github/CODEOWNERS
@@ -243,7 +243,6 @@
 /packages/kbn-std/ @elastic/kibana-core
 /packages/kbn-config/ @elastic/kibana-core
 /packages/kbn-logging/ @elastic/kibana-core
-/packages/kbn-legacy-logging/ @elastic/kibana-core
 /packages/kbn-crypto/ @elastic/kibana-core
 /packages/kbn-http-tools/ @elastic/kibana-core
 /src/plugins/saved_objects_management/ @elastic/kibana-core
config/kibana.yml
@@ -84,24 +84,32 @@
 # Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable.
 #elasticsearch.shardTimeout: 30000
 
-# Logs queries sent to Elasticsearch. Requires logging.verbose set to true.
-#elasticsearch.logQueries: false
-
 # Specifies the path where Kibana creates the process ID file.
 #pid.file: /run/kibana/kibana.pid
 
+# Set the value of this setting to off to suppress all logging output, or to debug to log everything.
+# logging.root.level: debug
+
 # Enables you to specify a file where Kibana stores log output.
-#logging.dest: stdout
+# logging.appenders.default:
+#   type: file
+#   fileName: /var/logs/kibana.log
 
-# Set the value of this setting to true to suppress all logging output.
-#logging.silent: false
-
-# Set the value of this setting to true to suppress all logging output other than error messages.
-#logging.quiet: false
+# Logs queries sent to Elasticsearch.
+# logging.loggers:
+#   - name: elasticsearch.queries
+#     level: debug
 
-# Set the value of this setting to true to log all events, including system usage information
-# and all requests.
-#logging.verbose: false
+# Logs http responses.
+# logging.loggers:
+#   - name: http.server.response
+#     level: debug
+
+# Logs system usage information.
+# logging.loggers:
+#   - name: metrics.ops
+#     level: debug
 
 # Set the interval in milliseconds to sample system and process performance
 # metrics. Minimum is 100ms. Defaults to 5000.
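For reference, the removed legacy flags map onto the new `logging.root.level` values described in the comments above. A minimal TypeScript sketch of that mapping (the helper and its names are illustrative, not code from this commit; the level values follow the adapter logic shown later in this diff):

```typescript
// Hypothetical sketch: how the removed legacy logging flags map onto the new
// `logging.root.level` setting.
interface LegacyFlags {
  silent?: boolean; // was: logging.silent
  quiet?: boolean; // was: logging.quiet
  verbose?: boolean; // was: logging.verbose
}

function rootLevelFor(flags: LegacyFlags): 'off' | 'error' | 'all' | 'info' {
  if (flags.silent) return 'off'; // suppress all output
  if (flags.quiet) return 'error'; // errors only
  if (flags.verbose) return 'all'; // log everything
  return 'info'; // default
}
```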
@@ -76,9 +76,5 @@ you can override the flags with:
 
 |--verbose| --logging.root.level=debug --logging.root.appenders[0]=default --logging.root.appenders[1]=custom | --verbose
 
-|--quiet| --logging.root.level=error --logging.root.appenders[0]=default --logging.root.appenders[1]=custom | not supported
-
 |--silent| --logging.root.level=off | --silent
 |===
-
-NOTE: To preserve backwards compatibility, you are required to pass the root `default` appender until the legacy logging system is removed in `v8.0`.
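The same translation, as the CLI would apply it; an illustrative helper derived from the table above (the function itself is hypothetical, only the flag strings and overrides come from the table):

```typescript
// Hypothetical sketch: translating the remaining legacy CLI flags into the
// equivalent `logging.root` override arguments from the table above.
function loggingOverridesFor(flag: '--verbose' | '--silent'): string[] {
  switch (flag) {
    case '--verbose':
      return ['--logging.root.level=debug'];
    case '--silent':
      return ['--logging.root.level=off'];
  }
}
```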
@@ -74,7 +74,6 @@ yarn kbn watch
 - @kbn/i18n
 - @kbn/interpreter
 - @kbn/io-ts-utils
-- @kbn/legacy-logging
 - @kbn/logging
 - @kbn/mapbox-gl
 - @kbn/monaco
@@ -8,5 +8,5 @@
 <b>Signature:</b>
 
 ```typescript
-export declare type AppenderConfigType = ConsoleAppenderConfig | FileAppenderConfig | LegacyAppenderConfig | RewriteAppenderConfig | RollingFileAppenderConfig;
+export declare type AppenderConfigType = ConsoleAppenderConfig | FileAppenderConfig | RewriteAppenderConfig | RollingFileAppenderConfig;
 ```
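With `LegacyAppenderConfig` removed, a `switch` over the discriminant stays exhaustive across the four remaining appenders. A sketch (the config shapes are abbreviated and assumed for illustration; only the union itself comes from the signature above):

```typescript
// Hypothetical, abbreviated config shapes for the remaining appenders.
type ConsoleAppenderConfig = { type: 'console' };
type FileAppenderConfig = { type: 'file'; fileName: string };
type RewriteAppenderConfig = { type: 'rewrite'; appenders: string[] };
type RollingFileAppenderConfig = { type: 'rolling-file'; fileName: string };

type AppenderConfigType =
  | ConsoleAppenderConfig
  | FileAppenderConfig
  | RewriteAppenderConfig
  | RollingFileAppenderConfig;

// With 'legacy-appender' gone from the union, this switch covers every case.
function describeAppender(appender: AppenderConfigType): string {
  switch (appender.type) {
    case 'console':
      return 'writes to stdout';
    case 'file':
      return `writes to ${appender.fileName}`;
    case 'rewrite':
      return `forwards to ${appender.appenders.join(', ')}`;
    case 'rolling-file':
      return `rotates ${appender.fileName}`;
  }
}
```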
@@ -12,16 +12,6 @@
 Refer to the <<log-settings-examples, examples>> for common configuration use cases. To learn more about possible configuration values, go to {kibana-ref}/logging-service.html[{kib}'s Logging service].
 
-[[log-settings-compatibility]]
-==== Backwards compatibility
-Compatibility with the legacy logging system is assured until the end of the `v7` version.
-All log messages handled by `root` context (default) are forwarded to the legacy logging service.
-The logging configuration is validated against the predefined schema and if there are
-any issues with it, {kib} will fail to start with the detailed error message.
-
-NOTE: When you switch to the new logging configuration, you will start seeing duplicate log entries in both formats.
-These will be removed when the `default` appender is no longer required.
-
 [[log-settings-examples]]
 ==== Examples
 Here are some configuration examples for the most common logging use cases:
@@ -32,12 +32,21 @@ server.name
 Settings unique across each host (for example, running multiple installations on the same virtual machine):
 [source,js]
 --------
-logging.dest
 path.data
 pid.file
 server.port
 --------
 
+When using a file appender, the target file must also be unique:
+[source,yaml]
+--------
+logging:
+  appenders:
+    default:
+      type: file
+      fileName: /unique/path/per/instance
+--------
+
 Settings that must be the same:
 [source,js]
 --------
package.json
@@ -101,7 +101,6 @@
     "@elastic/ems-client": "7.15.0",
     "@elastic/eui": "38.0.1",
     "@elastic/filesaver": "1.1.2",
-    "@elastic/good": "^9.0.1-kibana3",
     "@elastic/maki": "6.3.0",
     "@elastic/node-crypto": "1.2.1",
     "@elastic/numeral": "^2.5.1",
@@ -113,12 +112,10 @@
     "@hapi/accept": "^5.0.2",
     "@hapi/boom": "^9.1.4",
     "@hapi/cookie": "^11.0.2",
-    "@hapi/good-squeeze": "6.0.0",
     "@hapi/h2o2": "^9.1.0",
     "@hapi/hapi": "^20.2.0",
     "@hapi/hoek": "^9.2.0",
     "@hapi/inert": "^6.0.4",
-    "@hapi/podium": "^4.1.3",
     "@hapi/wreck": "^17.1.0",
     "@kbn/ace": "link:bazel-bin/packages/kbn-ace",
     "@kbn/alerts": "link:bazel-bin/packages/kbn-alerts",
@@ -133,7 +130,6 @@
     "@kbn/i18n": "link:bazel-bin/packages/kbn-i18n",
     "@kbn/interpreter": "link:bazel-bin/packages/kbn-interpreter",
     "@kbn/io-ts-utils": "link:bazel-bin/packages/kbn-io-ts-utils",
-    "@kbn/legacy-logging": "link:bazel-bin/packages/kbn-legacy-logging",
     "@kbn/logging": "link:bazel-bin/packages/kbn-logging",
     "@kbn/mapbox-gl": "link:bazel-bin/packages/kbn-mapbox-gl",
     "@kbn/monaco": "link:bazel-bin/packages/kbn-monaco",
packages/BUILD.bazel
@@ -29,7 +29,6 @@ filegroup(
         "//packages/kbn-i18n:build",
         "//packages/kbn-interpreter:build",
         "//packages/kbn-io-ts-utils:build",
-        "//packages/kbn-legacy-logging:build",
         "//packages/kbn-logging:build",
         "//packages/kbn-mapbox-gl:build",
         "//packages/kbn-monaco:build",
@@ -20,7 +20,7 @@ interface BootstrapArgs {
 }
 
 export async function bootstrapDevMode({ configs, cliArgs, applyConfigOverrides }: BootstrapArgs) {
-  const log = new CliLog(!!cliArgs.quiet, !!cliArgs.silent);
+  const log = new CliLog(!!cliArgs.silent);
 
   const env = Env.createDefault(REPO_ROOT, {
     configs,
@@ -74,7 +74,6 @@ const createCliArgs = (parts: Partial<SomeCliArgs> = {}): SomeCliArgs => ({
   runExamples: false,
   watch: true,
   silent: false,
-  quiet: false,
   ...parts,
 });
 
@@ -48,7 +48,6 @@ const GRACEFUL_TIMEOUT = 30000;
 
 export type SomeCliArgs = Pick<
   CliArgs,
-  | 'quiet'
   | 'silent'
   | 'verbose'
  | 'disableOptimizer'
@@ -108,7 +107,7 @@ export class CliDevMode {
   private subscription?: Rx.Subscription;
 
   constructor({ cliArgs, config, log }: { cliArgs: SomeCliArgs; config: CliDevConfig; log?: Log }) {
-    this.log = log || new CliLog(!!cliArgs.quiet, !!cliArgs.silent);
+    this.log = log || new CliLog(!!cliArgs.silent);
 
     if (cliArgs.basePath) {
       this.basePathProxy = new BasePathProxyServer(this.log, config.http, config.dev);
@@ -163,7 +162,7 @@ export class CliDevMode {
       runExamples: cliArgs.runExamples,
       cache: cliArgs.cache,
       dist: cliArgs.dist,
-      quiet: !!cliArgs.quiet,
+      quiet: false,
       silent: !!cliArgs.silent,
       verbose: !!cliArgs.verbose,
       watch: cliArgs.watch,
@@ -130,7 +130,6 @@ describe('#run$', () => {
         Array [
           "foo",
           "bar",
-          "--logging.json=false",
         ],
         Object {
           "env": Object {
@@ -21,7 +21,7 @@ export interface Log {
 
 export class CliLog implements Log {
   public toolingLog = new ToolingLog({
-    level: this.silent ? 'silent' : this.quiet ? 'error' : 'info',
+    level: this.silent ? 'silent' : 'info',
     writeTo: {
       write: (msg) => {
         this.write(msg);
@@ -29,10 +29,10 @@ export class CliLog implements Log {
     },
   });
 
-  constructor(private readonly quiet: boolean, private readonly silent: boolean) {}
+  constructor(private readonly silent: boolean) {}
 
   good(label: string, ...args: any[]) {
-    if (this.quiet || this.silent) {
+    if (this.silent) {
      return;
    }
 
@@ -41,7 +41,7 @@ export class CliLog implements Log {
   }
 
   warn(label: string, ...args: any[]) {
-    if (this.quiet || this.silent) {
+    if (this.silent) {
       return;
     }
 
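With `quiet` gone, verbosity selection in `CliLog` collapses to a two-state decision. A usage sketch of the simplified class (values are illustrative, assuming the shape shown in the hunk above):

```typescript
// Illustrative only: exercising the simplified single-argument constructor.
const log = new CliLog(false); // level 'info': good() and warn() print
const silentLog = new CliLog(true); // level 'silent': both are suppressed

log.good('success', 'server started');
silentLog.warn('warning', 'this message is dropped');
```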
@@ -25,7 +25,7 @@ export function usingServerProcess<T>(
 ) {
   return Rx.using(
     (): ProcResource => {
-      const proc = execa.node(script, [...argv, '--logging.json=false'], {
+      const proc = execa.node(script, argv, {
         stdio: 'pipe',
         nodeOptions: [
           ...process.execArgv,
@@ -19,7 +19,6 @@ export function getEnvOptions(options: DeepPartial<EnvOptions> = {}): EnvOptions
   configs: options.configs || [],
   cliArgs: {
     dev: true,
-    quiet: false,
     silent: false,
     watch: false,
     basePath: false,
@@ -11,7 +11,6 @@
   "dist": false,
   "envName": "development",
   "oss": false,
-  "quiet": false,
   "runExamples": false,
   "silent": false,
   "watch": false,
@@ -54,7 +53,6 @@
   "dist": false,
   "envName": "production",
   "oss": false,
-  "quiet": false,
   "runExamples": false,
   "silent": false,
   "watch": false,
@@ -96,7 +94,6 @@
   "disableOptimizer": true,
   "dist": false,
   "oss": false,
-  "quiet": false,
   "runExamples": false,
   "silent": false,
   "watch": false,
@@ -138,7 +135,6 @@
   "disableOptimizer": true,
   "dist": false,
   "oss": false,
-  "quiet": false,
   "runExamples": false,
   "silent": false,
   "watch": false,
@@ -180,7 +176,6 @@
   "disableOptimizer": true,
   "dist": false,
   "oss": false,
-  "quiet": false,
   "runExamples": false,
   "silent": false,
   "watch": false,
@@ -222,7 +217,6 @@
   "disableOptimizer": true,
   "dist": false,
   "oss": false,
-  "quiet": false,
   "runExamples": false,
   "silent": false,
   "watch": false,
@@ -24,7 +24,7 @@ import {
   DeprecatedConfigDetails,
   ChangedDeprecatedPaths,
 } from './deprecation';
-import { LegacyObjectToConfigAdapter } from './legacy';
+import { ObjectToConfigAdapter } from './object_to_config_adapter';
 
 /** @internal */
 export type IConfigService = PublicMethodsOf<ConfigService>;
 
@@ -71,7 +71,7 @@ export class ConfigService {
       map(([rawConfig, deprecations]) => {
         const migrated = applyDeprecations(rawConfig, deprecations);
         this.deprecatedConfigPaths.next(migrated.changedPaths);
-        return new LegacyObjectToConfigAdapter(migrated.config);
+        return new ObjectToConfigAdapter(migrated.config);
       }),
       tap((config) => {
         this.lastConfig = config;
@@ -21,8 +21,6 @@ export interface EnvOptions {
 export interface CliArgs {
   dev: boolean;
   envName?: string;
-  /** @deprecated */
-  quiet?: boolean;
   silent?: boolean;
   verbose?: boolean;
   watch: boolean;
@@ -30,5 +30,4 @@ export { Config, ConfigPath, isConfigPath, hasConfigPathIntersection } from './c
 export { ObjectToConfigAdapter } from './object_to_config_adapter';
 export { CliArgs, Env, RawPackageInfo } from './env';
 export { EnvironmentMode, PackageInfo } from './types';
-export { LegacyObjectToConfigAdapter, LegacyLoggingConfig } from './legacy';
 export { getPluginSearchPaths } from './plugins';
packages/kbn-config/src/legacy/__snapshots__/legacy_object_to_config_adapter.test.ts.snap (deleted)
@@ -1,95 +0,0 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP

exports[`#get correctly handles silent logging config. 1`] = `
Object {
  "appenders": Object {
    "default": Object {
      "legacyLoggingConfig": Object {
        "silent": true,
      },
      "type": "legacy-appender",
    },
  },
  "loggers": undefined,
  "root": Object {
    "level": "off",
  },
  "silent": true,
}
`;

exports[`#get correctly handles verbose file logging config with json format. 1`] = `
Object {
  "appenders": Object {
    "default": Object {
      "legacyLoggingConfig": Object {
        "dest": "/some/path.log",
        "json": true,
        "verbose": true,
      },
      "type": "legacy-appender",
    },
  },
  "dest": "/some/path.log",
  "json": true,
  "loggers": undefined,
  "root": Object {
    "level": "all",
  },
  "verbose": true,
}
`;

exports[`#getFlattenedPaths returns all paths of the underlying object. 1`] = `
Array [
  "known",
  "knownContainer.sub1",
  "knownContainer.sub2",
  "legacy.known",
]
`;

exports[`#set correctly sets values for existing paths. 1`] = `
Object {
  "known": "value",
  "knownContainer": Object {
    "sub1": "sub-value-1",
    "sub2": "sub-value-2",
  },
}
`;

exports[`#set correctly sets values for paths that do not exist. 1`] = `
Object {
  "unknown": Object {
    "sub1": "sub-value-1",
    "sub2": "sub-value-2",
  },
}
`;

exports[`#toRaw returns a deep copy of the underlying raw config object. 1`] = `
Object {
  "known": "foo",
  "knownContainer": Object {
    "sub1": "bar",
    "sub2": "baz",
  },
  "legacy": Object {
    "known": "baz",
  },
}
`;

exports[`#toRaw returns a deep copy of the underlying raw config object. 2`] = `
Object {
  "known": "bar",
  "knownContainer": Object {
    "sub1": "baz",
    "sub2": "baz",
  },
  "legacy": Object {
    "known": "baz",
  },
}
`;
packages/kbn-config/src/legacy/index.ts (deleted)
@@ -1,12 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

export {
  LegacyObjectToConfigAdapter,
  LegacyLoggingConfig,
} from './legacy_object_to_config_adapter';
packages/kbn-config/src/legacy/legacy_object_to_config_adapter.test.ts (deleted)
@@ -1,161 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { LegacyObjectToConfigAdapter } from './legacy_object_to_config_adapter';

describe('#get', () => {
  test('correctly handles paths that do not exist.', () => {
    const configAdapter = new LegacyObjectToConfigAdapter({});

    expect(configAdapter.get('one')).not.toBeDefined();
    expect(configAdapter.get(['one', 'two'])).not.toBeDefined();
    expect(configAdapter.get(['one.three'])).not.toBeDefined();
  });

  test('correctly handles paths that do not need to be transformed.', () => {
    const configAdapter = new LegacyObjectToConfigAdapter({
      one: 'value-one',
      two: {
        sub: 'value-two-sub',
      },
      container: {
        value: 'some',
      },
    });

    expect(configAdapter.get('one')).toEqual('value-one');
    expect(configAdapter.get(['two', 'sub'])).toEqual('value-two-sub');
    expect(configAdapter.get('two.sub')).toEqual('value-two-sub');
    expect(configAdapter.get('container')).toEqual({ value: 'some' });
  });

  test('correctly handles csp config.', () => {
    const configAdapter = new LegacyObjectToConfigAdapter({
      csp: {
        rules: ['strict'],
      },
    });

    expect(configAdapter.get('csp')).toMatchInlineSnapshot(`
      Object {
        "rules": Array [
          "strict",
        ],
      }
    `);
  });

  test('correctly handles silent logging config.', () => {
    const configAdapter = new LegacyObjectToConfigAdapter({
      logging: { silent: true },
    });

    expect(configAdapter.get('logging')).toMatchSnapshot();
  });

  test('correctly handles verbose file logging config with json format.', () => {
    const configAdapter = new LegacyObjectToConfigAdapter({
      logging: { verbose: true, json: true, dest: '/some/path.log' },
    });

    expect(configAdapter.get('logging')).toMatchSnapshot();
  });
});

describe('#set', () => {
  test('correctly sets values for paths that do not exist.', () => {
    const configAdapter = new LegacyObjectToConfigAdapter({});

    configAdapter.set('unknown', 'value');
    configAdapter.set(['unknown', 'sub1'], 'sub-value-1');
    configAdapter.set('unknown.sub2', 'sub-value-2');

    expect(configAdapter.toRaw()).toMatchSnapshot();
  });

  test('correctly sets values for existing paths.', () => {
    const configAdapter = new LegacyObjectToConfigAdapter({
      known: '',
      knownContainer: {
        sub1: 'sub-1',
        sub2: 'sub-2',
      },
    });

    configAdapter.set('known', 'value');
    configAdapter.set(['knownContainer', 'sub1'], 'sub-value-1');
    configAdapter.set('knownContainer.sub2', 'sub-value-2');

    expect(configAdapter.toRaw()).toMatchSnapshot();
  });
});

describe('#has', () => {
  test('returns false if config is not set', () => {
    const configAdapter = new LegacyObjectToConfigAdapter({});

    expect(configAdapter.has('unknown')).toBe(false);
    expect(configAdapter.has(['unknown', 'sub1'])).toBe(false);
    expect(configAdapter.has('unknown.sub2')).toBe(false);
  });

  test('returns true if config is set.', () => {
    const configAdapter = new LegacyObjectToConfigAdapter({
      known: 'foo',
      knownContainer: {
        sub1: 'bar',
        sub2: 'baz',
      },
    });

    expect(configAdapter.has('known')).toBe(true);
    expect(configAdapter.has(['knownContainer', 'sub1'])).toBe(true);
    expect(configAdapter.has('knownContainer.sub2')).toBe(true);
  });
});

describe('#toRaw', () => {
  test('returns a deep copy of the underlying raw config object.', () => {
    const configAdapter = new LegacyObjectToConfigAdapter({
      known: 'foo',
      knownContainer: {
        sub1: 'bar',
        sub2: 'baz',
      },
      legacy: { known: 'baz' },
    });

    const firstRawCopy = configAdapter.toRaw();

    configAdapter.set('known', 'bar');
    configAdapter.set(['knownContainer', 'sub1'], 'baz');

    const secondRawCopy = configAdapter.toRaw();

    expect(firstRawCopy).not.toBe(secondRawCopy);
    expect(firstRawCopy.knownContainer).not.toBe(secondRawCopy.knownContainer);

    expect(firstRawCopy).toMatchSnapshot();
    expect(secondRawCopy).toMatchSnapshot();
  });
});

describe('#getFlattenedPaths', () => {
  test('returns all paths of the underlying object.', () => {
    const configAdapter = new LegacyObjectToConfigAdapter({
      known: 'foo',
      knownContainer: {
        sub1: 'bar',
        sub2: 'baz',
      },
      legacy: { known: 'baz' },
    });

    expect(configAdapter.getFlattenedPaths()).toMatchSnapshot();
  });
});
packages/kbn-config/src/legacy/legacy_object_to_config_adapter.ts (deleted)
@@ -1,65 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { ConfigPath } from '../config';
import { ObjectToConfigAdapter } from '../object_to_config_adapter';

/**
 * Represents logging config supported by the legacy platform.
 */
export interface LegacyLoggingConfig {
  silent?: boolean;
  verbose?: boolean;
  quiet?: boolean;
  dest?: string;
  json?: boolean;
  events?: Record<string, string>;
}

type MixedLoggingConfig = LegacyLoggingConfig & Record<string, any>;

/**
 * Represents adapter between config provided by legacy platform and `Config`
 * supported by the current platform.
 * @internal
 */
export class LegacyObjectToConfigAdapter extends ObjectToConfigAdapter {
  private static transformLogging(configValue: MixedLoggingConfig = {}) {
    const { appenders, root, loggers, ...legacyLoggingConfig } = configValue;

    const loggingConfig = {
      appenders: {
        ...appenders,
        default: { type: 'legacy-appender', legacyLoggingConfig },
      },
      root: { level: 'info', ...root },
      loggers,
      ...legacyLoggingConfig,
    };

    if (configValue.silent) {
      loggingConfig.root.level = 'off';
    } else if (configValue.quiet) {
      loggingConfig.root.level = 'error';
    } else if (configValue.verbose) {
      loggingConfig.root.level = 'all';
    }

    return loggingConfig;
  }

  public get(configPath: ConfigPath) {
    const configValue = super.get(configPath);
    switch (configPath) {
      case 'logging':
        return LegacyObjectToConfigAdapter.transformLogging(configValue as LegacyLoggingConfig);
      default:
        return configValue;
    }
  }
}
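For reference, an illustrative driver for the deleted adapter, showing what `transformLogging` produced for a legacy `logging.quiet` config (the call is hypothetical; the output shape follows the class above and the snapshots earlier in this diff):

```typescript
// Illustrative: what the deleted adapter produced for a legacy config.
const adapter = new LegacyObjectToConfigAdapter({ logging: { quiet: true } });

console.log(adapter.get('logging'));
// => {
//      appenders: {
//        default: { type: 'legacy-appender', legacyLoggingConfig: { quiet: true } },
//      },
//      root: { level: 'error' }, // quiet maps to 'error'
//      loggers: undefined,
//      quiet: true,
//    }
```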
packages/kbn-legacy-logging/BUILD.bazel (deleted)
@@ -1,107 +0,0 @@
load("@npm//@bazel/typescript:index.bzl", "ts_config", "ts_project")
load("@build_bazel_rules_nodejs//:index.bzl", "js_library", "pkg_npm")
load("//src/dev/bazel:index.bzl", "jsts_transpiler")

PKG_BASE_NAME = "kbn-legacy-logging"
PKG_REQUIRE_NAME = "@kbn/legacy-logging"

SOURCE_FILES = glob(
    [
        "src/**/*.ts",
    ],
    exclude = ["**/*.test.*"],
)

SRCS = SOURCE_FILES

filegroup(
    name = "srcs",
    srcs = SRCS,
)

NPM_MODULE_EXTRA_FILES = [
    "package.json",
    "README.md"
]

RUNTIME_DEPS = [
    "//packages/kbn-config-schema",
    "//packages/kbn-utils",
    "@npm//@elastic/numeral",
    "@npm//@hapi/hapi",
    "@npm//@hapi/podium",
    "@npm//chokidar",
    "@npm//lodash",
    "@npm//moment-timezone",
    "@npm//query-string",
    "@npm//rxjs",
    "@npm//tslib",
]

TYPES_DEPS = [
    "//packages/kbn-config-schema",
    "//packages/kbn-utils",
    "@npm//@elastic/numeral",
    "@npm//@hapi/podium",
    "@npm//chokidar",
    "@npm//query-string",
    "@npm//rxjs",
    "@npm//tslib",
    "@npm//@types/hapi__hapi",
    "@npm//@types/jest",
    "@npm//@types/lodash",
    "@npm//@types/moment-timezone",
    "@npm//@types/node",
]

jsts_transpiler(
    name = "target_node",
    srcs = SRCS,
    build_pkg_name = package_name(),
)

ts_config(
    name = "tsconfig",
    src = "tsconfig.json",
    deps = [
        "//:tsconfig.base.json",
        "//:tsconfig.bazel.json",
    ],
)

ts_project(
    name = "tsc_types",
    args = ['--pretty'],
    srcs = SRCS,
    deps = TYPES_DEPS,
    declaration = True,
    declaration_map = True,
    emit_declaration_only = True,
    out_dir = "target_types",
    source_map = True,
    root_dir = "src",
    tsconfig = ":tsconfig",
)

js_library(
    name = PKG_BASE_NAME,
    srcs = NPM_MODULE_EXTRA_FILES,
    deps = RUNTIME_DEPS + [":target_node", ":tsc_types"],
    package_name = PKG_REQUIRE_NAME,
    visibility = ["//visibility:public"],
)

pkg_npm(
    name = "npm_module",
    deps = [
        ":%s" % PKG_BASE_NAME,
    ]
)

filegroup(
    name = "build",
    srcs = [
        ":npm_module",
    ],
    visibility = ["//visibility:public"],
)
packages/kbn-legacy-logging/README.md (deleted)
@@ -1,4 +0,0 @@
# @kbn/legacy-logging

This package contains the implementation of the legacy logging
system, based on `@hapi/good`
packages/kbn-legacy-logging/jest.config.js (deleted)
@@ -1,13 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

module.exports = {
  preset: '@kbn/test',
  rootDir: '../..',
  roots: ['<rootDir>/packages/kbn-legacy-logging'],
};
packages/kbn-legacy-logging/package.json (deleted)
@@ -1,8 +0,0 @@
{
  "name": "@kbn/legacy-logging",
  "version": "1.0.0",
  "private": true,
  "license": "SSPL-1.0 OR Elastic License 2.0",
  "main": "./target_node/index.js",
  "types": "./target_types/index.d.ts"
}
packages/kbn-legacy-logging/src/get_logging_config.ts (deleted)
@@ -1,85 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import _ from 'lodash';
import { getLogReporter } from './log_reporter';
import { LegacyLoggingConfig } from './schema';

/**
 * Returns the `@hapi/good` plugin configuration to be used for the legacy logging
 * @param config
 */
export function getLoggingConfiguration(config: LegacyLoggingConfig, opsInterval: number) {
  const events = config.events;

  if (config.silent) {
    _.defaults(events, {});
  } else if (config.quiet) {
    _.defaults(events, {
      log: ['listening', 'error', 'fatal'],
      request: ['error'],
      error: '*',
    });
  } else if (config.verbose) {
    _.defaults(events, {
      error: '*',
      log: '*',
      // To avoid duplicate logs, we explicitly disable these in verbose
      // mode as they are already provided by the new logging config under
      // the `http.server.response` and `metrics.ops` contexts.
      ops: '!',
      request: '!',
      response: '!',
    });
  } else {
    _.defaults(events, {
      log: ['info', 'warning', 'error', 'fatal'],
      request: ['info', 'warning', 'error', 'fatal'],
      error: '*',
    });
  }

  const loggerStream = getLogReporter({
    config: {
      json: config.json,
      dest: config.dest,
      timezone: config.timezone,

      // I'm adding the default here because if you add another filter
      // using the commandline it will remove authorization. I want users
      // to have to explicitly set --logging.filter.authorization=none or
      // --logging.filter.cookie=none to have it show up in the logs.
      filter: _.defaults(config.filter, {
        authorization: 'remove',
        cookie: 'remove',
      }),
    },
    events: _.transform(
      events,
      function (filtered: Record<string, string>, val: string, key: string) {
        // provide a string compatible way to remove events
        if (val !== '!') filtered[key] = val;
      },
      {}
    ),
  });

  const options = {
    ops: {
      interval: opsInterval,
    },
    includes: {
      request: ['headers', 'payload'],
      response: ['headers', 'payload'],
    },
    reporters: {
      logReporter: [loggerStream],
    },
  };
  return options;
}
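An illustrative call of the deleted helper (argument values are hypothetical; behavior as defined above), showing the in-place defaulting of `events` and the `'!'` filtering:

```typescript
// Illustrative: in verbose mode the helper defaults the passed `events`
// object in place to { error: '*', log: '*', ops: '!', request: '!', response: '!' },
// then the `_.transform` pass drops every '!' entry before handing the rest
// to the log reporter.
const options = getLoggingConfiguration({ verbose: true, events: {} }, 5000);

console.log(options.ops.interval); // 5000
console.log(Object.keys(options.reporters)); // ['logReporter']
```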
packages/kbn-legacy-logging/src/index.ts (deleted)
@@ -1,14 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

export { LegacyLoggingConfig, legacyLoggingConfigSchema } from './schema';
export { attachMetaData } from './metadata';
export { setupLoggingRotate } from './rotate';
export { setupLogging, reconfigureLogging } from './setup_logging';
export { getLoggingConfiguration } from './get_logging_config';
export { LegacyLoggingServer } from './legacy_logging_server';
packages/kbn-legacy-logging/src/legacy_logging_server.test.ts (deleted)
@@ -1,105 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

jest.mock('./setup_logging');

import { LegacyLoggingServer, LogRecord } from './legacy_logging_server';

test('correctly forwards log records.', () => {
  const loggingServer = new LegacyLoggingServer({ events: {} });
  const onLogMock = jest.fn();
  loggingServer.events.on('log', onLogMock);

  const timestamp = 1554433221100;
  const firstLogRecord: LogRecord = {
    timestamp: new Date(timestamp),
    pid: 5355,
    level: {
      id: 'info',
      value: 5,
    },
    context: 'some-context',
    message: 'some-message',
  };

  const secondLogRecord: LogRecord = {
    timestamp: new Date(timestamp),
    pid: 5355,
    level: {
      id: 'error',
      value: 3,
    },
    context: 'some-context.sub-context',
    message: 'some-message',
    meta: { unknown: 2 },
    error: new Error('some-error'),
  };

  const thirdLogRecord: LogRecord = {
    timestamp: new Date(timestamp),
    pid: 5355,
    level: {
      id: 'trace',
      value: 7,
    },
    context: 'some-context.sub-context',
    message: 'some-message',
    meta: { tags: ['important', 'tags'], unknown: 2 },
  };

  loggingServer.log(firstLogRecord);
  loggingServer.log(secondLogRecord);
  loggingServer.log(thirdLogRecord);

  expect(onLogMock).toHaveBeenCalledTimes(3);

  const [[firstCall], [secondCall], [thirdCall]] = onLogMock.mock.calls;
  expect(firstCall).toMatchInlineSnapshot(`
    Object {
      "data": "some-message",
      "tags": Array [
        "info",
        "some-context",
      ],
      "timestamp": 1554433221100,
    }
  `);

  expect(secondCall).toMatchInlineSnapshot(`
    Object {
      "data": [Error: some-error],
      "tags": Array [
        "error",
        "some-context",
        "sub-context",
      ],
      "timestamp": 1554433221100,
    }
  `);

  expect(thirdCall).toMatchInlineSnapshot(`
    Object {
      "data": Object {
        Symbol(log message with metadata): Object {
          "message": "some-message",
          "metadata": Object {
            "unknown": 2,
          },
        },
      },
      "tags": Array [
        "debug",
        "some-context",
        "sub-context",
        "important",
        "tags",
      ],
      "timestamp": 1554433221100,
    }
  `);
});
packages/kbn-legacy-logging/src/legacy_logging_server.ts (deleted)
@@ -1,140 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { ServerExtType, Server } from '@hapi/hapi';
import Podium from '@hapi/podium';
import { setupLogging } from './setup_logging';
import { attachMetaData } from './metadata';
import { legacyLoggingConfigSchema } from './schema';

// these LogXXX types are duplicated to avoid a cross dependency with the @kbn/logging package.
// typescript will error if they diverge at some point.
type LogLevelId = 'all' | 'fatal' | 'error' | 'warn' | 'info' | 'debug' | 'trace' | 'off';

interface LogLevel {
  id: LogLevelId;
  value: number;
}

export interface LogRecord {
  timestamp: Date;
  level: LogLevel;
  context: string;
  message: string;
  error?: Error;
  meta?: { [name: string]: any };
  pid: number;
}

const isEmptyObject = (obj: object) => Object.keys(obj).length === 0;

function getDataToLog(error: Error | undefined, metadata: object, message: string) {
  if (error) {
    return error;
  }
  if (!isEmptyObject(metadata)) {
    return attachMetaData(message, metadata);
  }
  return message;
}

interface PluginRegisterParams {
  plugin: {
    register: (
      server: LegacyLoggingServer,
      options: PluginRegisterParams['options']
    ) => Promise<void>;
  };
  options: Record<string, any>;
}

/**
 * Converts core log level to a one that's known to the legacy platform.
 * @param level Log level from the core.
 */
function getLegacyLogLevel(level: LogLevel) {
  const logLevel = level.id.toLowerCase();
  if (logLevel === 'warn') {
    return 'warning';
  }

  if (logLevel === 'trace') {
    return 'debug';
  }

  return logLevel;
}

/**
 * The "legacy" Kibana uses Hapi server + even-better plugin to log, so we should
 * use the same approach here to make log records generated by the core to look the
 * same as the rest of the records generated by the "legacy" Kibana. But to reduce
 * overhead of having full blown Hapi server instance we create our own "light" version.
 * @internal
 */
export class LegacyLoggingServer {
  public connections = [];
  // Emulates Hapi's usage of the podium event bus.
  public events: Podium = new Podium(['log', 'request', 'response']);

  private onPostStopCallback?: () => void;

  constructor(legacyLoggingConfig: any) {
    // We set `ops.interval` to max allowed number and `ops` filter to value
    // that doesn't exist to avoid logging of ops at all, if turned on it will be
    // logged by the "legacy" Kibana.
    const loggingConfig = legacyLoggingConfigSchema.validate({
      ...legacyLoggingConfig,
      events: {
        ...legacyLoggingConfig.events,
        ops: '__no-ops__',
      },
    });

    setupLogging(this as unknown as Server, loggingConfig, 2147483647);
  }

  public register({ plugin: { register }, options }: PluginRegisterParams): Promise<void> {
    return register(this, options);
  }

  public log({ level, context, message, error, timestamp, meta = {} }: LogRecord) {
    const { tags = [], ...metadata } = meta;

    this.events
      .emit('log', {
        data: getDataToLog(error, metadata, message),
        tags: [getLegacyLogLevel(level), ...context.split('.'), ...tags],
        timestamp: timestamp.getTime(),
      })
      .catch((err) => {
        // eslint-disable-next-line no-console
        console.error('An unexpected error occurred while writing to the log:', err.stack);
        process.exit(1);
      });
  }

  public stop() {
    // Tell the plugin we're stopping.
    if (this.onPostStopCallback !== undefined) {
      this.onPostStopCallback();
    }
  }

  public ext(eventName: ServerExtType, callback: () => void) {
    // method is called by plugin that's being registered.
    if (eventName === 'onPostStop') {
      this.onPostStopCallback = callback;
    }
    // We don't care about any others the plugin registers
  }

  public expose() {
    // method is called by plugin that's being registered.
  }
}
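An illustrative use of the deleted server (input values are hypothetical; the behavior follows `log` and `getLegacyLogLevel` as defined above):

```typescript
// Illustrative: constructing the server as in its test and logging one record.
const server = new LegacyLoggingServer({ events: {} });
server.log({
  timestamp: new Date(0),
  pid: 1,
  level: { id: 'warn', value: 4 },
  context: 'plugins.security',
  message: 'session expired',
  meta: { tags: ['audit'] },
});
// Emits a 'log' event with:
//   data: 'session expired'                           (no error, empty metadata)
//   tags: ['warning', 'plugins', 'security', 'audit'] ('warn' -> 'warning', context split on '.')
//   timestamp: 0
```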
packages/kbn-legacy-logging/src/log_events.ts (deleted)
@@ -1,71 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import type { ResponseObject } from '@hapi/hapi';
import { EventData, isEventData } from './metadata';

export interface BaseEvent {
  event: string;
  timestamp: number;
  pid: number;
  tags?: string[];
}

export interface ResponseEvent extends BaseEvent {
  event: 'response';
  method: 'GET' | 'POST' | 'PUT' | 'DELETE';
  statusCode: number;
  path: string;
  headers: Record<string, string | string[]>;
  responseHeaders: Record<string, string | string[]>;
  responsePayload: ResponseObject['source'];
  responseTime: string;
  query: Record<string, any>;
}

export interface OpsEvent extends BaseEvent {
  event: 'ops';
  os: {
    load: string[];
  };
  proc: Record<string, any>;
  load: string;
}

export interface ErrorEvent extends BaseEvent {
  event: 'error';
  error: Error;
  url: string;
}

export interface UndeclaredErrorEvent extends BaseEvent {
  error: Error;
}

export interface LogEvent extends BaseEvent {
  data: EventData;
}

export interface UnkownEvent extends BaseEvent {
  data: string | Record<string, any>;
}

export type AnyEvent =
  | ResponseEvent
  | OpsEvent
  | ErrorEvent
  | UndeclaredErrorEvent
  | LogEvent
  | UnkownEvent;

export const isResponseEvent = (e: AnyEvent): e is ResponseEvent => e.event === 'response';
export const isOpsEvent = (e: AnyEvent): e is OpsEvent => e.event === 'ops';
export const isErrorEvent = (e: AnyEvent): e is ErrorEvent => e.event === 'error';
export const isLogEvent = (e: AnyEvent): e is LogEvent => isEventData((e as LogEvent).data);
export const isUndeclaredErrorEvent = (e: AnyEvent): e is UndeclaredErrorEvent =>
  (e as any).error instanceof Error;
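These guards let the formatters branch over the loosely-typed event stream. A usage sketch (the `describeEvent` helper is hypothetical; the field accesses follow the interfaces above):

```typescript
// Illustrative: narrowing an AnyEvent with the guards defined above.
function describeEvent(event: AnyEvent): string {
  if (isResponseEvent(event)) {
    return `${event.method} ${event.path} -> ${event.statusCode}`;
  }
  if (isOpsEvent(event)) {
    return `load: ${event.os.load.join(' ')}`;
  }
  if (isErrorEvent(event)) {
    return `error at ${event.url}: ${event.error.message}`;
  }
  return 'log or unknown event';
}
```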
packages/kbn-legacy-logging/src/log_format.ts (deleted)
@@ -1,176 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import Stream from 'stream';
import moment from 'moment-timezone';
import _ from 'lodash';
import queryString from 'query-string';
import numeral from '@elastic/numeral';
import chalk from 'chalk';
import { inspect } from 'util';

import { applyFiltersToKeys, getResponsePayloadBytes } from './utils';
import { getLogEventData } from './metadata';
import { LegacyLoggingConfig } from './schema';
import {
  AnyEvent,
  ResponseEvent,
  isResponseEvent,
  isOpsEvent,
  isErrorEvent,
  isLogEvent,
  isUndeclaredErrorEvent,
} from './log_events';

export type LogFormatConfig = Pick<LegacyLoggingConfig, 'json' | 'dest' | 'timezone' | 'filter'>;

function serializeError(err: any = {}) {
  return {
    message: err.message,
    name: err.name,
    stack: err.stack,
    code: err.code,
    signal: err.signal,
  };
}

const levelColor = function (code: number) {
  if (code < 299) return chalk.green(String(code));
  if (code < 399) return chalk.yellow(String(code));
  if (code < 499) return chalk.magentaBright(String(code));
  return chalk.red(String(code));
};

export abstract class BaseLogFormat extends Stream.Transform {
  constructor(private readonly config: LogFormatConfig) {
    super({
      readableObjectMode: false,
      writableObjectMode: true,
    });
  }

  abstract format(data: Record<string, any>): string;

  filter(data: Record<string, unknown>) {
    if (!this.config.filter) {
      return data;
    }
    return applyFiltersToKeys(data, this.config.filter);
  }

  _transform(event: AnyEvent, enc: string, next: Stream.TransformCallback) {
    const data = this.filter(this.readEvent(event));
    this.push(this.format(data) + '\n');
    next();
  }

  getContentLength({ responsePayload, responseHeaders }: ResponseEvent): number | undefined {
    try {
      return getResponsePayloadBytes(responsePayload, responseHeaders);
    } catch (e) {
      // We intentionally swallow any errors as this information is
      // only a nicety for logging purposes, and should not cause the
      // server to crash if it cannot be determined.
      this.push(
        this.format({
          type: 'log',
          tags: ['warning', 'logging'],
          message: `Failed to calculate response payload bytes. [${e}]`,
        }) + '\n'
      );
    }
  }

  extractAndFormatTimestamp(data: Record<string, any>, format?: string) {
    const { timezone } = this.config;
    const date = moment(data['@timestamp']);
    if (timezone) {
      date.tz(timezone);
    }
    return date.format(format);
  }

  readEvent(event: AnyEvent) {
    const data: Record<string, any> = {
      type: event.event,
      '@timestamp': event.timestamp,
      tags: [...(event.tags || [])],
      pid: event.pid,
    };

    if (isResponseEvent(event)) {
      _.defaults(data, _.pick(event, ['method', 'statusCode']));

      const source = _.get(event, 'source', {});
      data.req = {
        url: event.path,
        method: event.method || '',
        headers: event.headers,
        remoteAddress: source.remoteAddress,
        userAgent: source.userAgent,
        referer: source.referer,
      };

      data.res = {
        statusCode: event.statusCode,
        responseTime: event.responseTime,
        contentLength: this.getContentLength(event),
      };

      const query = queryString.stringify(event.query, { sort: false });
      if (query) {
        data.req.url += '?' + query;
      }

      data.message = data.req.method.toUpperCase() + ' ';
      data.message += data.req.url;
      data.message += ' ';
      data.message += levelColor(data.res.statusCode);
      data.message += ' ';
      data.message += chalk.gray(data.res.responseTime + 'ms');
      if (data.res.contentLength) {
        data.message += chalk.gray(' - ' + numeral(data.res.contentLength).format('0.0b'));
      }
    } else if (isOpsEvent(event)) {
      _.defaults(data, _.pick(event, ['pid', 'os', 'proc', 'load']));
      data.message = chalk.gray('memory: ');
      data.message += numeral(_.get(data, 'proc.mem.heapUsed')).format('0.0b');
      data.message += ' ';
      data.message += chalk.gray('uptime: ');
      data.message += numeral(_.get(data, 'proc.uptime')).format('00:00:00');
      data.message += ' ';
      data.message += chalk.gray('load: [');
      data.message += _.get(data, 'os.load', [])
        .map((val: number) => {
          return numeral(val).format('0.00');
        })
        .join(' ');
      data.message += chalk.gray(']');
      data.message += ' ';
      data.message += chalk.gray('delay: ');
      data.message += numeral(_.get(data, 'proc.delay')).format('0.000');
    } else if (isErrorEvent(event)) {
      data.level = 'error';
      data.error = serializeError(event.error);
      data.url = event.url;
      const message = _.get(event, 'error.message');
      data.message = message || 'Unknown error (no message)';
    } else if (isUndeclaredErrorEvent(event)) {
      data.type = 'error';
      data.level = _.includes(event.tags, 'fatal') ? 'fatal' : 'error';
      data.error = serializeError(event.error);
      const message = _.get(event, 'error.message');
      data.message = message || 'Unknown error object (no message)';
    } else if (isLogEvent(event)) {
      _.assign(data, getLogEventData(event.data));
    } else {
      data.message = _.isString(event.data) ? event.data : inspect(event.data);
    }
    return data;
  }
}
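A worked illustration of the deleted `levelColor` helper's status-code buckets (calls are illustrative; the thresholds come from the definition above):

```typescript
// Illustrative: the four status-code buckets of levelColor.
levelColor(200); // chalk.green('200')         (code < 299)
levelColor(302); // chalk.yellow('302')        (code < 399)
levelColor(404); // chalk.magentaBright('404') (code < 499)
levelColor(500); // chalk.red('500')           (everything else)
```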
packages/kbn-legacy-logging/src/log_format_json.test.ts (deleted)
@@ -1,281 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import moment from 'moment';

import { attachMetaData } from './metadata';
import { createListStream, createPromiseFromStreams } from '@kbn/utils';
import { KbnLoggerJsonFormat } from './log_format_json';

const time = +moment('2010-01-01T05:15:59Z', moment.ISO_8601);

const makeEvent = (eventType: string) => ({
  event: eventType,
  timestamp: time,
});

describe('KbnLoggerJsonFormat', () => {
  const config: any = {};

  describe('event types and messages', () => {
    let format: KbnLoggerJsonFormat;
    beforeEach(() => {
      format = new KbnLoggerJsonFormat(config);
    });

    it('log', async () => {
      const result = await createPromiseFromStreams<string>([
        createListStream([makeEvent('log')]),
        format,
      ]);
      const { type, message } = JSON.parse(result);

      expect(type).toBe('log');
      expect(message).toBe('undefined');
    });

    describe('response', () => {
      it('handles a response object', async () => {
        const event = {
          ...makeEvent('response'),
          statusCode: 200,
          contentLength: 800,
          responseTime: 12000,
          method: 'GET',
          path: '/path/to/resource',
          responsePayload: '1234567879890',
          source: {
            remoteAddress: '127.0.0.1',
            userAgent: 'Test Thing',
            referer: 'elastic.co',
          },
        };
        const result = await createPromiseFromStreams<string>([createListStream([event]), format]);
        const { type, method, statusCode, message, req } = JSON.parse(result);

        expect(type).toBe('response');
        expect(method).toBe('GET');
        expect(statusCode).toBe(200);
        expect(message).toBe('GET /path/to/resource 200 12000ms - 13.0B');
        expect(req.remoteAddress).toBe('127.0.0.1');
        expect(req.userAgent).toBe('Test Thing');
      });

      it('leaves payload size empty if not available', async () => {
        const event = {
          ...makeEvent('response'),
          statusCode: 200,
          responseTime: 12000,
          method: 'GET',
          path: '/path/to/resource',
          responsePayload: null,
        };
        const result = await createPromiseFromStreams<string>([createListStream([event]), format]);
        expect(JSON.parse(result).message).toBe('GET /path/to/resource 200 12000ms');
      });
    });

    it('ops', async () => {
      const event = {
        ...makeEvent('ops'),
        os: {
          load: [1, 1, 2],
        },
      };
      const result = await createPromiseFromStreams<string>([createListStream([event]), format]);
      const { type, message } = JSON.parse(result);

      expect(type).toBe('ops');
      expect(message).toBe('memory: 0.0B uptime: 0:00:00 load: [1.00 1.00 2.00] delay: 0.000');
    });

    describe('with metadata', () => {
      it('logs an event with meta data', async () => {
        const event = {
          data: attachMetaData('message for event', {
            prop1: 'value1',
            prop2: 'value2',
          }),
          tags: ['tag1', 'tag2'],
        };
        const result = await createPromiseFromStreams<string>([createListStream([event]), format]);
        const { level, message, prop1, prop2, tags } = JSON.parse(result);

        expect(level).toBe(undefined);
        expect(message).toBe('message for event');
        expect(prop1).toBe('value1');
        expect(prop2).toBe('value2');
        expect(tags).toEqual(['tag1', 'tag2']);
      });

      it('meta data rewrites event fields', async () => {
        const event = {
          data: attachMetaData('message for event', {
            tags: ['meta-data-tag'],
            prop1: 'value1',
            prop2: 'value2',
          }),
          tags: ['tag1', 'tag2'],
        };
        const result = await createPromiseFromStreams<string>([createListStream([event]), format]);
        const { level, message, prop1, prop2, tags } = JSON.parse(result);

        expect(level).toBe(undefined);
        expect(message).toBe('message for event');
        expect(prop1).toBe('value1');
        expect(prop2).toBe('value2');
        expect(tags).toEqual(['meta-data-tag']);
      });

      it('logs an event with empty meta data', async () => {
        const event = {
          data: attachMetaData('message for event'),
          tags: ['tag1', 'tag2'],
        };
        const result = await createPromiseFromStreams<string>([createListStream([event]), format]);
        const { level, message, prop1, prop2, tags } = JSON.parse(result);

        expect(level).toBe(undefined);
        expect(message).toBe('message for event');
        expect(prop1).toBe(undefined);
        expect(prop2).toBe(undefined);
        expect(tags).toEqual(['tag1', 'tag2']);
      });

      it('does not log meta data for an error event', async () => {
        const event = {
          error: new Error('reason'),
          data: attachMetaData('message for event', {
            prop1: 'value1',
            prop2: 'value2',
          }),
          tags: ['tag1', 'tag2'],
        };
        const result = await createPromiseFromStreams<string>([createListStream([event]), format]);
        const { level, message, prop1, prop2, tags } = JSON.parse(result);

        expect(level).toBe('error');
        expect(message).toBe('reason');
        expect(prop1).toBe(undefined);
        expect(prop2).toBe(undefined);
        expect(tags).toEqual(['tag1', 'tag2']);
      });
    });

    describe('errors', () => {
      it('error type', async () => {
        const event = {
          ...makeEvent('error'),
          error: {
            message: 'test error 0',
          },
        };
        const result = await createPromiseFromStreams<string>([createListStream([event]), format]);
        const { level, message, error } = JSON.parse(result);

        expect(level).toBe('error');
        expect(message).toBe('test error 0');
        expect(error).toEqual({ message: 'test error 0' });
      });

      it('with no message', async () => {
        const event = {
          event: 'error',
          error: {},
        };
        const result = await createPromiseFromStreams<string>([createListStream([event]), format]);
        const { level, message, error } = JSON.parse(result);

        expect(level).toBe('error');
        expect(message).toBe('Unknown error (no message)');
        expect(error).toEqual({});
      });

      it('event error instanceof Error', async () => {
        const event = {
          error: new Error('test error 2') as any,
        };
        const result = await createPromiseFromStreams<string>([createListStream([event]), format]);
        const { level, message, error } = JSON.parse(result);

        expect(level).toBe('error');
        expect(message).toBe('test error 2');

        expect(error.message).toBe(event.error.message);
        expect(error.name).toBe(event.error.name);
        expect(error.stack).toBe(event.error.stack);
        expect(error.code).toBe(event.error.code);
        expect(error.signal).toBe(event.error.signal);
      });

      it('event error instanceof Error - fatal', async () => {
        const event = {
          error: new Error('test error 2') as any,
          tags: ['fatal', 'tag2'],
        };
        const result = await createPromiseFromStreams<string>([createListStream([event]), format]);
        const { tags, level, message, error } = JSON.parse(result);

        expect(tags).toEqual(['fatal', 'tag2']);
        expect(level).toBe('fatal');
        expect(message).toBe('test error 2');

        expect(error.message).toBe(event.error.message);
        expect(error.name).toBe(event.error.name);
        expect(error.stack).toBe(event.error.stack);
        expect(error.code).toBe(event.error.code);
        expect(error.signal).toBe(event.error.signal);
      });

      it('event error instanceof Error, no message', async () => {
        const event = {
          error: new Error('') as any,
        };
        const result = await createPromiseFromStreams<string>([createListStream([event]), format]);
        const { level, message, error } = JSON.parse(result);

        expect(level).toBe('error');
        expect(message).toBe('Unknown error object (no message)');

        expect(error.message).toBe(event.error.message);
        expect(error.name).toBe(event.error.name);
        expect(error.stack).toBe(event.error.stack);
        expect(error.code).toBe(event.error.code);
        expect(error.signal).toBe(event.error.signal);
      });
    });
  });

  describe('timezone', () => {
    it('logs in UTC', async () => {
      const format = new KbnLoggerJsonFormat({
        timezone: 'UTC',
      } as any);

      const result = await createPromiseFromStreams<string>([
        createListStream([makeEvent('log')]),
        format,
      ]);

      const { '@timestamp': timestamp } = JSON.parse(result);
      expect(timestamp).toBe(moment.utc(time).format());
    });

    it('logs in local timezone timezone is undefined', async () => {
      const format = new KbnLoggerJsonFormat({} as any);

      const result = await createPromiseFromStreams<string>([
        createListStream([makeEvent('log')]),
        format,
      ]);

      const { '@timestamp': timestamp } = JSON.parse(result);
      expect(timestamp).toBe(moment(time).format());
    });
  });
});
@@ -1,23 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

// @ts-expect-error missing type def
import stringify from 'json-stringify-safe';
import { BaseLogFormat } from './log_format';

const stripColors = function (string: string) {
  return string.replace(/\u001b[^m]+m/g, '');
};

export class KbnLoggerJsonFormat extends BaseLogFormat {
  format(data: Record<string, any>) {
    data.message = stripColors(data.message);
    data['@timestamp'] = this.extractAndFormatTimestamp(data);
    return stringify(data);
  }
}
@@ -1,64 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import moment from 'moment';

import { attachMetaData } from './metadata';
import { createListStream, createPromiseFromStreams } from '@kbn/utils';
import { KbnLoggerStringFormat } from './log_format_string';

const time = +moment('2010-01-01T05:15:59Z', moment.ISO_8601);

const makeEvent = () => ({
  event: 'log',
  timestamp: time,
  tags: ['tag'],
  pid: 1,
  data: 'my log message',
});

describe('KbnLoggerStringFormat', () => {
  it('logs in UTC', async () => {
    const format = new KbnLoggerStringFormat({
      timezone: 'UTC',
    } as any);

    const result = await createPromiseFromStreams([createListStream([makeEvent()]), format]);

    expect(String(result)).toContain(moment.utc(time).format('HH:mm:ss.SSS'));
  });

  it('logs in local timezone when timezone is undefined', async () => {
    const format = new KbnLoggerStringFormat({} as any);

    const result = await createPromiseFromStreams([createListStream([makeEvent()]), format]);

    expect(String(result)).toContain(moment(time).format('HH:mm:ss.SSS'));
  });

  describe('with metadata', () => {
    it('does not log meta data', async () => {
      const format = new KbnLoggerStringFormat({} as any);
      const event = {
        data: attachMetaData('message for event', {
          prop1: 'value1',
        }),
        tags: ['tag1', 'tag2'],
      };

      const result = await createPromiseFromStreams([createListStream([event]), format]);

      const resultString = String(result);
      expect(resultString).toContain('tag1');
      expect(resultString).toContain('tag2');
      expect(resultString).toContain('message for event');

      expect(resultString).not.toContain('value1');
      expect(resultString).not.toContain('prop1');
    });
  });
});
@@ -1,65 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import _ from 'lodash';
import chalk from 'chalk';

import { BaseLogFormat } from './log_format';

const statuses = ['err', 'info', 'error', 'warning', 'fatal', 'status', 'debug'];

const typeColors: Record<string, string> = {
  log: 'white',
  req: 'green',
  res: 'green',
  ops: 'cyan',
  config: 'cyan',
  err: 'red',
  info: 'green',
  error: 'red',
  warning: 'red',
  fatal: 'magentaBright',
  status: 'yellowBright',
  debug: 'gray',
  server: 'gray',
  optmzr: 'white',
  manager: 'green',
  optimize: 'magentaBright',
  listening: 'magentaBright',
  scss: 'magentaBright',
};

const color = _.memoize((name: string): ((...text: string[]) => string) => {
  // @ts-expect-error couldn't even get rid of the error with an any cast
  return chalk[typeColors[name]] || _.identity;
});

const type = _.memoize((t: string) => {
  return color(t)(_.pad(t, 7).slice(0, 7));
});

const prefix = process.env.isDevCliChild ? `${type('server')} ` : '';

export class KbnLoggerStringFormat extends BaseLogFormat {
  format(data: Record<string, any>) {
    const time = color('time')(this.extractAndFormatTimestamp(data, 'HH:mm:ss.SSS'));
    const msg = data.error ? color('error')(data.error.stack) : color('message')(data.message);

    const tags = _(data.tags)
      .sortBy(function (tag) {
        if (color(tag) === _.identity) return `2${tag}`;
        if (_.includes(statuses, tag)) return `0${tag}`;
        return `1${tag}`;
      })
      .reduce(function (s, t) {
        return s + `[${color(t)(t)}]`;
      }, '');

    return `${prefix}${type(data.type)} [${time}] ${tags} ${msg}`;
  }
}
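For orientation, the final template above (`${prefix}${type(data.type)} [${time}] ${tags} ${msg}`) produced lines of the following shape. This is an illustrative sketch rather than captured output; the type column is padded to 7 characters, and status tags sort ahead of the others:

// log     [12:01:15.930] [info][status] Kibana is now available
// error   [12:01:16.101] [error][connection] <error stack printed instead of the message>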
@@ -1,153 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { ErrorEvent } from './log_events';
import { LogInterceptor } from './log_interceptor';

function stubClientErrorEvent(errorMeta: Record<string, any>): ErrorEvent {
  const error = new Error();
  Object.assign(error, errorMeta);
  return {
    event: 'error',
    url: '',
    pid: 1234,
    timestamp: Date.now(),
    tags: ['connection', 'client', 'error'],
    error,
  };
}

const stubEconnresetEvent = () => stubClientErrorEvent({ code: 'ECONNRESET' });
const stubEpipeEvent = () => stubClientErrorEvent({ errno: 'EPIPE' });
const stubEcanceledEvent = () => stubClientErrorEvent({ errno: 'ECANCELED' });

function assertDowngraded(transformed: Record<string, any>) {
  expect(!!transformed).toBe(true);
  expect(transformed).toHaveProperty('event', 'log');
  expect(transformed).toHaveProperty('tags');
  expect(transformed.tags).not.toContain('error');
}

describe('server logging LogInterceptor', () => {
  describe('#downgradeIfEconnreset()', () => {
    it('transforms ECONNRESET events', () => {
      const interceptor = new LogInterceptor();
      const event = stubEconnresetEvent();
      assertDowngraded(interceptor.downgradeIfEconnreset(event)!);
    });

    it('does not match if the tags are not in order', () => {
      const interceptor = new LogInterceptor();
      const event = stubEconnresetEvent();
      event.tags = [...event.tags!.slice(1), event.tags![0]];
      expect(interceptor.downgradeIfEconnreset(event)).toBe(null);
    });

    it('ignores non ECONNRESET events', () => {
      const interceptor = new LogInterceptor();
      const event = stubClientErrorEvent({ errno: 'not ECONNRESET' });
      expect(interceptor.downgradeIfEconnreset(event)).toBe(null);
    });

    it('ignores if tags are wrong', () => {
      const interceptor = new LogInterceptor();
      const event = stubEconnresetEvent();
      event.tags = ['different', 'tags'];
      expect(interceptor.downgradeIfEconnreset(event)).toBe(null);
    });
  });

  describe('#downgradeIfEpipe()', () => {
    it('transforms EPIPE events', () => {
      const interceptor = new LogInterceptor();
      const event = stubEpipeEvent();
      assertDowngraded(interceptor.downgradeIfEpipe(event)!);
    });

    it('does not match if the tags are not in order', () => {
      const interceptor = new LogInterceptor();
      const event = stubEpipeEvent();
      event.tags = [...event.tags!.slice(1), event.tags![0]];
      expect(interceptor.downgradeIfEpipe(event)).toBe(null);
    });

    it('ignores non EPIPE events', () => {
      const interceptor = new LogInterceptor();
      const event = stubClientErrorEvent({ errno: 'not EPIPE' });
      expect(interceptor.downgradeIfEpipe(event)).toBe(null);
    });

    it('ignores if tags are wrong', () => {
      const interceptor = new LogInterceptor();
      const event = stubEpipeEvent();
      event.tags = ['different', 'tags'];
      expect(interceptor.downgradeIfEpipe(event)).toBe(null);
    });
  });

  describe('#downgradeIfEcanceled()', () => {
    it('transforms ECANCELED events', () => {
      const interceptor = new LogInterceptor();
      const event = stubEcanceledEvent();
      assertDowngraded(interceptor.downgradeIfEcanceled(event)!);
    });

    it('does not match if the tags are not in order', () => {
      const interceptor = new LogInterceptor();
      const event = stubEcanceledEvent();
      event.tags = [...event.tags!.slice(1), event.tags![0]];
      expect(interceptor.downgradeIfEcanceled(event)).toBe(null);
    });

    it('ignores non ECANCELED events', () => {
      const interceptor = new LogInterceptor();
      const event = stubClientErrorEvent({ errno: 'not ECANCELLED' });
      expect(interceptor.downgradeIfEcanceled(event)).toBe(null);
    });

    it('ignores if tags are wrong', () => {
      const interceptor = new LogInterceptor();
      const event = stubEcanceledEvent();
      event.tags = ['different', 'tags'];
      expect(interceptor.downgradeIfEcanceled(event)).toBe(null);
    });
  });

  describe('#downgradeIfHTTPSWhenHTTP', () => {
    it('transforms https requests when serving http errors', () => {
      const interceptor = new LogInterceptor();
      const event = stubClientErrorEvent({ message: 'Parse Error', code: 'HPE_INVALID_METHOD' });
      assertDowngraded(interceptor.downgradeIfHTTPSWhenHTTP(event)!);
    });

    it('ignores non events', () => {
      const interceptor = new LogInterceptor();
      const event = stubClientErrorEvent({
        message: 'Parse Error',
        code: 'NOT_HPE_INVALID_METHOD',
      });
      expect(interceptor.downgradeIfEcanceled(event)).toBe(null);
    });
  });

  describe('#downgradeIfHTTPWhenHTTPS', () => {
    it('transforms http requests when serving https errors', () => {
      const message =
        '4584650176:error:1408F09C:SSL routines:ssl3_get_record:http request:../deps/openssl/openssl/ssl/record/ssl3_record.c:322:\n';
      const interceptor = new LogInterceptor();
      const event = stubClientErrorEvent({ message });
      assertDowngraded(interceptor.downgradeIfHTTPWhenHTTPS(event)!);
    });

    it('ignores non events', () => {
      const interceptor = new LogInterceptor();
      const event = stubClientErrorEvent({ message: 'Not error' });
      expect(interceptor.downgradeIfEcanceled(event)).toBe(null);
    });
  });
});
@@ -1,144 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import Stream from 'stream';
import { get, isEqual } from 'lodash';
import { AnyEvent } from './log_events';

/**
 * Matches error messages when clients connect via HTTP instead of HTTPS; see unit test for full message. Warning: this can change when Node
 * and its bundled OpenSSL binary are upgraded.
 */
const OPENSSL_GET_RECORD_REGEX = /ssl3_get_record:http/;

function doTagsMatch(event: AnyEvent, tags: string[]) {
  return isEqual(event.tags, tags);
}

function doesMessageMatch(errorMessage: string, match: RegExp | string) {
  if (!errorMessage) {
    return false;
  }
  if (match instanceof RegExp) {
    return match.test(errorMessage);
  }
  return errorMessage === match;
}

// converts the given event into a debug log if it's an error of the given type
function downgradeIfErrorType(errorType: string, event: AnyEvent) {
  const isClientError = doTagsMatch(event, ['connection', 'client', 'error']);
  if (!isClientError) {
    return null;
  }

  const matchesErrorType =
    get(event, 'error.code') === errorType || get(event, 'error.errno') === errorType;
  if (!matchesErrorType) {
    return null;
  }

  const errorTypeTag = errorType.toLowerCase();

  return {
    event: 'log',
    pid: event.pid,
    timestamp: event.timestamp,
    tags: ['debug', 'connection', errorTypeTag],
    data: `${errorType}: Socket was closed by the client (probably the browser) before it could be read completely`,
  };
}

function downgradeIfErrorMessage(match: RegExp | string, event: AnyEvent) {
  const isClientError = doTagsMatch(event, ['connection', 'client', 'error']);
  const errorMessage = get(event, 'error.message');
  const matchesErrorMessage = isClientError && doesMessageMatch(errorMessage, match);

  if (!matchesErrorMessage) {
    return null;
  }

  return {
    event: 'log',
    pid: event.pid,
    timestamp: event.timestamp,
    tags: ['debug', 'connection'],
    data: errorMessage,
  };
}

export class LogInterceptor extends Stream.Transform {
  constructor() {
    super({
      readableObjectMode: true,
      writableObjectMode: true,
    });
  }

  /**
   * Since the upgrade to hapi 14, any socket read
   * error is surfaced as a generic "client error"
   * but "ECONNRESET" specifically is not useful for the
   * logs unless you are trying to debug edge-case behaviors.
   *
   * For that reason, we downgrade this from error to debug level
   *
   * @param {object} - log event
   */
  downgradeIfEconnreset(event: AnyEvent) {
    return downgradeIfErrorType('ECONNRESET', event);
  }

  /**
   * Since the upgrade to hapi 14, any socket write
   * error is surfaced as a generic "client error"
   * but "EPIPE" specifically is not useful for the
   * logs unless you are trying to debug edge-case behaviors.
   *
   * For that reason, we downgrade this from error to debug level
   *
   * @param {object} - log event
   */
  downgradeIfEpipe(event: AnyEvent) {
    return downgradeIfErrorType('EPIPE', event);
  }

  /**
   * Since the upgrade to hapi 14, any socket write
   * error is surfaced as a generic "client error"
   * but "ECANCELED" specifically is not useful for the
   * logs unless you are trying to debug edge-case behaviors.
   *
   * For that reason, we downgrade this from error to debug level
   *
   * @param {object} - log event
   */
  downgradeIfEcanceled(event: AnyEvent) {
    return downgradeIfErrorType('ECANCELED', event);
  }

  downgradeIfHTTPSWhenHTTP(event: AnyEvent) {
    return downgradeIfErrorType('HPE_INVALID_METHOD', event);
  }

  downgradeIfHTTPWhenHTTPS(event: AnyEvent) {
    return downgradeIfErrorMessage(OPENSSL_GET_RECORD_REGEX, event);
  }

  _transform(event: AnyEvent, enc: string, next: Stream.TransformCallback) {
    const downgraded =
      this.downgradeIfEconnreset(event) ||
      this.downgradeIfEpipe(event) ||
      this.downgradeIfEcanceled(event) ||
      this.downgradeIfHTTPSWhenHTTP(event) ||
      this.downgradeIfHTTPWhenHTTPS(event);

    this.push(downgraded || event);
    next();
  }
}
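To make the transform concrete, here is a minimal sketch of pushing one of the client-error events (shaped like the stubs in the test file above) through the interceptor:

import { LogInterceptor } from './log_interceptor';

const interceptor = new LogInterceptor();
interceptor.on('data', (out) => {
  // The ECONNRESET error comes out downgraded to a debug-level log event:
  // out.event === 'log', out.tags === ['debug', 'connection', 'econnreset']
  console.log(out.event, out.tags);
});

// A hypothetical "client error" event; tags must match exactly and in order.
interceptor.write({
  event: 'error',
  pid: process.pid,
  timestamp: Date.now(),
  tags: ['connection', 'client', 'error'],
  error: Object.assign(new Error(), { code: 'ECONNRESET' }),
});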
@@ -1,131 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import os from 'os';
import path from 'path';
import fs from 'fs';

import stripAnsi from 'strip-ansi';

import { getLogReporter } from './log_reporter';

const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));

describe('getLogReporter', () => {
  it('should log to stdout (not json)', async () => {
    const lines: string[] = [];
    const origWrite = process.stdout.write;
    process.stdout.write = (buffer: string | Uint8Array): boolean => {
      lines.push(stripAnsi(buffer.toString()).trim());
      return true;
    };

    const loggerStream = getLogReporter({
      config: {
        json: false,
        dest: 'stdout',
        filter: {},
      },
      events: { log: '*' },
    });

    loggerStream.end({ event: 'log', tags: ['foo'], data: 'hello world' });

    await sleep(500);

    process.stdout.write = origWrite;
    expect(lines.length).toBe(1);
    expect(lines[0]).toMatch(/^log \[[^\]]*\] \[foo\] hello world$/);
  });

  it('should log to stdout (as json)', async () => {
    const lines: string[] = [];
    const origWrite = process.stdout.write;
    process.stdout.write = (buffer: string | Uint8Array): boolean => {
      lines.push(JSON.parse(buffer.toString().trim()));
      return true;
    };

    const loggerStream = getLogReporter({
      config: {
        json: true,
        dest: 'stdout',
        filter: {},
      },
      events: { log: '*' },
    });

    loggerStream.end({ event: 'log', tags: ['foo'], data: 'hello world' });

    await sleep(500);

    process.stdout.write = origWrite;
    expect(lines.length).toBe(1);
    expect(lines[0]).toMatchObject({
      type: 'log',
      tags: ['foo'],
      message: 'hello world',
    });
  });

  it('should log to custom file (not json)', async () => {
    const dir = os.tmpdir();
    const logfile = `dest-${Date.now()}.log`;
    const dest = path.join(dir, logfile);

    const loggerStream = getLogReporter({
      config: {
        json: false,
        dest,
        filter: {},
      },
      events: { log: '*' },
    });

    loggerStream.end({ event: 'log', tags: ['foo'], data: 'hello world' });

    await sleep(500);

    const lines = stripAnsi(fs.readFileSync(dest, { encoding: 'utf8' }))
      .trim()
      .split(os.EOL);
    expect(lines.length).toBe(1);
    expect(lines[0]).toMatch(/^log \[[^\]]*\] \[foo\] hello world$/);
  });

  it('should log to custom file (as json)', async () => {
    const dir = os.tmpdir();
    const logfile = `dest-${Date.now()}.log`;
    const dest = path.join(dir, logfile);

    const loggerStream = getLogReporter({
      config: {
        json: true,
        dest,
        filter: {},
      },
      events: { log: '*' },
    });

    loggerStream.end({ event: 'log', tags: ['foo'], data: 'hello world' });

    await sleep(500);

    const lines = fs
      .readFileSync(dest, { encoding: 'utf8' })
      .trim()
      .split(os.EOL)
      .map((data) => JSON.parse(data));
    expect(lines.length).toBe(1);
    expect(lines[0]).toMatchObject({
      type: 'log',
      tags: ['foo'],
      message: 'hello world',
    });
  });
});
@@ -1,49 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { createWriteStream } from 'fs';
import { pipeline } from 'stream';

// @ts-expect-error missing type def
import { Squeeze } from '@hapi/good-squeeze';

import { KbnLoggerJsonFormat } from './log_format_json';
import { KbnLoggerStringFormat } from './log_format_string';
import { LogInterceptor } from './log_interceptor';
import { LogFormatConfig } from './log_format';

export function getLogReporter({ events, config }: { events: any; config: LogFormatConfig }) {
  const squeeze = new Squeeze(events);
  const format = config.json ? new KbnLoggerJsonFormat(config) : new KbnLoggerStringFormat(config);
  const logInterceptor = new LogInterceptor();

  if (config.dest === 'stdout') {
    pipeline(logInterceptor, squeeze, format, onFinished);
    // The `pipeline` function is used to properly close all streams in the
    // pipeline in case one of them ends or fails. Since stdout obviously
    // shouldn't be closed in case of a failure in one of the other streams,
    // we're not including that in the call to `pipeline`, but rely on the old
    // `pipe` function instead.
    format.pipe(process.stdout);
  } else {
    const dest = createWriteStream(config.dest, {
      flags: 'a',
      encoding: 'utf8',
    });
    pipeline(logInterceptor, squeeze, format, dest, onFinished);
  }

  return logInterceptor;
}

function onFinished(err: NodeJS.ErrnoException | null) {
  if (err) {
    // eslint-disable-next-line no-console
    console.error('An unexpected error occurred in the logging pipeline:', err.stack);
  }
}
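The asymmetry in the stdout branch is deliberate, and the pattern is reusable on its own. A standalone sketch with `PassThrough` streams standing in for the interceptor/squeeze/format chain:

import { pipeline, PassThrough } from 'stream';

const interceptor = new PassThrough();
const format = new PassThrough();

// `pipeline` destroys every stream it manages when one errors or ends --
// exactly what we want for our own streams...
pipeline(interceptor, format, (err) => {
  if (err) console.error('logging pipeline failed:', err.message);
});

// ...but stdout must survive a pipeline failure, so it is attached with plain
// `pipe`, which leaves the destination open.
format.pipe(process.stdout);

interceptor.write('hello\n');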
@@ -1,42 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { isPlainObject } from 'lodash';

export const metadataSymbol = Symbol('log message with metadata');

export interface EventData {
  [metadataSymbol]?: EventMetadata;
  [key: string]: any;
}

export interface EventMetadata {
  message: string;
  metadata: Record<string, any>;
}

export const isEventData = (eventData: EventData) => {
  return Boolean(isPlainObject(eventData) && eventData[metadataSymbol]);
};

export const getLogEventData = (eventData: EventData) => {
  const { message, metadata } = eventData[metadataSymbol]!;
  return {
    ...metadata,
    message,
  };
};

export const attachMetaData = (message: string, metadata: Record<string, any> = {}) => {
  return {
    [metadataSymbol]: {
      message,
      metadata,
    },
  };
};
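A quick round-trip of these helpers (a sketch; the merged shape is exactly what `getLogEventData` returns, matching the `prop1`/`prop2` fields asserted in the JSON format tests above):

import { attachMetaData, isEventData, getLogEventData } from './metadata';

const eventData = attachMetaData('user logged in', { userId: 42 });

if (isEventData(eventData)) {
  // Spreads the metadata and keeps the message:
  // { userId: 42, message: 'user logged in' }
  console.log(getLogEventData(eventData));
}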
@@ -1,41 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { Server } from '@hapi/hapi';
import { LogRotator } from './log_rotator';
import { LegacyLoggingConfig } from '../schema';

let logRotator: LogRotator;

export async function setupLoggingRotate(server: Server, config: LegacyLoggingConfig) {
  // If log rotate is not enabled we skip
  if (!config.rotate.enabled) {
    return;
  }

  // We don't want to run logging rotate server if
  // we are not logging to a file
  if (config.dest === 'stdout') {
    server.log(
      ['warning', 'logging:rotate'],
      'Log rotation is enabled but logging.dest is configured for stdout. Set logging.dest to a file for this setting to take effect.'
    );
    return;
  }

  // Enable Logging Rotate Service
  // We need the master process and it can
  // try to setupLoggingRotate more than once,
  // so we'll need to assure it only loads once.
  if (!logRotator) {
    logRotator = new LogRotator(config, server);
    await logRotator.start();
  }

  return logRotator;
}
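A sketch of how this was invoked during legacy server setup. The values are illustrative, and the `as any` cast skips the unrelated top-level config fields:

import { Server } from '@hapi/hapi';
import { setupLoggingRotate } from './log_rotator';

async function startRotation(server: Server) {
  // Resolves to undefined unless rotation is enabled and dest is a file.
  return await setupLoggingRotate(server, {
    dest: '/var/log/kibana/kibana.log',
    rotate: {
      enabled: true,
      everyBytes: 10485760, // rotate at 10MB
      keepFiles: 7,
      usePolling: false,
      pollingInterval: 10000,
    },
  } as any);
}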
@@ -1,261 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import del from 'del';
import fs, { existsSync, mkdirSync, statSync, writeFileSync } from 'fs';
import { tmpdir } from 'os';
import { dirname, join } from 'path';
import { LogRotator } from './log_rotator';
import { LegacyLoggingConfig } from '../schema';

const mockOn = jest.fn();
jest.mock('chokidar', () => ({
  watch: jest.fn(() => ({
    on: mockOn,
    close: jest.fn(),
  })),
}));

jest.mock('lodash', () => ({
  ...(jest.requireActual('lodash') as any),
  throttle: (fn: any) => fn,
}));

const tempDir = join(tmpdir(), 'kbn_log_rotator_test');
const testFilePath = join(tempDir, 'log_rotator_test_log_file.log');

const createLogRotatorConfig = (logFilePath: string): LegacyLoggingConfig => {
  return {
    dest: logFilePath,
    rotate: {
      enabled: true,
      keepFiles: 2,
      everyBytes: 2,
      usePolling: false,
      pollingInterval: 10000,
      pollingPolicyTestTimeout: 4000,
    },
  } as LegacyLoggingConfig;
};

const mockServer: any = {
  log: jest.fn(),
};

const writeBytesToFile = (filePath: string, numberOfBytes: number) => {
  writeFileSync(filePath, 'a'.repeat(numberOfBytes), { flag: 'a' });
};

describe('LogRotator', () => {
  beforeEach(() => {
    mkdirSync(tempDir, { recursive: true });
    writeFileSync(testFilePath, '');
  });

  afterEach(() => {
    del.sync(tempDir, { force: true });
    mockOn.mockClear();
  });

  it('rotates log file when bigger than set limit on start', async () => {
    writeBytesToFile(testFilePath, 3);

    const logRotator = new LogRotator(createLogRotatorConfig(testFilePath), mockServer);
    jest.spyOn(logRotator, '_sendReloadLogConfigSignal').mockImplementation(() => {});

    await logRotator.start();

    expect(logRotator.running).toBe(true);

    await logRotator.stop();

    expect(existsSync(join(tempDir, 'log_rotator_test_log_file.log.0'))).toBeTruthy();
  });

  it('rotates log file when equal than set limit over time', async () => {
    writeBytesToFile(testFilePath, 1);

    const logRotator = new LogRotator(createLogRotatorConfig(testFilePath), mockServer);
    jest.spyOn(logRotator, '_sendReloadLogConfigSignal').mockImplementation(() => {});
    await logRotator.start();

    expect(logRotator.running).toBe(true);

    const testLogFileDir = dirname(testFilePath);
    expect(existsSync(join(testLogFileDir, 'log_rotator_test_log_file.log.0'))).toBeFalsy();

    writeBytesToFile(testFilePath, 1);

    // ['change', [asyncFunction]]
    const onChangeCb = mockOn.mock.calls[0][1];
    await onChangeCb(testLogFileDir, { size: 2 });

    await logRotator.stop();
    expect(existsSync(join(testLogFileDir, 'log_rotator_test_log_file.log.0'))).toBeTruthy();
  });

  it('rotates log file when file size is bigger than limit', async () => {
    writeBytesToFile(testFilePath, 1);

    const logRotator = new LogRotator(createLogRotatorConfig(testFilePath), mockServer);
    jest.spyOn(logRotator, '_sendReloadLogConfigSignal').mockImplementation(() => {});
    await logRotator.start();

    expect(logRotator.running).toBe(true);

    const testLogFileDir = dirname(testFilePath);
    expect(existsSync(join(testLogFileDir, 'log_rotator_test_log_file.log.0'))).toBeFalsy();

    writeBytesToFile(testFilePath, 2);

    // ['change', [asyncFunction]]
    const onChangeCb = mockOn.mock.calls[0][1];
    await onChangeCb(testLogFileDir, { size: 3 });

    await logRotator.stop();
    expect(existsSync(join(testLogFileDir, 'log_rotator_test_log_file.log.0'))).toBeTruthy();
  });

  it('rotates log file service correctly keeps number of files', async () => {
    writeBytesToFile(testFilePath, 3);

    const logRotator = new LogRotator(createLogRotatorConfig(testFilePath), mockServer);
    jest.spyOn(logRotator, '_sendReloadLogConfigSignal').mockImplementation(() => {});
    await logRotator.start();

    expect(logRotator.running).toBe(true);

    const testLogFileDir = dirname(testFilePath);
    expect(existsSync(join(testLogFileDir, 'log_rotator_test_log_file.log.0'))).toBeTruthy();

    writeBytesToFile(testFilePath, 2);

    // ['change', [asyncFunction]]
    const onChangeCb = mockOn.mock.calls[0][1];
    await onChangeCb(testLogFileDir, { size: 2 });

    writeBytesToFile(testFilePath, 5);
    await onChangeCb(testLogFileDir, { size: 5 });

    await logRotator.stop();
    expect(existsSync(join(testLogFileDir, 'log_rotator_test_log_file.log.0'))).toBeTruthy();
    expect(existsSync(join(testLogFileDir, 'log_rotator_test_log_file.log.1'))).toBeTruthy();
    expect(existsSync(join(testLogFileDir, 'log_rotator_test_log_file.log.2'))).toBeFalsy();
    expect(statSync(join(testLogFileDir, 'log_rotator_test_log_file.log.0')).size).toBe(5);
  });

  it('rotates log file service correctly keeps number of files even when number setting changes', async () => {
    writeBytesToFile(testFilePath, 3);

    const logRotator = new LogRotator(createLogRotatorConfig(testFilePath), mockServer);
    jest.spyOn(logRotator, '_sendReloadLogConfigSignal').mockImplementation(() => {});
    await logRotator.start();

    expect(logRotator.running).toBe(true);

    const testLogFileDir = dirname(testFilePath);
    expect(existsSync(join(testLogFileDir, 'log_rotator_test_log_file.log.0'))).toBeTruthy();

    writeBytesToFile(testFilePath, 2);

    // ['change', [asyncFunction]]
    const onChangeCb = mockOn.mock.calls[0][1];
    await onChangeCb(testLogFileDir, { size: 2 });

    writeBytesToFile(testFilePath, 5);
    await onChangeCb(testLogFileDir, { size: 5 });

    await logRotator.stop();
    expect(existsSync(join(testLogFileDir, 'log_rotator_test_log_file.log.0'))).toBeTruthy();
    expect(existsSync(join(testLogFileDir, 'log_rotator_test_log_file.log.1'))).toBeTruthy();
    expect(existsSync(join(testLogFileDir, 'log_rotator_test_log_file.log.2'))).toBeFalsy();
    expect(statSync(join(testLogFileDir, 'log_rotator_test_log_file.log.0')).size).toBe(5);

    logRotator.keepFiles = 1;
    await logRotator.start();

    writeBytesToFile(testFilePath, 5);
    await onChangeCb(testLogFileDir, { size: 5 });

    await logRotator.stop();
    expect(existsSync(join(testLogFileDir, 'log_rotator_test_log_file.log.0'))).toBeTruthy();
    expect(existsSync(join(testLogFileDir, 'log_rotator_test_log_file.log.1'))).toBeFalsy();
    expect(statSync(join(testLogFileDir, 'log_rotator_test_log_file.log.0')).size).toBe(5);
  });

  it('rotates log file service correctly detects usePolling when it should be false', async () => {
    writeBytesToFile(testFilePath, 1);

    const logRotator = new LogRotator(createLogRotatorConfig(testFilePath), mockServer);
    jest.spyOn(logRotator, '_sendReloadLogConfigSignal').mockImplementation(() => {});
    await logRotator.start();

    expect(logRotator.running).toBe(true);
    expect(logRotator.usePolling).toBe(false);

    const shouldUsePolling = await logRotator._shouldUsePolling();
    expect(shouldUsePolling).toBe(false);

    await logRotator.stop();
  });

  it('rotates log file service correctly detects usePolling when it should be true', async () => {
    writeBytesToFile(testFilePath, 1);

    const logRotator = new LogRotator(createLogRotatorConfig(testFilePath), mockServer);
    jest.spyOn(logRotator, '_sendReloadLogConfigSignal').mockImplementation(() => {});

    jest.spyOn(fs, 'watch').mockImplementation(
      () =>
        ({
          on: jest.fn((eventType, cb) => {
            if (eventType === 'error') {
              cb();
            }
          }),
          close: jest.fn(),
        } as any)
    );

    await logRotator.start();

    expect(logRotator.running).toBe(true);
    expect(logRotator.usePolling).toBe(false);
    expect(logRotator.shouldUsePolling).toBe(true);

    await logRotator.stop();
  });

  it('rotates log file service correctly fallback to usePolling true after defined timeout', async () => {
    jest.useFakeTimers();
    writeBytesToFile(testFilePath, 1);

    const logRotator = new LogRotator(createLogRotatorConfig(testFilePath), mockServer);
    jest.spyOn(logRotator, '_sendReloadLogConfigSignal').mockImplementation(() => {});
    jest.spyOn(fs, 'watch').mockImplementation(
      () =>
        ({
          on: jest.fn((ev: string) => {
            if (ev === 'error') {
              jest.runTimersToTime(15000);
            }
          }),
          close: jest.fn(),
        } as any)
    );

    await logRotator.start();

    expect(logRotator.running).toBe(true);
    expect(logRotator.usePolling).toBe(false);
    expect(logRotator.shouldUsePolling).toBe(true);

    await logRotator.stop();
    jest.useRealTimers();
  });
});
@@ -1,352 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import * as chokidar from 'chokidar';
import fs from 'fs';
import { Server } from '@hapi/hapi';
import { throttle } from 'lodash';
import { tmpdir } from 'os';
import { basename, dirname, join, sep } from 'path';
import { Observable } from 'rxjs';
import { first } from 'rxjs/operators';
import { promisify } from 'util';
import { LegacyLoggingConfig } from '../schema';

const mkdirAsync = promisify(fs.mkdir);
const readdirAsync = promisify(fs.readdir);
const renameAsync = promisify(fs.rename);
const statAsync = promisify(fs.stat);
const unlinkAsync = promisify(fs.unlink);
const writeFileAsync = promisify(fs.writeFile);

export class LogRotator {
  private readonly config: LegacyLoggingConfig;
  private readonly log: Server['log'];
  public logFilePath: string;
  public everyBytes: number;
  public keepFiles: number;
  public running: boolean;
  private logFileSize: number;
  public isRotating: boolean;
  public throttledRotate: () => void;
  public stalker: chokidar.FSWatcher | null;
  public usePolling: boolean;
  public pollingInterval: number;
  private stalkerUsePollingPolicyTestTimeout: NodeJS.Timeout | null;
  public shouldUsePolling: boolean;

  constructor(config: LegacyLoggingConfig, server: Server) {
    this.config = config;
    this.log = server.log.bind(server);
    this.logFilePath = config.dest;
    this.everyBytes = config.rotate.everyBytes;
    this.keepFiles = config.rotate.keepFiles;
    this.running = false;
    this.logFileSize = 0;
    this.isRotating = false;
    this.throttledRotate = throttle(async () => await this._rotate(), 5000);
    this.stalker = null;
    this.usePolling = config.rotate.usePolling;
    this.pollingInterval = config.rotate.pollingInterval;
    this.shouldUsePolling = false;
    this.stalkerUsePollingPolicyTestTimeout = null;
  }

  async start() {
    if (this.running) {
      return;
    }

    this.running = true;

    // create exit listener for cleanup purposes
    this._createExitListener();

    // call rotate on startup
    await this._callRotateOnStartup();

    // init log file size monitor
    await this._startLogFileSizeMonitor();
  }

  stop = () => {
    if (!this.running) {
      return;
    }

    // cleanup exit listener
    this._deleteExitListener();

    // stop log file size monitor
    this._stopLogFileSizeMonitor();

    this.running = false;
  };

  async _shouldUsePolling() {
    try {
      // Setup a test file in order to try the fs env
      // and understand if we need to usePolling or not
      const tempFileDir = tmpdir();
      const tempFile = join(tempFileDir, 'kbn_log_rotation_use_polling_test_file.log');

      await mkdirAsync(tempFileDir, { recursive: true });
      await writeFileAsync(tempFile, '');

      // setup fs.watch for the temp test file
      const testWatcher = fs.watch(tempFile, { persistent: false });

      // await writeFileAsync(tempFile, 'test');

      const usePollingTest$ = new Observable<boolean>((observer) => {
        // observable complete function
        const completeFn = (completeStatus: boolean) => {
          if (this.stalkerUsePollingPolicyTestTimeout) {
            clearTimeout(this.stalkerUsePollingPolicyTestTimeout);
          }
          testWatcher.close();

          observer.next(completeStatus);
          observer.complete();
        };

        // setup conditions that would fire the observable
        this.stalkerUsePollingPolicyTestTimeout = setTimeout(
          () => completeFn(true),
          this.config.rotate.pollingPolicyTestTimeout || 15000
        );
        testWatcher.on('change', () => completeFn(false));
        testWatcher.on('error', () => completeFn(true));

        // fire test watcher events
        setTimeout(() => {
          fs.writeFileSync(tempFile, 'test');
        }, 0);
      });

      // wait for the first observable result and consider it as the result
      // for our use polling test
      const usePollingTestResult = await usePollingTest$.pipe(first()).toPromise();

      // delete the temp file used for the test
      await unlinkAsync(tempFile);

      return usePollingTestResult;
    } catch {
      return true;
    }
  }

  async _startLogFileSizeMonitor() {
    this.usePolling = this.config.rotate.usePolling;
    this.shouldUsePolling = await this._shouldUsePolling();

    if (this.usePolling && !this.shouldUsePolling) {
      this.log(
        ['warning', 'logging:rotate'],
        'Looks like your current environment support a faster algorithm than polling. You can try to disable `usePolling`'
      );
    }

    if (!this.usePolling && this.shouldUsePolling) {
      this.log(
        ['error', 'logging:rotate'],
        'Looks like within your current environment you need to use polling in order to enable log rotator. Please enable `usePolling`'
      );
    }

    this.stalker = chokidar.watch(this.logFilePath, {
      ignoreInitial: true,
      awaitWriteFinish: false,
      useFsEvents: false,
      usePolling: this.usePolling,
      interval: this.pollingInterval,
      binaryInterval: this.pollingInterval,
      alwaysStat: true,
      atomic: false,
    });
    this.stalker.on('change', this._logFileSizeMonitorHandler);
  }

  _logFileSizeMonitorHandler = async (filename: string, stats: fs.Stats) => {
    if (!filename || !stats) {
      return;
    }

    this.logFileSize = stats.size || 0;
    await this.throttledRotate();
  };

  _stopLogFileSizeMonitor() {
    if (!this.stalker) {
      return;
    }

    this.stalker.close();

    if (this.stalkerUsePollingPolicyTestTimeout) {
      clearTimeout(this.stalkerUsePollingPolicyTestTimeout);
    }
  }

  _createExitListener() {
    process.on('exit', this.stop);
  }

  _deleteExitListener() {
    process.removeListener('exit', this.stop);
  }

  async _getLogFileSizeAndCreateIfNeeded() {
    try {
      const logFileStats = await statAsync(this.logFilePath);
      return logFileStats.size;
    } catch {
      // touch the file to make the watcher being able to register
      // change events
      await writeFileAsync(this.logFilePath, '');
      return 0;
    }
  }

  async _callRotateOnStartup() {
    this.logFileSize = await this._getLogFileSizeAndCreateIfNeeded();
    await this._rotate();
  }

  _shouldRotate() {
    // should rotate evaluation
    // 1. should rotate if current log size exceeds
    //    the defined one on everyBytes
    // 2. should not rotate if is already rotating or if any
    //    of the conditions on 1. do not apply
    if (this.isRotating) {
      return false;
    }

    return this.logFileSize >= this.everyBytes;
  }

  async _rotate() {
    if (!this._shouldRotate()) {
      return;
    }

    await this._rotateNow();
  }

  async _rotateNow() {
    // rotate process
    // 1. get rotated files metadata (list of log rotated files present on the log folder, numerical sorted)
    // 2. delete last file
    // 3. rename all files to the correct index +1
    // 4. rename + compress current log into 1
    // 5. send SIGHUP to reload log config

    // rotate process is starting
    this.isRotating = true;

    // get rotated files metadata
    const foundRotatedFiles = await this._readRotatedFilesMetadata();

    // delete number of rotated files exceeding the keepFiles limit setting
    const rotatedFiles: string[] = await this._deleteFoundRotatedFilesAboveKeepFilesLimit(
      foundRotatedFiles
    );

    // delete last file
    await this._deleteLastRotatedFile(rotatedFiles);

    // rename all files to correct index + 1
    // and normalize numbering if by some reason
    // (for example log file deletion) that numbering
    // was interrupted
    await this._renameRotatedFilesByOne(rotatedFiles);

    // rename current log into 0
    await this._rotateCurrentLogFile();

    // send SIGHUP to reload log configuration
    this._sendReloadLogConfigSignal();

    // Reset log file size
    this.logFileSize = 0;

    // rotate process is finished
    this.isRotating = false;
  }

  async _readRotatedFilesMetadata() {
    const logFileBaseName = basename(this.logFilePath);
    const logFilesFolder = dirname(this.logFilePath);
    const foundLogFiles: string[] = await readdirAsync(logFilesFolder);

    return (
      foundLogFiles
        .filter((file) => new RegExp(`${logFileBaseName}\\.\\d`).test(file))
        // we use .slice(-1) here in order to retrieve the last number match in the read filenames
        .sort((a, b) => Number(a.match(/(\d+)/g)!.slice(-1)) - Number(b.match(/(\d+)/g)!.slice(-1)))
        .map((filename) => `${logFilesFolder}${sep}${filename}`)
    );
  }

  async _deleteFoundRotatedFilesAboveKeepFilesLimit(foundRotatedFiles: string[]) {
    if (foundRotatedFiles.length <= this.keepFiles) {
      return foundRotatedFiles;
    }

    const finalRotatedFiles = foundRotatedFiles.slice(0, this.keepFiles);
    const rotatedFilesToDelete = foundRotatedFiles.slice(
      finalRotatedFiles.length,
      foundRotatedFiles.length
    );

    await Promise.all(
      rotatedFilesToDelete.map((rotatedFilePath: string) => unlinkAsync(rotatedFilePath))
    );

    return finalRotatedFiles;
  }

  async _deleteLastRotatedFile(rotatedFiles: string[]) {
    if (rotatedFiles.length < this.keepFiles) {
      return;
    }

    const lastFilePath: string = rotatedFiles.pop() as string;
    await unlinkAsync(lastFilePath);
  }

  async _renameRotatedFilesByOne(rotatedFiles: string[]) {
    const logFileBaseName = basename(this.logFilePath);
    const logFilesFolder = dirname(this.logFilePath);

    for (let i = rotatedFiles.length - 1; i >= 0; i--) {
      const oldFilePath = rotatedFiles[i];
      const newFilePath = `${logFilesFolder}${sep}${logFileBaseName}.${i + 1}`;
      await renameAsync(oldFilePath, newFilePath);
    }
  }

  async _rotateCurrentLogFile() {
    const newFilePath = `${this.logFilePath}.0`;
    await renameAsync(this.logFilePath, newFilePath);
  }

  _sendReloadLogConfigSignal() {
    if (!process.env.isDevCliChild || !process.send) {
      process.emit('SIGHUP', 'SIGHUP');
      return;
    }

    // Send a special message to the cluster manager
    // so it can forward it correctly
    // It will only run when we are under cluster mode (not under a production environment)
    process.send(['RELOAD_LOGGING_CONFIG_FROM_SERVER_WORKER']);
  }
}
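The rename dance in `_rotateNow` is easier to follow with concrete names. Assuming `dest: kibana.log` and `keepFiles: 2`, one rotation performs roughly this transition:

// before:  kibana.log (full)   kibana.log.0   kibana.log.1
//
//   1. kibana.log.1 is deleted (the keepFiles limit is reached)
//   2. kibana.log.0 is renamed to kibana.log.1
//   3. kibana.log   is renamed to kibana.log.0
//   4. SIGHUP reloads the logging config, which reopens a fresh kibana.log
//
// after:   kibana.log (empty)  kibana.log.0   kibana.log.1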
@@ -1,97 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { schema } from '@kbn/config-schema';

/**
 * @deprecated
 *
 * Legacy logging has been deprecated and will be removed in 8.0.
 * Set up logging from the platform logging instead
 */
export interface LegacyLoggingConfig {
  silent: boolean;
  quiet: boolean;
  verbose: boolean;
  events: Record<string, any>;
  dest: string;
  filter: Record<string, any>;
  json: boolean;
  timezone?: string;
  rotate: {
    enabled: boolean;
    everyBytes: number;
    keepFiles: number;
    pollingInterval: number;
    usePolling: boolean;
    pollingPolicyTestTimeout?: number;
  };
}

export const legacyLoggingConfigSchema = schema.object({
  silent: schema.boolean({ defaultValue: false }),
  quiet: schema.conditional(
    schema.siblingRef('silent'),
    true,
    schema.boolean({
      defaultValue: true,
      validate: (quiet) => {
        if (!quiet) {
          return 'must be true when `silent` is true';
        }
      },
    }),
    schema.boolean({ defaultValue: false })
  ),
  verbose: schema.conditional(
    schema.siblingRef('quiet'),
    true,
    schema.boolean({
      defaultValue: false,
      validate: (verbose) => {
        if (verbose) {
          return 'must be false when `quiet` is true';
        }
      },
    }),
    schema.boolean({ defaultValue: false })
  ),
  events: schema.recordOf(schema.string(), schema.any(), { defaultValue: {} }),
  dest: schema.string({ defaultValue: 'stdout' }),
  filter: schema.recordOf(schema.string(), schema.any(), { defaultValue: {} }),
  json: schema.conditional(
    schema.siblingRef('dest'),
    'stdout',
    schema.boolean({
      defaultValue: !process.stdout.isTTY,
    }),
    schema.boolean({
      defaultValue: true,
    })
  ),
  timezone: schema.maybe(schema.string()),
  rotate: schema.object({
    enabled: schema.boolean({ defaultValue: false }),
    everyBytes: schema.number({
      min: 1048576, // > 1MB
      max: 1073741825, // < 1GB
      defaultValue: 10485760, // 10MB
    }),
    keepFiles: schema.number({
      min: 2,
      max: 1024,
      defaultValue: 7,
    }),
    pollingInterval: schema.number({
      min: 5000,
      max: 3600000,
      defaultValue: 10000,
    }),
    usePolling: schema.boolean({ defaultValue: false }),
  }),
});
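A sketch of exercising the schema directly (assuming the usual `@kbn/config-schema` `validate` API; defaults fill in anything omitted and the conditionals reject contradictory flags):

import { legacyLoggingConfigSchema } from './schema';

const config = legacyLoggingConfigSchema.validate({});
console.log(config.dest); // 'stdout'
console.log(config.rotate.everyBytes); // 10485760

// Contradictory flags fail validation, e.g. silent without quiet:
// legacyLoggingConfigSchema.validate({ silent: true, quiet: false });
// -> throws with "must be true when `silent` is true"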
@@ -1,35 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { Server } from '@hapi/hapi';
import { reconfigureLogging, setupLogging } from './setup_logging';
import { LegacyLoggingConfig } from './schema';

describe('reconfigureLogging', () => {
  test(`doesn't throw an error`, () => {
    const server = new Server();
    const config: LegacyLoggingConfig = {
      silent: false,
      quiet: false,
      verbose: true,
      events: {},
      dest: '/tmp/foo',
      filter: {},
      json: true,
      rotate: {
        enabled: false,
        everyBytes: 0,
        keepFiles: 0,
        pollingInterval: 0,
        usePolling: false,
      },
    };
    setupLogging(server, config, 10);
    reconfigureLogging(server, { ...config, dest: '/tmp/bar' }, 0);
  });
});
@@ -1,41 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

// @ts-expect-error missing typedef
import { plugin as good } from '@elastic/good';
import { Server } from '@hapi/hapi';
import { LegacyLoggingConfig } from './schema';
import { getLoggingConfiguration } from './get_logging_config';

export async function setupLogging(
  server: Server,
  config: LegacyLoggingConfig,
  opsInterval: number
) {
  // NOTE: legacy logger creates a new stream for each new access
  // In https://github.com/elastic/kibana/pull/55937 we reach the max listeners
  // default limit of 10 for process.stdout which starts a long warning/error
  // thrown every time we start the server.
  // In order to keep using the legacy logger until we remove it I'm just adding
  // a new hard limit here.
  process.stdout.setMaxListeners(60);

  return await server.register({
    plugin: good,
    options: getLoggingConfiguration(config, opsInterval),
  });
}

export function reconfigureLogging(
  server: Server,
  config: LegacyLoggingConfig,
  opsInterval: number
) {
  const loggingOptions = getLoggingConfiguration(config, opsInterval);
  (server.plugins as any).good.reconfigure(loggingOptions);
}
@@ -1,49 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { applyFiltersToKeys } from './apply_filters_to_keys';

describe('applyFiltersToKeys(obj, actionsByKey)', function () {
  it('applies for each key+prop in actionsByKey', function () {
    const data = applyFiltersToKeys(
      {
        a: {
          b: {
            c: 1,
          },
          d: {
            e: 'foobar',
          },
        },
        req: {
          headers: {
            authorization: 'Basic dskd939k2i',
          },
        },
      },
      {
        b: 'remove',
        e: 'censor',
        authorization: '/([^\\s]+)$/',
      }
    );

    expect(data).toEqual({
      a: {
        d: {
          e: 'XXXXXX',
        },
      },
      req: {
        headers: {
          authorization: 'Basic XXXXXXXXXX',
        },
      },
    });
  });
});
@@ -1,50 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

function toPojo(obj: Record<string, unknown>) {
  return JSON.parse(JSON.stringify(obj));
}

// `group` is the first capture group of the match, so it is a string.
function replacer(match: string, group: string) {
  return new Array(group.length + 1).join('X');
}

function apply(obj: Record<string, unknown>, key: string, action: string) {
  for (const k in obj) {
    if (obj.hasOwnProperty(k)) {
      let val = obj[k];
      if (k === key) {
        if (action === 'remove') {
          delete obj[k];
        } else if (action === 'censor' && typeof val === 'object') {
          // censoring an object value is not possible, so drop it entirely
          delete obj[key];
        } else if (action === 'censor') {
          obj[k] = ('' + val).replace(/./g, 'X');
        } else if (/\/.+\//.test(action)) {
          // an action of the form '/regex/' masks only the matched portion
          const matches = action.match(/\/(.+)\//);
          if (matches) {
            const regex = new RegExp(matches[1]);
            obj[k] = ('' + val).replace(regex, replacer);
          }
        }
      } else if (typeof val === 'object') {
        val = apply(val as Record<string, any>, key, action);
      }
    }
  }
  return obj;
}

export function applyFiltersToKeys(
  obj: Record<string, unknown>,
  actionsByKey: Record<string, string>
) {
  return Object.keys(actionsByKey).reduce((output, key) => {
    return apply(output, key, actionsByKey[key]);
  }, toPojo(obj));
}
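
A brief usage sketch (values illustrative, mirroring the test above): 'remove' drops a key wherever it appears, 'censor' masks every character of the value, and a '/regex/' action masks only the matched portion:

const filtered = applyFiltersToKeys(
  { req: { headers: { authorization: 'Basic abc123' } } },
  { authorization: '/([^\\s]+)$/' }
);
// => { req: { headers: { authorization: 'Basic XXXXXX' } } }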
@@ -1,158 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import mockFs from 'mock-fs';
import { createReadStream } from 'fs';
import { PassThrough } from 'stream';
import { createGzip, createGunzip } from 'zlib';

import { getResponsePayloadBytes } from './get_payload_size';

describe('getPayloadSize', () => {
  describe('handles Buffers', () => {
    test('with ascii characters', () => {
      const payload = 'heya';
      const result = getResponsePayloadBytes(Buffer.from(payload));
      expect(result).toBe(4);
    });

    test('with special characters', () => {
      const payload = '¡hola!';
      const result = getResponsePayloadBytes(Buffer.from(payload));
      expect(result).toBe(7);
    });
  });

  describe('handles streams', () => {
    afterEach(() => mockFs.restore());

    test('ignores streams that are not fs or zlib streams', async () => {
      const result = getResponsePayloadBytes(new PassThrough());
      expect(result).toBe(undefined);
    });

    describe('fs streams', () => {
      test('with ascii characters', async () => {
        mockFs({ 'test.txt': 'heya' });
        const readStream = createReadStream('test.txt');

        let data = '';
        for await (const chunk of readStream) {
          data += chunk;
        }

        const result = getResponsePayloadBytes(readStream);
        expect(result).toBe(Buffer.byteLength(data));
      });

      test('with special characters', async () => {
        mockFs({ 'test.txt': '¡hola!' });
        const readStream = createReadStream('test.txt');

        let data = '';
        for await (const chunk of readStream) {
          data += chunk;
        }

        const result = getResponsePayloadBytes(readStream);
        expect(result).toBe(Buffer.byteLength(data));
      });

      describe('zlib streams', () => {
        test('with ascii characters', async () => {
          mockFs({ 'test.txt': 'heya' });
          const readStream = createReadStream('test.txt');
          const source = readStream.pipe(createGzip()).pipe(createGunzip());

          let data = '';
          for await (const chunk of source) {
            data += chunk;
          }

          const result = getResponsePayloadBytes(source);

          expect(data).toBe('heya');
          expect(result).toBe(source.bytesWritten);
        });

        test('with special characters', async () => {
          mockFs({ 'test.txt': '¡hola!' });
          const readStream = createReadStream('test.txt');
          const source = readStream.pipe(createGzip()).pipe(createGunzip());

          let data = '';
          for await (const chunk of source) {
            data += chunk;
          }

          const result = getResponsePayloadBytes(source);

          expect(data).toBe('¡hola!');
          expect(result).toBe(source.bytesWritten);
        });
      });
    });
  });

  describe('handles plain responses', () => {
    test('when source is text', () => {
      const result = getResponsePayloadBytes('heya');
      expect(result).toBe(4);
    });

    test('when source contains special characters', () => {
      const result = getResponsePayloadBytes('¡hola!');
      expect(result).toBe(7);
    });

    test('when source is object', () => {
      const payload = { message: 'heya' };
      const result = getResponsePayloadBytes(payload);
      expect(result).toBe(JSON.stringify(payload).length);
    });

    test('when source is array object', () => {
      const payload = [{ message: 'hey' }, { message: 'ya' }];
      const result = getResponsePayloadBytes(payload);
      expect(result).toBe(JSON.stringify(payload).length);
    });

    test('returns undefined when source is not plain object', () => {
      class TestClass {
        constructor() {}
      }
      const result = getResponsePayloadBytes(new TestClass());
      expect(result).toBe(undefined);
    });
  });

  describe('handles content-length header', () => {
    test('always provides content-length header if available', () => {
      const headers = { 'content-length': '123' };
      const result = getResponsePayloadBytes('heya', headers);
      expect(result).toBe(123);
    });

    test('uses first value when hapi header is an array', () => {
      const headers = { 'content-length': ['123', '456'] };
      const result = getResponsePayloadBytes(null, headers);
      expect(result).toBe(123);
    });

    test('returns undefined if length is NaN', () => {
      const headers = { 'content-length': 'oops' };
      const result = getResponsePayloadBytes(null, headers);
      expect(result).toBeUndefined();
    });
  });

  test('defaults to undefined', () => {
    const result = getResponsePayloadBytes(null);
    expect(result).toBeUndefined();
  });
});
@@ -1,71 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { isPlainObject } from 'lodash';
import { ReadStream } from 'fs';
import { Zlib } from 'zlib';
import type { ResponseObject } from '@hapi/hapi';

const isBuffer = (obj: unknown): obj is Buffer => Buffer.isBuffer(obj);
const isFsReadStream = (obj: unknown): obj is ReadStream =>
  typeof obj === 'object' && obj !== null && 'bytesRead' in obj && obj instanceof ReadStream;
const isZlibStream = (obj: unknown): obj is Zlib => {
  return typeof obj === 'object' && obj !== null && 'bytesWritten' in obj;
};
const isString = (obj: unknown): obj is string => typeof obj === 'string';

/**
 * Attempts to determine the size (in bytes) of a hapi/good
 * responsePayload based on the payload type. Falls back to
 * `undefined` if the size cannot be determined.
 *
 * This is similar to the implementation in `core/server/http/logging`,
 * however it uses more duck typing as we do not have access to the
 * entire hapi request object like we do in the HttpServer.
 *
 * @param payload responsePayload from hapi/good event
 * @param headers responseHeaders from hapi/good event
 *
 * @internal
 */
export function getResponsePayloadBytes(
  payload: ResponseObject['source'],
  headers: Record<string, any> = {}
): number | undefined {
  const contentLength = headers['content-length'];
  if (contentLength) {
    const val = parseInt(
      // hapi response headers can be `string | string[]`, so we need to handle both cases;
      // String(['123', '456']) yields '123,456' and parseInt stops at the comma, so the
      // first array value is the one that gets used
      Array.isArray(contentLength) ? String(contentLength) : contentLength,
      10
    );
    return !isNaN(val) ? val : undefined;
  }

  if (isBuffer(payload)) {
    return payload.byteLength;
  }

  if (isFsReadStream(payload)) {
    return payload.bytesRead;
  }

  if (isZlibStream(payload)) {
    return payload.bytesWritten;
  }

  if (isString(payload)) {
    return Buffer.byteLength(payload);
  }

  if (isPlainObject(payload) || Array.isArray(payload)) {
    return Buffer.byteLength(JSON.stringify(payload));
  }

  return undefined;
}
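
Usage sketch (values illustrative): an explicit content-length header always wins; otherwise the payload type determines the calculation:

getResponsePayloadBytes('heya'); // 4
getResponsePayloadBytes(Buffer.from('¡hola!')); // 7
getResponsePayloadBytes({ ok: true }, { 'content-length': '11' }); // 11, taken from the header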
@@ -1,10 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

export { applyFiltersToKeys } from './apply_filters_to_keys';
export { getResponsePayloadBytes } from './get_payload_size';
@@ -1,15 +0,0 @@
{
  "extends": "../../tsconfig.bazel.json",
  "compilerOptions": {
    "declaration": true,
    "declarationMap": true,
    "emitDeclarationOnly": true,
    "outDir": "target_types",
    "rootDir": "src",
    "sourceMap": true,
    "sourceRoot": "../../../../packages/kbn-legacy-logging/src",
    "stripInternal": false,
    "types": ["jest", "node"]
  },
  "include": ["src/**/*"]
}
@@ -1,3 +1,13 @@
logging:
  root:
    level: fatal
    appenders: [console-json]
  appenders:
    console-json:
      type: console
      layout:
        type: json

unknown:
  key: 1
@@ -1,13 +0,0 @@
server:
  autoListen: false
  port: 8274
logging:
  json: true
optimize:
  enabled: false
plugins:
  initialize: false
migrations:
  skip: true
elasticsearch:
  skipStartupConnectionCheck: true
@@ -14,14 +14,15 @@ const INVALID_CONFIG_PATH = require.resolve('./__fixtures__/invalid_config.yml')

interface LogEntry {
  message: string;
  tags?: string[];
  type: string;
  log: {
    level: string;
  };
}

describe('cli invalid config support', function () {
describe('cli invalid config support', () => {
  it(
    'exits with statusCode 64 and logs a single line when config is invalid',
    function () {
    'exits with statusCode 64 and logs an error when config is invalid',
    () => {
      // Unused keys only throw once LegacyService starts, so disable migrations so that Core
      // will finish the start lifecycle without a running Elasticsearch instance.
      const { error, status, stdout, stderr } = spawnSync(

@@ -31,41 +32,27 @@ describe('cli invalid config support', function () {
          cwd: REPO_ROOT,
        }
      );
      expect(error).toBe(undefined);

      let fatalLogLine;
      let fatalLogEntries;
      try {
        [fatalLogLine] = stdout
        fatalLogEntries = stdout
          .toString('utf8')
          .split('\n')
          .filter(Boolean)
          .map((line) => JSON.parse(line) as LogEntry)
          .filter((line) => line.tags?.includes('fatal'))
          .map((obj) => ({
            ...obj,
            pid: '## PID ##',
            '@timestamp': '## @timestamp ##',
            error: '## Error with stack trace ##',
          }));
          .filter((line) => line.log.level === 'FATAL');
      } catch (e) {
        throw new Error(
          `error parsing log output:\n\n${e.stack}\n\nstdout: \n${stdout}\n\nstderr:\n${stderr}`
        );
      }

      expect(error).toBe(undefined);

      if (!fatalLogLine) {
        throw new Error(
          `cli did not log the expected fatal error message:\n\nstdout: \n${stdout}\n\nstderr:\n${stderr}`
        );
      }

      expect(fatalLogLine.message).toContain(
        'Error: Unknown configuration key(s): "unknown.key", "other.unknown.key", "other.third", "some.flat.key", ' +
      expect(fatalLogEntries).toHaveLength(1);
      expect(fatalLogEntries[0].message).toContain(
        'Unknown configuration key(s): "unknown.key", "other.unknown.key", "other.third", "some.flat.key", ' +
          '"some.array". Check for spelling errors and ensure that expected plugins are installed.'
      );
      expect(fatalLogLine.tags).toEqual(['fatal', 'root']);
      expect(fatalLogLine.type).toEqual('log');

      expect(status).toBe(64);
    },
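
For reference, an illustrative entry of the kind this test parses out of the json-layout output (field values assumed, shaped per the LogEntry interface above):

const exampleFatalEntry: LogEntry = {
  message: 'Unknown configuration key(s): "unknown.key". ...',
  type: 'log',
  log: { level: 'FATAL' },
};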
@@ -17,7 +17,6 @@ import { map, filter, take } from 'rxjs/operators';
import { safeDump } from 'js-yaml';
import { getConfigFromFiles } from '@kbn/config';

const legacyConfig = follow('__fixtures__/reload_logging_config/kibana.test.yml');
const configFileLogConsole = follow(
  '__fixtures__/reload_logging_config/kibana_log_console.test.yml'
);

@@ -96,81 +95,6 @@ describe.skip('Server logging configuration', function () {
    return;
  }

  describe('legacy logging', () => {
    it(
      'should be reloadable via SIGHUP process signaling',
      async function () {
        const configFilePath = Path.resolve(tempDir, 'kibana.yml');
        Fs.copyFileSync(legacyConfig, configFilePath);

        child = Child.spawn(process.execPath, [
          kibanaPath,
          '--oss',
          '--config',
          configFilePath,
          '--verbose',
        ]);

        // TypeScript note: As long as the child stdio[1] is 'pipe', then stdout will not be null
        const message$ = Rx.fromEvent(child.stdout!, 'data').pipe(
          map((messages) => String(messages).split('\n').filter(Boolean))
        );

        await message$
          .pipe(
            // We know the sighup handler will be registered before this message logged
            filter((messages: string[]) => messages.some((m) => m.includes('setting up root'))),
            take(1)
          )
          .toPromise();

        const lastMessage = await message$.pipe(take(1)).toPromise();
        expect(containsJsonOnly(lastMessage)).toBe(true);

        createConfigManager(configFilePath).modify((oldConfig) => {
          oldConfig.logging.json = false;
          return oldConfig;
        });

        child.kill('SIGHUP');

        await message$
          .pipe(
            filter((messages) => !containsJsonOnly(messages)),
            take(1)
          )
          .toPromise();
      },
      minute
    );

    it(
      'should recreate file handle on SIGHUP',
      async function () {
        const logPath = Path.resolve(tempDir, 'kibana.log');
        const logPathArchived = Path.resolve(tempDir, 'kibana_archive.log');

        child = Child.spawn(process.execPath, [
          kibanaPath,
          '--oss',
          '--config',
          legacyConfig,
          '--logging.dest',
          logPath,
          '--verbose',
        ]);

        await watchFileUntil(logPath, /setting up root/, 30 * second);
        // once the server is running, archive the log file and issue SIGHUP
        Fs.renameSync(logPath, logPathArchived);
        child.kill('SIGHUP');

        await watchFileUntil(logPath, /Reloaded logging configuration due to SIGHUP/, 30 * second);
      },
      minute
    );
  });

  describe('platform logging', () => {
    it(
      'should be reloadable via SIGHUP process signaling',

@@ -124,17 +124,12 @@ function applyConfigOverrides(rawConfig, opts, extraCliOptions) {
  if (opts.elasticsearch) set('elasticsearch.hosts', opts.elasticsearch.split(','));
  if (opts.port) set('server.port', opts.port);
  if (opts.host) set('server.host', opts.host);

  if (opts.silent) {
    set('logging.silent', true);
    set('logging.root.level', 'off');
  }
  if (opts.verbose) {
    if (has('logging.root.appenders')) {
      set('logging.root.level', 'all');
    } else {
      // Only set logging.verbose to true for legacy logging when KP logging isn't configured.
      set('logging.verbose', true);
    }
    set('logging.root.level', 'all');
  }

  set('plugins.paths', _.compact([].concat(get('plugins.paths'), opts.pluginPath)));

@@ -159,9 +154,8 @@ export default function (program) {
      [getConfigPath()]
    )
    .option('-p, --port <port>', 'The port to bind to', parseInt)
    .option('-q, --quiet', 'Deprecated, set logging level in your configuration')
    .option('-Q, --silent', 'Prevent all logging')
    .option('--verbose', 'Turns on verbose logging')
    .option('-Q, --silent', 'Set the root logger level to off')
    .option('--verbose', 'Set the root logger level to all')
    .option('-H, --host <host>', 'The host to bind to')
    .option(
      '-l, --log-file <path>',

@@ -217,8 +211,6 @@ export default function (program) {
    const cliArgs = {
      dev: !!opts.dev,
      envName: unknownOptions.env ? unknownOptions.env.name : undefined,
      // no longer supported
      quiet: !!opts.quiet,
      silent: !!opts.silent,
      verbose: !!opts.verbose,
      watch: !!opts.watch,
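
The flag handling above collapses to a small mapping; a sketch under the assumption that `set` is the lodash-style setter applyConfigOverrides uses on the raw config (helper name hypothetical):

// --silent / -Q turns logging off entirely; --verbose logs everything.
function applyLogLevelFlags(
  set: (key: string, value: unknown) => void,
  opts: { silent?: boolean; verbose?: boolean }
) {
  if (opts.silent) set('logging.root.level', 'off');
  if (opts.verbose) set('logging.root.level', 'all');
}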
@@ -8,6 +8,7 @@

import { getDeprecationsForGlobalSettings } from '../test_utils';
import { coreDeprecationProvider } from './core_deprecations';

const initialEnv = { ...process.env };

const applyCoreDeprecations = (settings?: Record<string, any>) =>

@@ -203,230 +204,4 @@ describe('core deprecations', () => {
      ).toEqual([`worker-src blob:`]);
    });
  });

  describe('logging.events.ops', () => {
    it('warns when ops events are used', () => {
      const { messages } = applyCoreDeprecations({
        logging: { events: { ops: '*' } },
      });
      expect(messages).toMatchInlineSnapshot(`
        Array [
          "\\"logging.events.ops\\" has been deprecated and will be removed in 8.0. To access ops data moving forward, please enable debug logs for the \\"metrics.ops\\" context in your logging configuration. For more details, see https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx",
        ]
      `);
    });
  });

  describe('logging.events.request and logging.events.response', () => {
    it('warns when request and response events are used', () => {
      const { messages } = applyCoreDeprecations({
        logging: { events: { request: '*', response: '*' } },
      });
      expect(messages).toMatchInlineSnapshot(`
        Array [
          "\\"logging.events.request\\" and \\"logging.events.response\\" have been deprecated and will be removed in 8.0. To access request and/or response data moving forward, please enable debug logs for the \\"http.server.response\\" context in your logging configuration. For more details, see https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx",
        ]
      `);
    });

    it('warns when only request event is used', () => {
      const { messages } = applyCoreDeprecations({
        logging: { events: { request: '*' } },
      });
      expect(messages).toMatchInlineSnapshot(`
        Array [
          "\\"logging.events.request\\" and \\"logging.events.response\\" have been deprecated and will be removed in 8.0. To access request and/or response data moving forward, please enable debug logs for the \\"http.server.response\\" context in your logging configuration. For more details, see https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx",
        ]
      `);
    });

    it('warns when only response event is used', () => {
      const { messages } = applyCoreDeprecations({
        logging: { events: { response: '*' } },
      });
      expect(messages).toMatchInlineSnapshot(`
        Array [
          "\\"logging.events.request\\" and \\"logging.events.response\\" have been deprecated and will be removed in 8.0. To access request and/or response data moving forward, please enable debug logs for the \\"http.server.response\\" context in your logging configuration. For more details, see https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx",
        ]
      `);
    });
  });

  describe('logging.timezone', () => {
    it('warns when timezone is used', () => {
      const { messages } = applyCoreDeprecations({
        logging: { timezone: 'GMT' },
      });
      expect(messages).toMatchInlineSnapshot(`
        Array [
          "\\"logging.timezone\\" has been deprecated and will be removed in 8.0. To set the timezone moving forward, please add a timezone date modifier to the log pattern in your logging configuration. For more details, see https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx",
        ]
      `);
    });
  });

  describe('logging.dest', () => {
    it('warns when dest is used', () => {
      const { messages } = applyCoreDeprecations({
        logging: { dest: 'stdout' },
      });
      expect(messages).toMatchInlineSnapshot(`
        Array [
          "\\"logging.dest\\" has been deprecated and will be removed in 8.0. To set the destination moving forward, you can use the \\"console\\" appender in your logging configuration or define a custom one. For more details, see https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx",
        ]
      `);
    });
    it('warns when dest path is given', () => {
      const { messages } = applyCoreDeprecations({
        logging: { dest: '/log-log.txt' },
      });
      expect(messages).toMatchInlineSnapshot(`
        Array [
          "\\"logging.dest\\" has been deprecated and will be removed in 8.0. To set the destination moving forward, you can use the \\"console\\" appender in your logging configuration or define a custom one. For more details, see https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx",
        ]
      `);
    });
  });

  describe('logging.quiet, logging.silent and logging.verbose', () => {
    it('warns when quiet is used', () => {
      const { messages } = applyCoreDeprecations({
        logging: { quiet: true },
      });
      expect(messages).toMatchInlineSnapshot(`
        Array [
          "\\"logging.quiet\\" has been deprecated and will be removed in 8.0. Moving forward, you can use \\"logging.root.level:error\\" in your logging configuration. ",
        ]
      `);
    });
    it('warns when silent is used', () => {
      const { messages } = applyCoreDeprecations({
        logging: { silent: true },
      });
      expect(messages).toMatchInlineSnapshot(`
        Array [
          "\\"logging.silent\\" has been deprecated and will be removed in 8.0. Moving forward, you can use \\"logging.root.level:off\\" in your logging configuration. ",
        ]
      `);
    });
    it('warns when verbose is used', () => {
      const { messages } = applyCoreDeprecations({
        logging: { verbose: true },
      });
      expect(messages).toMatchInlineSnapshot(`
        Array [
          "\\"logging.verbose\\" has been deprecated and will be removed in 8.0. Moving forward, you can use \\"logging.root.level:all\\" in your logging configuration. ",
        ]
      `);
    });
  });

  describe('logging.json', () => {
    it('warns when json is used', () => {
      const { messages } = applyCoreDeprecations({
        logging: { json: true },
      });
      expect(messages).toMatchInlineSnapshot(`
        Array [
          "\\"logging.json\\" has been deprecated and will be removed in 8.0. To specify log message format moving forward, you can configure the \\"appender.layout\\" property for every custom appender in your logging configuration. There is currently no default layout for custom appenders and each one must be declared explicitly. For more details, see https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx",
        ]
      `);
    });
  });

  describe('logging.rotate.enabled, logging.rotate.usePolling, logging.rotate.pollingInterval, logging.rotate.everyBytes and logging.rotate.keepFiles', () => {
    it('warns when logging.rotate configurations are used', () => {
      const { messages } = applyCoreDeprecations({
        logging: { rotate: { enabled: true } },
      });
      expect(messages).toMatchInlineSnapshot(`
        Array [
          "\\"logging.rotate\\" and sub-options have been deprecated and will be removed in 8.0. Moving forward, you can enable log rotation using the \\"rolling-file\\" appender for a logger in your logging configuration. For more details, see https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx#rolling-file-appender",
        ]
      `);
    });

    it('warns when logging.rotate polling configurations are used', () => {
      const { messages } = applyCoreDeprecations({
        logging: { rotate: { enabled: true, usePolling: true, pollingInterval: 5000 } },
      });
      expect(messages).toMatchInlineSnapshot(`
        Array [
          "\\"logging.rotate\\" and sub-options have been deprecated and will be removed in 8.0. Moving forward, you can enable log rotation using the \\"rolling-file\\" appender for a logger in your logging configuration. For more details, see https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx#rolling-file-appender",
        ]
      `);
    });

    it('warns when logging.rotate.everyBytes configurations are used', () => {
      const { messages } = applyCoreDeprecations({
        logging: { rotate: { enabled: true, everyBytes: 1048576 } },
      });
      expect(messages).toMatchInlineSnapshot(`
        Array [
          "\\"logging.rotate\\" and sub-options have been deprecated and will be removed in 8.0. Moving forward, you can enable log rotation using the \\"rolling-file\\" appender for a logger in your logging configuration. For more details, see https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx#rolling-file-appender",
        ]
      `);
    });

    it('warns when logging.rotate.keepFiles is used', () => {
      const { messages } = applyCoreDeprecations({
        logging: { rotate: { enabled: true, keepFiles: 1024 } },
      });
      expect(messages).toMatchInlineSnapshot(`
        Array [
          "\\"logging.rotate\\" and sub-options have been deprecated and will be removed in 8.0. Moving forward, you can enable log rotation using the \\"rolling-file\\" appender for a logger in your logging configuration. For more details, see https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx#rolling-file-appender",
        ]
      `);
    });
  });

  describe('logging.events.log', () => {
    it('warns when events.log is used', () => {
      const { messages } = applyCoreDeprecations({
        logging: { events: { log: ['info'] } },
      });
      expect(messages).toMatchInlineSnapshot(`
        Array [
          "\\"logging.events.log\\" has been deprecated and will be removed in 8.0. Moving forward, log levels can be customized on a per-logger basis using the new logging configuration.",
        ]
      `);
    });
  });

  describe('logging.events.error', () => {
    it('warns when events.error is used', () => {
      const { messages } = applyCoreDeprecations({
        logging: { events: { error: ['some error'] } },
      });
      expect(messages).toMatchInlineSnapshot(`
        Array [
          "\\"logging.events.error\\" has been deprecated and will be removed in 8.0. Moving forward, you can use \\"logging.root.level: error\\" in your logging configuration.",
        ]
      `);
    });
  });

  describe('logging.filter', () => {
    it('warns when filter.cookie is used', () => {
      const { messages } = applyCoreDeprecations({
        logging: { filter: { cookie: 'none' } },
      });
      expect(messages).toMatchInlineSnapshot(`
        Array [
          "\\"logging.filter\\" has been deprecated and will be removed in 8.0.",
        ]
      `);
    });

    it('warns when filter.authorization is used', () => {
      const { messages } = applyCoreDeprecations({
        logging: { filter: { authorization: 'none' } },
      });
      expect(messages).toMatchInlineSnapshot(`
        Array [
          "\\"logging.filter\\" has been deprecated and will be removed in 8.0.",
        ]
      `);
    });
  });
});
@@ -113,245 +113,6 @@ const cspRulesDeprecation: ConfigDeprecation = (settings, fromPath, addDeprecation) =>
  }
};

const opsLoggingEventDeprecation: ConfigDeprecation = (settings, fromPath, addDeprecation) => {
  if (settings.logging?.events?.ops) {
    addDeprecation({
      documentationUrl:
        'https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx#loggingevents',
      message:
        '"logging.events.ops" has been deprecated and will be removed ' +
        'in 8.0. To access ops data moving forward, please enable debug logs for the ' +
        '"metrics.ops" context in your logging configuration. For more details, see ' +
        'https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx',
      correctiveActions: {
        manualSteps: [
          `Remove "logging.events.ops" from your kibana settings.`,
          `Enable debug logs for the "metrics.ops" context in your logging configuration`,
        ],
      },
    });
  }
};

const requestLoggingEventDeprecation: ConfigDeprecation = (settings, fromPath, addDeprecation) => {
  if (settings.logging?.events?.request || settings.logging?.events?.response) {
    const removeConfigsSteps = [];

    if (settings.logging?.events?.request) {
      removeConfigsSteps.push(`Remove "logging.events.request" from your kibana configs.`);
    }

    if (settings.logging?.events?.response) {
      removeConfigsSteps.push(`Remove "logging.events.response" from your kibana configs.`);
    }

    addDeprecation({
      documentationUrl:
        'https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx#loggingevents',
      message:
        '"logging.events.request" and "logging.events.response" have been deprecated and will be removed ' +
        'in 8.0. To access request and/or response data moving forward, please enable debug logs for the ' +
        '"http.server.response" context in your logging configuration. For more details, see ' +
        'https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx',
      correctiveActions: {
        manualSteps: [
          ...removeConfigsSteps,
          `enable debug logs for the "http.server.response" context in your logging configuration.`,
        ],
      },
    });
  }
};

const timezoneLoggingDeprecation: ConfigDeprecation = (settings, fromPath, addDeprecation) => {
  if (settings.logging?.timezone) {
    addDeprecation({
      documentationUrl:
        'https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx#loggingtimezone',
      message:
        '"logging.timezone" has been deprecated and will be removed ' +
        'in 8.0. To set the timezone moving forward, please add a timezone date modifier to the log pattern ' +
        'in your logging configuration. For more details, see ' +
        'https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx',
      correctiveActions: {
        manualSteps: [
          `Remove "logging.timezone" from your kibana configs.`,
          `To set the timezone add a timezone date modifier to the log pattern in your logging configuration.`,
        ],
      },
    });
  }
};

const destLoggingDeprecation: ConfigDeprecation = (settings, fromPath, addDeprecation) => {
  if (settings.logging?.dest) {
    addDeprecation({
      documentationUrl:
        'https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx#loggingdest',
      message:
        '"logging.dest" has been deprecated and will be removed ' +
        'in 8.0. To set the destination moving forward, you can use the "console" appender ' +
        'in your logging configuration or define a custom one. For more details, see ' +
        'https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx',
      correctiveActions: {
        manualSteps: [
          `Remove "logging.dest" from your kibana configs.`,
          `To set the destination use the "console" appender in your logging configuration or define a custom one.`,
        ],
      },
    });
  }
};

const quietLoggingDeprecation: ConfigDeprecation = (settings, fromPath, addDeprecation) => {
  if (settings.logging?.quiet) {
    addDeprecation({
      documentationUrl:
        'https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx#loggingquiet',
      message:
        '"logging.quiet" has been deprecated and will be removed ' +
        'in 8.0. Moving forward, you can use "logging.root.level:error" in your logging configuration. ',
      correctiveActions: {
        manualSteps: [
          `Remove "logging.quiet" from your kibana configs.`,
          `Use "logging.root.level:error" in your logging configuration.`,
        ],
      },
    });
  }
};

const silentLoggingDeprecation: ConfigDeprecation = (settings, fromPath, addDeprecation) => {
  if (settings.logging?.silent) {
    addDeprecation({
      documentationUrl:
        'https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx#loggingsilent',
      message:
        '"logging.silent" has been deprecated and will be removed ' +
        'in 8.0. Moving forward, you can use "logging.root.level:off" in your logging configuration. ',
      correctiveActions: {
        manualSteps: [
          `Remove "logging.silent" from your kibana configs.`,
          `Use "logging.root.level:off" in your logging configuration.`,
        ],
      },
    });
  }
};

const verboseLoggingDeprecation: ConfigDeprecation = (settings, fromPath, addDeprecation) => {
  if (settings.logging?.verbose) {
    addDeprecation({
      documentationUrl:
        'https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx#loggingverbose',
      message:
        '"logging.verbose" has been deprecated and will be removed ' +
        'in 8.0. Moving forward, you can use "logging.root.level:all" in your logging configuration. ',
      correctiveActions: {
        manualSteps: [
          `Remove "logging.verbose" from your kibana configs.`,
          `Use "logging.root.level:all" in your logging configuration.`,
        ],
      },
    });
  }
};

const jsonLoggingDeprecation: ConfigDeprecation = (settings, fromPath, addDeprecation) => {
  // We silence the deprecation warning when running in development mode because
  // the dev CLI code in src/dev/cli_dev_mode/using_server_process.ts manually
  // specifies `--logging.json=false`. Since it's executed in a child process, the
  // `legacyLoggingConfigSchema` returns `true` for the TTY check on `process.stdout.isTTY`
  if (settings.logging?.json && settings.env !== 'development') {
    addDeprecation({
      documentationUrl:
        'https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx',
      message:
        '"logging.json" has been deprecated and will be removed ' +
        'in 8.0. To specify log message format moving forward, ' +
        'you can configure the "appender.layout" property for every custom appender in your logging configuration. ' +
        'There is currently no default layout for custom appenders and each one must be declared explicitly. ' +
        'For more details, see ' +
        'https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx',
      correctiveActions: {
        manualSteps: [
          `Remove "logging.json" from your kibana configs.`,
          `Configure the "appender.layout" property for every custom appender in your logging configuration.`,
        ],
      },
    });
  }
};

const logRotateDeprecation: ConfigDeprecation = (settings, fromPath, addDeprecation) => {
  if (settings.logging?.rotate) {
    addDeprecation({
      documentationUrl:
        'https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx#rolling-file-appender',
      message:
        '"logging.rotate" and sub-options have been deprecated and will be removed in 8.0. ' +
        'Moving forward, you can enable log rotation using the "rolling-file" appender for a logger ' +
        'in your logging configuration. For more details, see ' +
        'https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx#rolling-file-appender',
      correctiveActions: {
        manualSteps: [
          `Remove "logging.rotate" from your kibana configs.`,
          `Enable log rotation using the "rolling-file" appender for a logger in your logging configuration.`,
        ],
      },
    });
  }
};

const logEventsLogDeprecation: ConfigDeprecation = (settings, fromPath, addDeprecation) => {
  if (settings.logging?.events?.log) {
    addDeprecation({
      documentationUrl:
        'https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx#loggingevents',
      message:
        '"logging.events.log" has been deprecated and will be removed ' +
        'in 8.0. Moving forward, log levels can be customized on a per-logger basis using the new logging configuration.',
      correctiveActions: {
        manualSteps: [
          `Remove "logging.events.log" from your kibana configs.`,
          `Customize log levels per-logger using the new logging configuration.`,
        ],
      },
    });
  }
};

const logEventsErrorDeprecation: ConfigDeprecation = (settings, fromPath, addDeprecation) => {
  if (settings.logging?.events?.error) {
    addDeprecation({
      documentationUrl:
        'https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx#loggingevents',
      message:
        '"logging.events.error" has been deprecated and will be removed ' +
        'in 8.0. Moving forward, you can use "logging.root.level: error" in your logging configuration.',
      correctiveActions: {
        manualSteps: [
          `Remove "logging.events.error" from your kibana configs.`,
          `Use "logging.root.level: error" in your logging configuration.`,
        ],
      },
    });
  }
};

const logFilterDeprecation: ConfigDeprecation = (settings, fromPath, addDeprecation) => {
  if (settings.logging?.filter) {
    addDeprecation({
      documentationUrl:
        'https://github.com/elastic/kibana/blob/master/src/core/server/logging/README.mdx#loggingfilter',
      message: '"logging.filter" has been deprecated and will be removed in 8.0.',
      correctiveActions: {
        manualSteps: [`Remove "logging.filter" from your kibana configs.`],
      },
    });
  }
};

export const coreDeprecationProvider: ConfigDeprecationProvider = ({ rename, unusedFromRoot }) => [
  rename('cpu.cgroup.path.override', 'ops.cGroupOverrides.cpuPath'),
  rename('cpuacct.cgroup.path.override', 'ops.cGroupOverrides.cpuAcctPath'),

@@ -360,16 +121,4 @@ export const coreDeprecationProvider: ConfigDeprecationProvider = ({ rename, unu
  kibanaPathConf,
  rewriteBasePathDeprecation,
  cspRulesDeprecation,
  opsLoggingEventDeprecation,
  requestLoggingEventDeprecation,
  timezoneLoggingDeprecation,
  destLoggingDeprecation,
  quietLoggingDeprecation,
  silentLoggingDeprecation,
  verboseLoggingDeprecation,
  jsonLoggingDeprecation,
  logRotateDeprecation,
  logEventsLogDeprecation,
  logEventsErrorDeprecation,
  logFilterDeprecation,
];
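
For reference, a sketch of the new-style configuration that replaces the removed quiet/silent/verbose flags handled above (shape assumed, following the logging config objects used by the test fixtures in this commit; quiet maps to error, silent to off, verbose to all):

// Hypothetical replacement config object:
const replacementLoggingConfig = {
  logging: {
    root: { level: 'error' }, // was logging.quiet; use 'off' for silent, 'all' for verbose
  },
};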
@@ -30,5 +30,4 @@ export type {
  ConfigDeprecationFactory,
  EnvironmentMode,
  PackageInfo,
  LegacyObjectToConfigAdapter,
} from '@kbn/config';
@@ -23,17 +23,13 @@ describe('configuration deprecations', () => {
    }
  });

  it('should not log deprecation warnings for default configuration that is not one of `logging.verbose`, `logging.quiet` or `logging.silent`', async () => {
  it('should not log deprecation warnings for default configuration', async () => {
    root = kbnTestServer.createRoot();

    await root.preboot();
    await root.setup();

    const logs = loggingSystemMock.collect(mockLoggingSystem);
    expect(logs.warn.flat()).toMatchInlineSnapshot(`
      Array [
        "\\"logging.silent\\" has been deprecated and will be removed in 8.0. Moving forward, you can use \\"logging.root.level:off\\" in your logging configuration. ",
      ]
    `);
    expect(logs.warn.flat()).toHaveLength(0);
  });
});
@@ -211,7 +211,7 @@ const deprecations: ConfigDeprecationProvider = () => [
      });
    } else if (es.logQueries === true) {
      addDeprecation({
        message: `Setting [${fromPath}.logQueries] is deprecated and no longer used. You should set the log level to "debug" for the "elasticsearch.queries" context in "logging.loggers" or use "logging.verbose: true".`,
        message: `Setting [${fromPath}.logQueries] is deprecated and no longer used. You should set the log level to "debug" for the "elasticsearch.queries" context in "logging.loggers".`,
        correctiveActions: {
          manualSteps: [
            `Remove Setting [${fromPath}.logQueries] from your kibana configs`,
@@ -51,7 +51,6 @@ describe('request logging', () => {
  it('logs at the correct level and with the correct context', async () => {
    const root = kbnTestServer.createRoot({
      logging: {
        silent: true,
        appenders: {
          'test-console': {
            type: 'console',

@@ -99,7 +98,6 @@ describe('request logging', () => {
  let root: ReturnType<typeof kbnTestServer.createRoot>;
  const config = {
    logging: {
      silent: true,
      appenders: {
        'test-console': {
          type: 'console',

@@ -300,7 +298,6 @@ describe('request logging', () => {
    it('filters sensitive request headers when RewriteAppender is configured', async () => {
      root = kbnTestServer.createRoot({
        logging: {
          silent: true,
          appenders: {
            'test-console': {
              type: 'console',

@@ -402,7 +399,6 @@ describe('request logging', () => {
    it('filters sensitive response headers when RewriteAppender is configured', async () => {
      root = kbnTestServer.createRoot({
        logging: {
          silent: true,
          appenders: {
            'test-console': {
              type: 'console',
@@ -1,11 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

/** @internal */
export type { ILegacyService } from './legacy_service';
export { LegacyService } from './legacy_service';
@@ -1,234 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { LegacyLoggingConfig } from '@kbn/config';
import * as kbnTestServer from '../../../test_helpers/kbn_server';

import {
  getPlatformLogsFromMock,
  getLegacyPlatformLogsFromMock,
} from '../../logging/integration_tests/utils';

function createRoot(legacyLoggingConfig: LegacyLoggingConfig = {}) {
  return kbnTestServer.createRoot({
    migrations: { skip: true }, // otherwise stuck in polling ES
    plugins: { initialize: false },
    elasticsearch: { skipStartupConnectionCheck: true },
    logging: {
      // legacy platform config
      silent: false,
      json: false,
      ...legacyLoggingConfig,
      events: {
        log: ['test-file-legacy'],
      },
      // platform config
      appenders: {
        'test-console': {
          type: 'console',
          layout: {
            highlight: false,
            type: 'pattern',
          },
        },
      },
      loggers: [
        {
          name: 'test-file',
          appenders: ['test-console'],
          level: 'info',
        },
      ],
    },
  });
}

describe('logging service', () => {
  let mockConsoleLog: jest.SpyInstance;
  let mockStdout: jest.SpyInstance;

  beforeAll(async () => {
    mockConsoleLog = jest.spyOn(global.console, 'log');
    mockStdout = jest.spyOn(global.process.stdout, 'write');
  });

  afterAll(async () => {
    mockConsoleLog.mockRestore();
    mockStdout.mockRestore();
  });

  describe('compatibility', () => {
    describe('uses configured loggers', () => {
      let root: ReturnType<typeof createRoot>;
      beforeAll(async () => {
        root = createRoot();

        await root.preboot();
        await root.setup();
        await root.start();
      }, 30000);

      afterAll(async () => {
        await root.shutdown();
      });

      beforeEach(() => {
        mockConsoleLog.mockClear();
        mockStdout.mockClear();
      });

      it('when context matches', async () => {
        root.logger.get('test-file').info('handled by NP');

        expect(mockConsoleLog).toHaveBeenCalledTimes(1);
        const loggedString = getPlatformLogsFromMock(mockConsoleLog);
        expect(loggedString).toMatchInlineSnapshot(`
          Array [
            "[xxxx-xx-xxTxx:xx:xx.xxx-xx:xx][INFO ][test-file] handled by NP",
          ]
        `);
      });

      it('falls back to the root legacy logger otherwise', async () => {
        root.logger.get('test-file-legacy').info('handled by LP');

        expect(mockStdout).toHaveBeenCalledTimes(1);

        const loggedString = getLegacyPlatformLogsFromMock(mockStdout);
        expect(loggedString).toMatchInlineSnapshot(`
          Array [
            "  log   [xx:xx:xx.xxx] [info][test-file-legacy] handled by LP
          ",
          ]
        `);
      });
    });

    describe('logging config respects legacy logging settings', () => {
      let root: ReturnType<typeof createRoot>;

      afterEach(async () => {
        mockConsoleLog.mockClear();
        mockStdout.mockClear();
        await root.shutdown();
      });

      it('"silent": true', async () => {
        root = createRoot({ silent: true });

        await root.preboot();
        await root.setup();
        await root.start();

        const platformLogger = root.logger.get('test-file');
        platformLogger.info('info');
        platformLogger.warn('warn');
        platformLogger.error('error');

        expect(mockConsoleLog).toHaveBeenCalledTimes(3);

        expect(getPlatformLogsFromMock(mockConsoleLog)).toMatchInlineSnapshot(`
          Array [
            "[xxxx-xx-xxTxx:xx:xx.xxx-xx:xx][INFO ][test-file] info",
            "[xxxx-xx-xxTxx:xx:xx.xxx-xx:xx][WARN ][test-file] warn",
            "[xxxx-xx-xxTxx:xx:xx.xxx-xx:xx][ERROR][test-file] error",
          ]
        `);

        mockStdout.mockClear();

        const legacyPlatformLogger = root.logger.get('test-file-legacy');
        legacyPlatformLogger.info('info');
        legacyPlatformLogger.warn('warn');
        legacyPlatformLogger.error('error');

        expect(mockStdout).toHaveBeenCalledTimes(0);
      });

      it('"quiet": true', async () => {
        root = createRoot({ quiet: true });

        await root.preboot();
        await root.setup();
        await root.start();

        const platformLogger = root.logger.get('test-file');
        platformLogger.info('info');
        platformLogger.warn('warn');
        platformLogger.error('error');

        expect(mockConsoleLog).toHaveBeenCalledTimes(3);

        expect(getPlatformLogsFromMock(mockConsoleLog)).toMatchInlineSnapshot(`
          Array [
            "[xxxx-xx-xxTxx:xx:xx.xxx-xx:xx][INFO ][test-file] info",
            "[xxxx-xx-xxTxx:xx:xx.xxx-xx:xx][WARN ][test-file] warn",
            "[xxxx-xx-xxTxx:xx:xx.xxx-xx:xx][ERROR][test-file] error",
          ]
        `);

        mockStdout.mockClear();

        const legacyPlatformLogger = root.logger.get('test-file-legacy');
        legacyPlatformLogger.info('info');
        legacyPlatformLogger.warn('warn');
        legacyPlatformLogger.error('error');

        expect(mockStdout).toHaveBeenCalledTimes(1);
        expect(getLegacyPlatformLogsFromMock(mockStdout)).toMatchInlineSnapshot(`
          Array [
            "  log   [xx:xx:xx.xxx] [error][test-file-legacy] error
          ",
          ]
        `);
      });

      it('"verbose": true', async () => {
        root = createRoot({ verbose: true });

        await root.preboot();
        await root.setup();
        await root.start();

        const platformLogger = root.logger.get('test-file');
        platformLogger.info('info');
        platformLogger.warn('warn');
        platformLogger.error('error');

        expect(mockConsoleLog).toHaveBeenCalledTimes(3);

        expect(getPlatformLogsFromMock(mockConsoleLog)).toMatchInlineSnapshot(`
          Array [
            "[xxxx-xx-xxTxx:xx:xx.xxx-xx:xx][INFO ][test-file] info",
            "[xxxx-xx-xxTxx:xx:xx.xxx-xx:xx][WARN ][test-file] warn",
            "[xxxx-xx-xxTxx:xx:xx.xxx-xx:xx][ERROR][test-file] error",
          ]
        `);

        mockStdout.mockClear();

        const legacyPlatformLogger = root.logger.get('test-file-legacy');
        legacyPlatformLogger.info('info');
        legacyPlatformLogger.warn('warn');
        legacyPlatformLogger.error('error');

        expect(mockStdout).toHaveBeenCalledTimes(3);
        expect(getLegacyPlatformLogsFromMock(mockStdout)).toMatchInlineSnapshot(`
          Array [
            "  log   [xx:xx:xx.xxx] [info][test-file-legacy] info
          ",
            "  log   [xx:xx:xx.xxx] [warning][test-file-legacy] warn
          ",
            "  log   [xx:xx:xx.xxx] [error][test-file-legacy] error
          ",
          ]
        `);
      });
    });
  });
});
@@ -1,21 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import type { PublicMethodsOf } from '@kbn/utility-types';
import { LegacyService } from './legacy_service';

type LegacyServiceMock = jest.Mocked<PublicMethodsOf<LegacyService>>;

const createLegacyServiceMock = (): LegacyServiceMock => ({
  setup: jest.fn(),
  stop: jest.fn(),
});

export const legacyServiceMock = {
  create: createLegacyServiceMock,
};
@@ -1,18 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

export const reconfigureLoggingMock = jest.fn();
export const setupLoggingMock = jest.fn();
export const setupLoggingRotateMock = jest.fn();

jest.doMock('@kbn/legacy-logging', () => ({
  ...(jest.requireActual('@kbn/legacy-logging') as any),
  reconfigureLogging: reconfigureLoggingMock,
  setupLogging: setupLoggingMock,
  setupLoggingRotate: setupLoggingRotateMock,
}));
@@ -1,197 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import {
  setupLoggingMock,
  setupLoggingRotateMock,
  reconfigureLoggingMock,
} from './legacy_service.test.mocks';

import { BehaviorSubject } from 'rxjs';
import moment from 'moment';
import { REPO_ROOT } from '@kbn/dev-utils';

import { Config, Env, ObjectToConfigAdapter } from '../config';

import { getEnvOptions, configServiceMock } from '../config/mocks';
import { loggingSystemMock } from '../logging/logging_system.mock';
import { httpServiceMock } from '../http/http_service.mock';
import { LegacyService, LegacyServiceSetupDeps } from './legacy_service';

let coreId: symbol;
let env: Env;
let config$: BehaviorSubject<Config>;

let setupDeps: LegacyServiceSetupDeps;

const logger = loggingSystemMock.create();
let configService: ReturnType<typeof configServiceMock.create>;

beforeEach(() => {
  coreId = Symbol();
  env = Env.createDefault(REPO_ROOT, getEnvOptions());
  configService = configServiceMock.create();

  setupDeps = {
    http: httpServiceMock.createInternalSetupContract(),
  };

  config$ = new BehaviorSubject<Config>(
    new ObjectToConfigAdapter({
      elasticsearch: { hosts: ['http://127.0.0.1'] },
      server: { autoListen: true },
    })
  );

  configService.getConfig$.mockReturnValue(config$);
});

afterEach(() => {
  jest.clearAllMocks();
  setupLoggingMock.mockReset();
  setupLoggingRotateMock.mockReset();
  reconfigureLoggingMock.mockReset();
});

describe('#setup', () => {
  it('initializes legacy logging', async () => {
    const opsConfig = {
      interval: moment.duration(5, 'second'),
    };
    const opsConfig$ = new BehaviorSubject(opsConfig);

    const loggingConfig = {
      foo: 'bar',
    };
    const loggingConfig$ = new BehaviorSubject(loggingConfig);

    configService.atPath.mockImplementation((path) => {
      if (path === 'ops') {
        return opsConfig$;
      }
      if (path === 'logging') {
        return loggingConfig$;
      }
      return new BehaviorSubject({});
    });

    const legacyService = new LegacyService({
      coreId,
      env,
      logger,
      configService: configService as any,
    });

    await legacyService.setup(setupDeps);

    expect(setupLoggingMock).toHaveBeenCalledTimes(1);
    expect(setupLoggingMock).toHaveBeenCalledWith(
      setupDeps.http.server,
      loggingConfig,
      opsConfig.interval.asMilliseconds()
    );

    expect(setupLoggingRotateMock).toHaveBeenCalledTimes(1);
    expect(setupLoggingRotateMock).toHaveBeenCalledWith(setupDeps.http.server, loggingConfig);
  });

  it('reloads the logging config when the config changes', async () => {
    const opsConfig = {
      interval: moment.duration(5, 'second'),
    };
    const opsConfig$ = new BehaviorSubject(opsConfig);

    const loggingConfig = {
      foo: 'bar',
    };
    const loggingConfig$ = new BehaviorSubject(loggingConfig);

    configService.atPath.mockImplementation((path) => {
      if (path === 'ops') {
        return opsConfig$;
      }
      if (path === 'logging') {
        return loggingConfig$;
      }
      return new BehaviorSubject({});
    });

    const legacyService = new LegacyService({
      coreId,
      env,
      logger,
      configService: configService as any,
    });

    await legacyService.setup(setupDeps);

    expect(reconfigureLoggingMock).toHaveBeenCalledTimes(1);
    expect(reconfigureLoggingMock).toHaveBeenCalledWith(
      setupDeps.http.server,
      loggingConfig,
      opsConfig.interval.asMilliseconds()
    );

    loggingConfig$.next({
      foo: 'changed',
    });

    expect(reconfigureLoggingMock).toHaveBeenCalledTimes(2);
    expect(reconfigureLoggingMock).toHaveBeenCalledWith(
      setupDeps.http.server,
      { foo: 'changed' },
      opsConfig.interval.asMilliseconds()
    );
  });

  it('stops reloading logging config once the service is stopped', async () => {
    const opsConfig = {
      interval: moment.duration(5, 'second'),
    };
    const opsConfig$ = new BehaviorSubject(opsConfig);

    const loggingConfig = {
      foo: 'bar',
    };
    const loggingConfig$ = new BehaviorSubject(loggingConfig);

    configService.atPath.mockImplementation((path) => {
      if (path === 'ops') {
        return opsConfig$;
      }
      if (path === 'logging') {
        return loggingConfig$;
      }
      return new BehaviorSubject({});
    });

    const legacyService = new LegacyService({
      coreId,
      env,
      logger,
      configService: configService as any,
    });

    await legacyService.setup(setupDeps);

    expect(reconfigureLoggingMock).toHaveBeenCalledTimes(1);
    expect(reconfigureLoggingMock).toHaveBeenCalledWith(
      setupDeps.http.server,
      loggingConfig,
      opsConfig.interval.asMilliseconds()
    );

    await legacyService.stop();

    loggingConfig$.next({
      foo: 'changed',
    });

    expect(reconfigureLoggingMock).toHaveBeenCalledTimes(1);
  });
});
@ -1,75 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { combineLatest, Observable, Subscription } from 'rxjs';
import { first } from 'rxjs/operators';
import { Server } from '@hapi/hapi';
import type { PublicMethodsOf } from '@kbn/utility-types';
import {
  reconfigureLogging,
  setupLogging,
  setupLoggingRotate,
  LegacyLoggingConfig,
} from '@kbn/legacy-logging';

import { CoreContext } from '../core_context';
import { config as loggingConfig } from '../logging';
import { opsConfig, OpsConfigType } from '../metrics';
import { Logger } from '../logging';
import { InternalHttpServiceSetup } from '../http';

export interface LegacyServiceSetupDeps {
  http: InternalHttpServiceSetup;
}

/** @internal */
export type ILegacyService = PublicMethodsOf<LegacyService>;

/** @internal */
export class LegacyService {
  private readonly log: Logger;
  private readonly opsConfig$: Observable<OpsConfigType>;
  private readonly legacyLoggingConfig$: Observable<LegacyLoggingConfig>;
  private configSubscription?: Subscription;

  constructor(coreContext: CoreContext) {
    const { logger, configService } = coreContext;

    this.log = logger.get('legacy-service');
    this.legacyLoggingConfig$ = configService.atPath<LegacyLoggingConfig>(loggingConfig.path);
    this.opsConfig$ = configService.atPath<OpsConfigType>(opsConfig.path);
  }

  public async setup(setupDeps: LegacyServiceSetupDeps) {
    this.log.debug('setting up legacy service');
    await this.setupLegacyLogging(setupDeps.http.server);
  }

  private async setupLegacyLogging(server: Server) {
    const legacyLoggingConfig = await this.legacyLoggingConfig$.pipe(first()).toPromise();
    const currentOpsConfig = await this.opsConfig$.pipe(first()).toPromise();

    await setupLogging(server, legacyLoggingConfig, currentOpsConfig.interval.asMilliseconds());
    await setupLoggingRotate(server, legacyLoggingConfig);

    this.configSubscription = combineLatest([this.legacyLoggingConfig$, this.opsConfig$]).subscribe(
      ([newLoggingConfig, newOpsConfig]) => {
        reconfigureLogging(server, newLoggingConfig, newOpsConfig.interval.asMilliseconds());
      }
    );
  }

  public async stop() {
    this.log.debug('stopping legacy service');

    if (this.configSubscription !== undefined) {
      this.configSubscription.unsubscribe();
      this.configSubscription = undefined;
    }
  }
}

@ -1,142 +0,0 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP

exports[`\`append()\` correctly pushes records to legacy platform. 1`] = `
Object {
  "context": "context-1",
  "level": LogLevel {
    "id": "trace",
    "value": 7,
  },
  "message": "message-1",
  "pid": Any<Number>,
  "timestamp": 2012-02-01T11:22:33.044Z,
}
`;

exports[`\`append()\` correctly pushes records to legacy platform. 2`] = `
Object {
  "context": "context-2",
  "level": LogLevel {
    "id": "debug",
    "value": 6,
  },
  "message": "message-2",
  "pid": Any<Number>,
  "timestamp": 2012-02-01T11:22:33.044Z,
}
`;

exports[`\`append()\` correctly pushes records to legacy platform. 3`] = `
Object {
  "context": "context-3.sub-context-3",
  "level": LogLevel {
    "id": "info",
    "value": 5,
  },
  "message": "message-3",
  "pid": Any<Number>,
  "timestamp": 2012-02-01T11:22:33.044Z,
}
`;

exports[`\`append()\` correctly pushes records to legacy platform. 4`] = `
Object {
  "context": "context-4.sub-context-4",
  "level": LogLevel {
    "id": "warn",
    "value": 4,
  },
  "message": "message-4",
  "pid": Any<Number>,
  "timestamp": 2012-02-01T11:22:33.044Z,
}
`;

exports[`\`append()\` correctly pushes records to legacy platform. 5`] = `
Object {
  "context": "context-5",
  "error": [Error: Some Error],
  "level": LogLevel {
    "id": "error",
    "value": 3,
  },
  "message": "message-5-with-error",
  "pid": Any<Number>,
  "timestamp": 2012-02-01T11:22:33.044Z,
}
`;

exports[`\`append()\` correctly pushes records to legacy platform. 6`] = `
Object {
  "context": "context-6",
  "level": LogLevel {
    "id": "error",
    "value": 3,
  },
  "message": "message-6-with-message",
  "pid": Any<Number>,
  "timestamp": 2012-02-01T11:22:33.044Z,
}
`;

exports[`\`append()\` correctly pushes records to legacy platform. 7`] = `
Object {
  "context": "context-7.sub-context-7.sub-sub-context-7",
  "error": [Error: Some Fatal Error],
  "level": LogLevel {
    "id": "fatal",
    "value": 2,
  },
  "message": "message-7-with-error",
  "pid": Any<Number>,
  "timestamp": 2012-02-01T11:22:33.044Z,
}
`;

exports[`\`append()\` correctly pushes records to legacy platform. 8`] = `
Object {
  "context": "context-8.sub-context-8.sub-sub-context-8",
  "level": LogLevel {
    "id": "fatal",
    "value": 2,
  },
  "message": "message-8-with-message",
  "pid": Any<Number>,
  "timestamp": 2012-02-01T11:22:33.044Z,
}
`;

exports[`\`append()\` correctly pushes records to legacy platform. 9`] = `
Object {
  "context": "context-9.sub-context-9",
  "level": LogLevel {
    "id": "info",
    "value": 5,
  },
  "message": "message-9-with-message",
  "meta": Object {
    "someValue": 3,
  },
  "pid": Any<Number>,
  "timestamp": 2012-02-01T11:22:33.044Z,
}
`;

exports[`\`append()\` correctly pushes records to legacy platform. 10`] = `
Object {
  "context": "context-10.sub-context-10",
  "level": LogLevel {
    "id": "info",
    "value": 5,
  },
  "message": "message-10-with-message",
  "meta": Object {
    "tags": Array [
      "tag1",
      "tag2",
    ],
  },
  "pid": Any<Number>,
  "timestamp": 2012-02-01T11:22:33.044Z,
}
`;

@ -1,135 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

jest.mock('@kbn/legacy-logging');

import { LogRecord, LogLevel } from '../../../logging';
import { LegacyLoggingServer } from '@kbn/legacy-logging';
import { LegacyAppender } from './legacy_appender';

afterEach(() => (LegacyLoggingServer as any).mockClear());

test('`configSchema` creates correct schema.', () => {
  const appenderSchema = LegacyAppender.configSchema;
  const validConfig = { type: 'legacy-appender', legacyLoggingConfig: { verbose: true } };
  expect(appenderSchema.validate(validConfig)).toEqual({
    type: 'legacy-appender',
    legacyLoggingConfig: { verbose: true },
  });

  const wrongConfig = { type: 'not-legacy-appender' };
  expect(() => appenderSchema.validate(wrongConfig)).toThrow();
});

test('`append()` correctly pushes records to legacy platform.', () => {
  const timestamp = new Date(Date.UTC(2012, 1, 1, 11, 22, 33, 44));
  const records: LogRecord[] = [
    {
      context: 'context-1',
      level: LogLevel.Trace,
      message: 'message-1',
      timestamp,
      pid: 5355,
    },
    {
      context: 'context-2',
      level: LogLevel.Debug,
      message: 'message-2',
      timestamp,
      pid: 5355,
    },
    {
      context: 'context-3.sub-context-3',
      level: LogLevel.Info,
      message: 'message-3',
      timestamp,
      pid: 5355,
    },
    {
      context: 'context-4.sub-context-4',
      level: LogLevel.Warn,
      message: 'message-4',
      timestamp,
      pid: 5355,
    },
    {
      context: 'context-5',
      error: new Error('Some Error'),
      level: LogLevel.Error,
      message: 'message-5-with-error',
      timestamp,
      pid: 5355,
    },
    {
      context: 'context-6',
      level: LogLevel.Error,
      message: 'message-6-with-message',
      timestamp,
      pid: 5355,
    },
    {
      context: 'context-7.sub-context-7.sub-sub-context-7',
      error: new Error('Some Fatal Error'),
      level: LogLevel.Fatal,
      message: 'message-7-with-error',
      timestamp,
      pid: 5355,
    },
    {
      context: 'context-8.sub-context-8.sub-sub-context-8',
      level: LogLevel.Fatal,
      message: 'message-8-with-message',
      timestamp,
      pid: 5355,
    },
    {
      context: 'context-9.sub-context-9',
      level: LogLevel.Info,
      message: 'message-9-with-message',
      timestamp,
      pid: 5355,
      meta: { someValue: 3 },
    },
    {
      context: 'context-10.sub-context-10',
      level: LogLevel.Info,
      message: 'message-10-with-message',
      timestamp,
      pid: 5355,
      meta: { tags: ['tag1', 'tag2'] },
    },
  ];

  const appender = new LegacyAppender({ verbose: true });
  for (const record of records) {
    appender.append(record);
  }

  const [mockLegacyLoggingServerInstance] = (LegacyLoggingServer as any).mock.instances;
  expect(mockLegacyLoggingServerInstance.log.mock.calls).toHaveLength(records.length);
  records.forEach((r, idx) => {
    expect(mockLegacyLoggingServerInstance.log.mock.calls[idx][0]).toMatchSnapshot({
      pid: expect.any(Number),
    });
  });
});

test('legacy logging server is correctly created and disposed.', async () => {
  const mockRawLegacyLoggingConfig = { verbose: true };
  const appender = new LegacyAppender(mockRawLegacyLoggingConfig);

  expect(LegacyLoggingServer).toHaveBeenCalledTimes(1);
  expect(LegacyLoggingServer).toHaveBeenCalledWith(mockRawLegacyLoggingConfig);

  const [mockLegacyLoggingServerInstance] = (LegacyLoggingServer as any).mock.instances;
  expect(mockLegacyLoggingServerInstance.stop).not.toHaveBeenCalled();

  await appender.dispose();

  expect(mockLegacyLoggingServerInstance.stop).toHaveBeenCalledTimes(1);
});

@ -1,52 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { schema } from '@kbn/config-schema';
import { LegacyLoggingServer } from '@kbn/legacy-logging';
import { DisposableAppender, LogRecord } from '@kbn/logging';

export interface LegacyAppenderConfig {
  type: 'legacy-appender';
  legacyLoggingConfig?: Record<string, any>;
}

/**
 * Simple appender that just forwards `LogRecord` to the legacy KbnServer log.
 * @internal
 */
export class LegacyAppender implements DisposableAppender {
  public static configSchema = schema.object({
    type: schema.literal('legacy-appender'),
    legacyLoggingConfig: schema.recordOf(schema.string(), schema.any()),
  });

  /**
   * Sets {@link Appender.receiveAllLevels} because legacy does its own filtering based on the legacy logging
   * configuration.
   */
  public readonly receiveAllLevels = true;

  private readonly loggingServer: LegacyLoggingServer;

  constructor(legacyLoggingConfig: any) {
    this.loggingServer = new LegacyLoggingServer(legacyLoggingConfig);
  }

  /**
   * Forwards `LogRecord` to the legacy platform that will layout and
   * write record to the configured destination.
   * @param record `LogRecord` instance to forward to.
   */
  public append(record: LogRecord) {
    this.loggingServer.log(record);
  }

  public dispose() {
    this.loggingServer.stop();
  }
}

@ -562,11 +562,6 @@ The log will be less verbose with `warn` level for the `server` context name:
```

### Logging config migration
Compatibility with the legacy logging system is assured until the end of the `v7` version.
All log messages handled by the `root` context are forwarded to the legacy logging service using a `default` appender. If you re-write
the root appenders, make sure they contain the `default` appender to provide backward compatibility.
**Note**: If you define an appender for a context name, the log messages for that specific context aren't handled by the
`root` context anymore and aren't forwarded to the legacy logging service.

#### logging.dest
By default, logs go to *stdout*. With new Kibana logging you can use the pre-existing `console` appender or

@ -9,7 +9,6 @@
import { mockCreateLayout } from './appenders.test.mocks';

import { ByteSizeValue } from '@kbn/config-schema';
import { LegacyAppender } from '../../legacy/logging/appenders/legacy_appender';
import { Appenders } from './appenders';
import { ConsoleAppender } from './console/console_appender';
import { FileAppender } from './file/file_appender';

@ -68,13 +67,6 @@ test('`create()` creates correct appender.', () => {
  });
  expect(fileAppender).toBeInstanceOf(FileAppender);

  const legacyAppender = Appenders.create({
    type: 'legacy-appender',
    legacyLoggingConfig: { verbose: true },
  });

  expect(legacyAppender).toBeInstanceOf(LegacyAppender);

  const rollingFileAppender = Appenders.create({
    type: 'rolling-file',
    fileName: 'path',

@ -10,10 +10,6 @@ import { schema } from '@kbn/config-schema';
import { assertNever } from '@kbn/std';
import { DisposableAppender } from '@kbn/logging';

import {
  LegacyAppender,
  LegacyAppenderConfig,
} from '../../legacy/logging/appenders/legacy_appender';
import { Layouts } from '../layouts/layouts';
import { ConsoleAppender, ConsoleAppenderConfig } from './console/console_appender';
import { FileAppender, FileAppenderConfig } from './file/file_appender';

@ -32,7 +28,6 @@ import {
export const appendersSchema = schema.oneOf([
  ConsoleAppender.configSchema,
  FileAppender.configSchema,
  LegacyAppender.configSchema,
  RewriteAppender.configSchema,
  RollingFileAppender.configSchema,
]);

@ -41,7 +36,6 @@ export const appendersSchema = schema.oneOf([
export type AppenderConfigType =
  | ConsoleAppenderConfig
  | FileAppenderConfig
  | LegacyAppenderConfig
  | RewriteAppenderConfig
  | RollingFileAppenderConfig;

@ -64,8 +58,6 @@ export class Appenders {
        return new RewriteAppender(config);
      case 'rolling-file':
        return new RollingFileAppender(config);
      case 'legacy-appender':
        return new LegacyAppender(config.legacyLoggingConfig);

      default:
        return assertNever(config);

@ -14,7 +14,6 @@ import { Subject } from 'rxjs';
function createRoot() {
  return kbnTestServer.createRoot({
    logging: {
      silent: true, // set "true" in kbnTestServer
      appenders: {
        'test-console': {
          type: 'console',

@ -19,7 +19,6 @@ const flush = async () => delay(flushDelay);
function createRoot(appenderConfig: any) {
  return kbnTestServer.createRoot({
    logging: {
      silent: true, // set "true" in kbnTestServer
      appenders: {
        'rolling-file': appenderConfig,
      },

@ -9,35 +9,18 @@
import { LoggingConfig, config } from './logging_config';

test('`schema` creates correct schema with defaults.', () => {
  expect(config.schema.validate({})).toMatchInlineSnapshot(
    { json: expect.any(Boolean) }, // default value depends on TTY
    `
  expect(config.schema.validate({})).toMatchInlineSnapshot(`
    Object {
      "appenders": Map {},
      "dest": "stdout",
      "events": Object {},
      "filter": Object {},
      "json": Any<Boolean>,
      "loggers": Array [],
      "quiet": false,
      "root": Object {
        "appenders": Array [
          "default",
        ],
        "level": "info",
      },
      "rotate": Object {
        "enabled": false,
        "everyBytes": 10485760,
        "keepFiles": 7,
        "pollingInterval": 10000,
        "usePolling": false,
      },
      "silent": false,
      "verbose": false,
    }
  `
  );
  `);
});

test('`schema` throws if `root` logger does not have appenders configured.', () => {

@ -52,16 +35,14 @@ test('`schema` throws if `root` logger does not have appenders configured.', ()
  );
});

test('`schema` throws if `root` logger does not have "default" appender configured.', () => {
test('`schema` does not throw if `root` logger does not have "default" appender configured.', () => {
  expect(() =>
    config.schema.validate({
      root: {
        appenders: ['console'],
      },
    })
  ).toThrowErrorMatchingInlineSnapshot(
    `"[root]: \\"default\\" appender required for migration period till the next major release"`
  );
  ).not.toThrow();
});

test('`getParentLoggerContext()` returns correct parent context name.', () => {

@ -7,7 +7,6 @@
 */

import { schema, TypeOf } from '@kbn/config-schema';
import { legacyLoggingConfigSchema } from '@kbn/legacy-logging';
import { AppenderConfigType, Appenders } from './appenders/appenders';

// We need this helper for the types to be correct

@ -58,31 +57,23 @@ export const loggerSchema = schema.object({

/** @public */
export type LoggerConfigType = TypeOf<typeof loggerSchema>;

export const config = {
  path: 'logging',
  schema: legacyLoggingConfigSchema.extends({
  schema: schema.object({
    appenders: schema.mapOf(schema.string(), Appenders.configSchema, {
      defaultValue: new Map<string, AppenderConfigType>(),
    }),
    loggers: schema.arrayOf(loggerSchema, {
      defaultValue: [],
    }),
    root: schema.object(
      {
        appenders: schema.arrayOf(schema.string(), {
          defaultValue: [DEFAULT_APPENDER_NAME],
          minSize: 1,
        }),
        level: levelSchema,
      },
      {
        validate(rawConfig) {
          if (!rawConfig.appenders.includes(DEFAULT_APPENDER_NAME)) {
            return `"${DEFAULT_APPENDER_NAME}" appender required for migration period till the next major release`;
          }
        },
      }
    ),
    root: schema.object({
      appenders: schema.arrayOf(schema.string(), {
        defaultValue: [DEFAULT_APPENDER_NAME],
        minSize: 1,
      }),
      level: levelSchema,
    }),
  }),
};

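With the `default`-appender requirement dropped from the `root` schema above, a root logger may target only user-defined appenders. A minimal sketch (hypothetical test snippet; it assumes the `kbnTestServer.createRoot` helper used in the other hunks, and the appender name `custom-console` is illustrative):

```ts
// Valid after this change: root.appenders no longer has to include "default".
const root = kbnTestServer.createRoot({
  logging: {
    appenders: {
      'custom-console': { type: 'console', layout: { type: 'pattern' } },
    },
    root: {
      appenders: ['custom-console'],
      level: 'info',
    },
  },
});
```
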
@ -15,11 +15,6 @@ jest.mock('fs', () => ({

const dynamicProps = { process: { pid: expect.any(Number) } };

jest.mock('@kbn/legacy-logging', () => ({
  ...(jest.requireActual('@kbn/legacy-logging') as any),
  setupLoggingRotate: jest.fn().mockImplementation(() => Promise.resolve({})),
}));

const timestamp = new Date(Date.UTC(2012, 1, 1, 14, 33, 22, 11));
let mockConsoleLog: jest.SpyInstance;

@ -71,12 +71,11 @@ export interface AppCategory {

// Warning: (ae-forgotten-export) The symbol "ConsoleAppenderConfig" needs to be exported by the entry point index.d.ts
// Warning: (ae-forgotten-export) The symbol "FileAppenderConfig" needs to be exported by the entry point index.d.ts
// Warning: (ae-forgotten-export) The symbol "LegacyAppenderConfig" needs to be exported by the entry point index.d.ts
// Warning: (ae-forgotten-export) The symbol "RewriteAppenderConfig" needs to be exported by the entry point index.d.ts
// Warning: (ae-forgotten-export) The symbol "RollingFileAppenderConfig" needs to be exported by the entry point index.d.ts
//
// @public (undocumented)
export type AppenderConfigType = ConsoleAppenderConfig | FileAppenderConfig | LegacyAppenderConfig | RewriteAppenderConfig | RollingFileAppenderConfig;
export type AppenderConfigType = ConsoleAppenderConfig | FileAppenderConfig | RewriteAppenderConfig | RollingFileAppenderConfig;

// @public @deprecated
export interface AsyncPlugin<TSetup = void, TStart = void, TPluginsSetup extends object = object, TPluginsStart extends object = object> {

@ -7,32 +7,30 @@
 */

import { httpServiceMock } from './http/http_service.mock';

export const mockHttpService = httpServiceMock.create();
jest.doMock('./http/http_service', () => ({
  HttpService: jest.fn(() => mockHttpService),
}));

import { pluginServiceMock } from './plugins/plugins_service.mock';

export const mockPluginsService = pluginServiceMock.create();
jest.doMock('./plugins/plugins_service', () => ({
  PluginsService: jest.fn(() => mockPluginsService),
}));

import { elasticsearchServiceMock } from './elasticsearch/elasticsearch_service.mock';

export const mockElasticsearchService = elasticsearchServiceMock.create();
jest.doMock('./elasticsearch/elasticsearch_service', () => ({
  ElasticsearchService: jest.fn(() => mockElasticsearchService),
}));

import { legacyServiceMock } from './legacy/legacy_service.mock';
export const mockLegacyService = legacyServiceMock.create();
jest.mock('./legacy/legacy_service', () => ({
  LegacyService: jest.fn(() => mockLegacyService),
}));

const realKbnConfig = jest.requireActual('@kbn/config');

import { configServiceMock } from './config/mocks';

export const mockConfigService = configServiceMock.create();
jest.doMock('@kbn/config', () => ({
  ...realKbnConfig,

@ -40,18 +38,21 @@ jest.doMock('@kbn/config', () => ({
}));

import { savedObjectsServiceMock } from './saved_objects/saved_objects_service.mock';

export const mockSavedObjectsService = savedObjectsServiceMock.create();
jest.doMock('./saved_objects/saved_objects_service', () => ({
  SavedObjectsService: jest.fn(() => mockSavedObjectsService),
}));

import { contextServiceMock } from './context/context_service.mock';

export const mockContextService = contextServiceMock.create();
jest.doMock('./context/context_service', () => ({
  ContextService: jest.fn(() => mockContextService),
}));

import { uiSettingsServiceMock } from './ui_settings/ui_settings_service.mock';

export const mockUiSettingsService = uiSettingsServiceMock.create();
jest.doMock('./ui_settings/ui_settings_service', () => ({
  UiSettingsService: jest.fn(() => mockUiSettingsService),

@ -63,46 +64,54 @@ jest.doMock('./config/ensure_valid_configuration', () => ({
}));

import { RenderingService, mockRenderingService } from './rendering/__mocks__/rendering_service';

export { mockRenderingService };
jest.doMock('./rendering/rendering_service', () => ({ RenderingService }));

import { environmentServiceMock } from './environment/environment_service.mock';

export const mockEnvironmentService = environmentServiceMock.create();
jest.doMock('./environment/environment_service', () => ({
  EnvironmentService: jest.fn(() => mockEnvironmentService),
}));

import { metricsServiceMock } from './metrics/metrics_service.mock';

export const mockMetricsService = metricsServiceMock.create();
jest.doMock('./metrics/metrics_service', () => ({
  MetricsService: jest.fn(() => mockMetricsService),
}));

import { statusServiceMock } from './status/status_service.mock';

export const mockStatusService = statusServiceMock.create();
jest.doMock('./status/status_service', () => ({
  StatusService: jest.fn(() => mockStatusService),
}));

import { loggingServiceMock } from './logging/logging_service.mock';

export const mockLoggingService = loggingServiceMock.create();
jest.doMock('./logging/logging_service', () => ({
  LoggingService: jest.fn(() => mockLoggingService),
}));

import { i18nServiceMock } from './i18n/i18n_service.mock';

export const mockI18nService = i18nServiceMock.create();
jest.doMock('./i18n/i18n_service', () => ({
  I18nService: jest.fn(() => mockI18nService),
}));

import { prebootServiceMock } from './preboot/preboot_service.mock';

export const mockPrebootService = prebootServiceMock.create();
jest.doMock('./preboot/preboot_service', () => ({
  PrebootService: jest.fn(() => mockPrebootService),
}));

import { deprecationsServiceMock } from './deprecations/deprecations_service.mock';

export const mockDeprecationService = deprecationsServiceMock.create();
jest.doMock('./deprecations/deprecations_service', () => ({
  DeprecationsService: jest.fn(() => mockDeprecationService),

@ -9,7 +9,6 @@
import {
  mockElasticsearchService,
  mockHttpService,
  mockLegacyService,
  mockPluginsService,
  mockConfigService,
  mockSavedObjectsService,

@ -95,7 +94,6 @@ test('sets up services on "setup"', async () => {
  expect(mockHttpService.setup).not.toHaveBeenCalled();
  expect(mockElasticsearchService.setup).not.toHaveBeenCalled();
  expect(mockPluginsService.setup).not.toHaveBeenCalled();
  expect(mockLegacyService.setup).not.toHaveBeenCalled();
  expect(mockSavedObjectsService.setup).not.toHaveBeenCalled();
  expect(mockUiSettingsService.setup).not.toHaveBeenCalled();
  expect(mockRenderingService.setup).not.toHaveBeenCalled();

@ -111,7 +109,6 @@ test('sets up services on "setup"', async () => {
  expect(mockHttpService.setup).toHaveBeenCalledTimes(1);
  expect(mockElasticsearchService.setup).toHaveBeenCalledTimes(1);
  expect(mockPluginsService.setup).toHaveBeenCalledTimes(1);
  expect(mockLegacyService.setup).toHaveBeenCalledTimes(1);
  expect(mockSavedObjectsService.setup).toHaveBeenCalledTimes(1);
  expect(mockUiSettingsService.setup).toHaveBeenCalledTimes(1);
  expect(mockRenderingService.setup).toHaveBeenCalledTimes(1);

@ -199,7 +196,6 @@ test('stops services on "stop"', async () => {
  expect(mockHttpService.stop).not.toHaveBeenCalled();
  expect(mockElasticsearchService.stop).not.toHaveBeenCalled();
  expect(mockPluginsService.stop).not.toHaveBeenCalled();
  expect(mockLegacyService.stop).not.toHaveBeenCalled();
  expect(mockSavedObjectsService.stop).not.toHaveBeenCalled();
  expect(mockUiSettingsService.stop).not.toHaveBeenCalled();
  expect(mockMetricsService.stop).not.toHaveBeenCalled();

@ -211,7 +207,6 @@ test('stops services on "stop"', async () => {
  expect(mockHttpService.stop).toHaveBeenCalledTimes(1);
  expect(mockElasticsearchService.stop).toHaveBeenCalledTimes(1);
  expect(mockPluginsService.stop).toHaveBeenCalledTimes(1);
  expect(mockLegacyService.stop).toHaveBeenCalledTimes(1);
  expect(mockSavedObjectsService.stop).toHaveBeenCalledTimes(1);
  expect(mockUiSettingsService.stop).toHaveBeenCalledTimes(1);
  expect(mockMetricsService.stop).toHaveBeenCalledTimes(1);

@ -21,7 +21,6 @@ import { ElasticsearchService } from './elasticsearch';
import { HttpService } from './http';
import { HttpResourcesService } from './http_resources';
import { RenderingService } from './rendering';
import { LegacyService } from './legacy';
import { Logger, LoggerFactory, LoggingService, ILoggingSystem } from './logging';
import { UiSettingsService } from './ui_settings';
import { PluginsService, config as pluginsConfig } from './plugins';

@ -69,7 +68,6 @@ export class Server {
  private readonly elasticsearch: ElasticsearchService;
  private readonly http: HttpService;
  private readonly rendering: RenderingService;
  private readonly legacy: LegacyService;
  private readonly log: Logger;
  private readonly plugins: PluginsService;
  private readonly savedObjects: SavedObjectsService;

@ -108,7 +106,6 @@ export class Server {
    this.http = new HttpService(core);
    this.rendering = new RenderingService(core);
    this.plugins = new PluginsService(core);
    this.legacy = new LegacyService(core);
    this.elasticsearch = new ElasticsearchService(core);
    this.savedObjects = new SavedObjectsService(core);
    this.uiSettings = new UiSettingsService(core);

@ -286,10 +283,6 @@ export class Server {
    const pluginsSetup = await this.plugins.setup(coreSetup);
    this.#pluginsInitialized = pluginsSetup.initialized;

    await this.legacy.setup({
      http: httpSetup,
    });

    this.registerCoreContext(coreSetup);
    this.coreApp.setup(coreSetup, uiPlugins);

@ -348,7 +341,6 @@ export class Server {
  public async stop() {
    this.log.debug('stopping server');

    await this.legacy.stop();
    await this.http.stop(); // HTTP server has to stop before savedObjects and ES clients are closed to be able to gracefully attempt to resolve any pending requests
    await this.plugins.stop();
    await this.savedObjects.stop();

@ -32,7 +32,11 @@ const DEFAULTS_SETTINGS = {
    port: 0,
    xsrf: { disableProtection: true },
  },
  logging: { silent: true },
  logging: {
    root: {
      level: 'off',
    },
  },
  plugins: {},
  migrations: { skip: false },
};

@ -45,7 +49,6 @@ export function createRootWithSettings(
    configs: [],
    cliArgs: {
      dev: false,
      silent: false,
      watch: false,
      basePath: false,
      runExamples: false,

@ -82,24 +82,13 @@ kibana_vars=(
  logging.appenders
  logging.appenders.console
  logging.appenders.file
  logging.dest
  logging.json
  logging.loggers
  logging.loggers.appenders
  logging.loggers.level
  logging.loggers.name
  logging.quiet
  logging.root
  logging.root.appenders
  logging.root.level
  logging.rotate.enabled
  logging.rotate.everyBytes
  logging.rotate.keepFiles
  logging.rotate.pollingInterval
  logging.rotate.usePolling
  logging.silent
  logging.useUTC
  logging.verbose
  map.includeElasticMapsService
  map.proxyElasticMapsServiceInMaps
  map.regionmap

@ -55,7 +55,6 @@ when setting an exact config or its parent path to `false`.
    "server.port": 5603,
    "server.basePath": "[redacted]",
    "server.rewriteBasePath": true,
    "logging.json": false,
    "usageCollection.uiCounters.debug": true
  }
}

@ -28,7 +28,6 @@ export default function () {
    buildArgs: [],
    sourceArgs: ['--no-base-path', '--env.name=development'],
    serverArgs: [
      '--logging.json=false',
      `--server.port=${kbnTestConfig.getPort()}`,
      '--status.allowAnonymous=true',
      // We shouldn't embed credentials into the URL since Kibana requests to Elasticsearch should

@ -51,7 +51,7 @@ export default async function ({ readConfigFile }: FtrConfigProviderContext) {
    runOptions: {
      ...httpConfig.get('kbnTestServer.runOptions'),
      // Don't wait for Kibana to be completely ready so that we can test the status timeouts
      wait: /\[Kibana\]\[http\] http server running/,
      wait: /Kibana is now unavailable/,
    },
  },
};

@ -1729,7 +1729,7 @@ describe('createConfig()', () => {
        },
      },
    })
  ).toThrow('[audit.appender.2.type]: expected value to equal [legacy-appender]');
  ).toThrow('[audit.appender.1.layout]: expected at least one defined value but got [undefined]');
});

it('rejects an ignore_filter when no appender is configured', () => {

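The expected error changes because `legacy-appender` is no longer part of the appenders `oneOf`, so validation now reports the failure of the remaining candidate schemas instead. A hedged sketch of a shape that would trip the new message (hypothetical values; the surrounding security audit config is assumed):

```ts
// With 'legacy-appender' gone, a file appender missing its required layout
// now fails as "[audit.appender.1.layout]: expected at least one defined
// value but got [undefined]" (index 1 = the file appender schema).
const invalidAuditConfig = {
  audit: {
    appender: {
      type: 'file',
      fileName: '/path/to/audit.log',
      // layout intentionally omitted
    },
  },
};
```
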
30
yarn.lock
@ -2404,15 +2404,6 @@
    async-retry "^1.2.3"
    strip-ansi "^5.2.0"

"@elastic/good@^9.0.1-kibana3":
  version "9.0.1-kibana3"
  resolved "https://registry.yarnpkg.com/@elastic/good/-/good-9.0.1-kibana3.tgz#a70c2b30cbb4f44d1cf4a464562e0680322eac9b"
  integrity sha512-UtPKr0TmlkL1abJfO7eEVUTqXWzLKjMkz+65FvxU/Ub9kMAr4No8wHLRfDHFzBkWoDIbDWygwld011WzUnea1Q==
  dependencies:
    "@hapi/hoek" "9.x.x"
    "@hapi/oppsy" "3.x.x"
    "@hapi/validate" "1.x.x"

"@elastic/makelogs@^6.0.0":
  version "6.0.0"
  resolved "https://registry.yarnpkg.com/@elastic/makelogs/-/makelogs-6.0.0.tgz#d6d74d5d0f020123c54160370d49ca5e0aab1fe1"

@ -2897,14 +2888,6 @@
  resolved "https://registry.yarnpkg.com/@hapi/file/-/file-2.0.0.tgz#2ecda37d1ae9d3078a67c13b7da86e8c3237dfb9"
  integrity sha512-WSrlgpvEqgPWkI18kkGELEZfXr0bYLtr16iIN4Krh9sRnzBZN6nnWxHFxtsnP684wueEySBbXPDg/WfA9xJdBQ==

"@hapi/good-squeeze@6.0.0":
  version "6.0.0"
  resolved "https://registry.yarnpkg.com/@hapi/good-squeeze/-/good-squeeze-6.0.0.tgz#bb72d6869cd7398b615a6b7270f630dc4f76aebf"
  integrity sha512-UgHAF9Lm8fJPzgf2HymtowOwNc1+IL+p08YTVR+XA4d8nmyE1t9x3RLA4riqldnOKHkVqGakJ1jGqUG7jk77Cg==
  dependencies:
    "@hapi/hoek" "9.x.x"
    fast-safe-stringify "2.x.x"

"@hapi/h2o2@^9.1.0":
  version "9.1.0"
  resolved "https://registry.yarnpkg.com/@hapi/h2o2/-/h2o2-9.1.0.tgz#b223f4978b6f2b0d7d9db10a84a567606c4c3551"

@ -2992,13 +2975,6 @@
    "@hapi/hoek" "^9.0.4"
    "@hapi/vise" "^4.0.0"

"@hapi/oppsy@3.x.x":
  version "3.0.0"
  resolved "https://registry.yarnpkg.com/@hapi/oppsy/-/oppsy-3.0.0.tgz#1ae397e200e86d0aa41055f103238ed8652947ca"
  integrity sha512-0kfUEAqIi21GzFVK2snMO07znMEBiXb+/pOx1dmgOO9TuvFstcfmHU5i56aDfiFP2DM5WzQCU2UWc2gK1lMDhQ==
  dependencies:
    "@hapi/hoek" "9.x.x"

"@hapi/pez@^5.0.1":
  version "5.0.3"
  resolved "https://registry.yarnpkg.com/@hapi/pez/-/pez-5.0.3.tgz#b75446e6fef8cbb16816573ab7da1b0522e7a2a1"

@ -3750,10 +3726,6 @@
  version "0.0.0"
  uid ""

"@kbn/legacy-logging@link:bazel-bin/packages/kbn-legacy-logging":
  version "0.0.0"
  uid ""

"@kbn/logging@link:bazel-bin/packages/kbn-logging":
  version "0.0.0"
  uid ""

@ -14493,7 +14465,7 @@ fast-redact@^3.0.0:
  resolved "https://registry.yarnpkg.com/fast-redact/-/fast-redact-3.0.0.tgz#ac2f9e36c9f4976f5db9fb18c6ffbaf308cf316d"
  integrity sha512-a/S/Hp6aoIjx7EmugtzLqXmcNsyFszqbt6qQ99BdG61QjBZF6shNis0BYR6TsZOQ1twYc0FN2Xdhwwbv6+KD0w==

fast-safe-stringify@2.x.x, fast-safe-stringify@^2.0.4, fast-safe-stringify@^2.0.7:
fast-safe-stringify@^2.0.4, fast-safe-stringify@^2.0.7:
  version "2.0.8"
  resolved "https://registry.yarnpkg.com/fast-safe-stringify/-/fast-safe-stringify-2.0.8.tgz#dc2af48c46cf712b683e849b2bbd446b32de936f"
  integrity sha512-lXatBjf3WPjmWD6DpIZxkeSsCOwqI0maYMpgDlx8g4U2qi4lbjA9oH/HD2a87G+KfsUmo5WbJFmqBZlPxtptag==