[APM] Refactor synthtrace (#147036)

Co-authored-by: kibanamachine <42973632+kibanamachine@users.noreply.github.com>

parent 503b466b72
commit c3d5b0279d

211 changed files with 4049 additions and 6503 deletions
.github/CODEOWNERS (vendored): 1 change
@@ -887,6 +887,7 @@ packages/kbn-ambient-ui-types @elastic/kibana-operations
packages/kbn-analytics @elastic/kibana-core
packages/kbn-apm-config-loader @elastic/kibana-core @vigneshshanmugam
packages/kbn-apm-synthtrace @elastic/apm-ui
packages/kbn-apm-synthtrace-client @elastic/apm-ui
packages/kbn-apm-utils @elastic/apm-ui
packages/kbn-axe-config @elastic/kibana-qa
packages/kbn-babel-plugin-package-imports @elastic/kibana-operations
package.json

@@ -740,6 +740,7 @@
"@kbn/ambient-storybook-types": "link:packages/kbn-ambient-storybook-types",
"@kbn/ambient-ui-types": "link:packages/kbn-ambient-ui-types",
"@kbn/apm-synthtrace": "link:packages/kbn-apm-synthtrace",
"@kbn/apm-synthtrace-client": "link:packages/kbn-apm-synthtrace-client",
"@kbn/axe-config": "link:packages/kbn-axe-config",
"@kbn/babel-plugin-package-imports": "link:packages/kbn-babel-plugin-package-imports",
"@kbn/babel-preset": "link:packages/kbn-babel-preset",

@@ -870,6 +871,7 @@
"@types/fetch-mock": "^7.3.1",
"@types/file-saver": "^2.0.0",
"@types/flot": "^0.0.31",
"@types/fnv-plus": "^1.3.0",
"@types/geojson": "7946.0.7",
"@types/getos": "^3.0.0",
"@types/gulp": "^4.0.6",

@@ -912,6 +914,7 @@
"@types/minimist": "^1.2.2",
"@types/mock-fs": "^4.13.1",
"@types/moment-duration-format": "^2.2.3",
"@types/multistream": "^4.1.0",
"@types/mustache": "^0.8.31",
"@types/nock": "^10.0.3",
"@types/node": "16.11.41",

@@ -926,6 +929,7 @@
"@types/pbf": "3.0.2",
"@types/pdfmake": "^0.2.2",
"@types/pegjs": "^0.10.1",
"@types/pidusage": "^2.0.2",
"@types/pixelmatch": "^5.2.4",
"@types/pngjs": "^3.4.0",
"@types/prettier": "^2.3.2",

@@ -1053,6 +1057,7 @@
"faker": "^5.1.0",
"fetch-mock": "^7.3.9",
"file-loader": "^4.2.0",
"fnv-plus": "^1.3.1",
"form-data": "^4.0.0",
"geckodriver": "^3.2.0",
"gulp-brotli": "^3.0.0",

@@ -1098,7 +1103,9 @@
"mochawesome-merge": "^4.2.1",
"mock-fs": "^5.1.2",
"ms-chromium-edge-driver": "^0.5.1",
"multistream": "^4.1.0",
"mutation-observer": "^1.0.3",
"native-hdr-histogram": "^1.0.0",
"nock": "12.0.3",
"node-sass": "^8.0.0",
"null-loader": "^3.0.0",

@@ -1107,6 +1114,7 @@
"openapi-types": "^10.0.0",
"pbf": "3.2.1",
"peggy": "^1.2.0",
"pidusage": "^3.0.2",
"pirates": "^4.0.1",
"piscina": "^3.2.0",
"pixelmatch": "^5.3.0",
packages/kbn-apm-synthtrace-client/README.md (new file): 1 line

@@ -0,0 +1 @@
See packages/kbn-apm-synthtrace/README.json
packages/kbn-apm-synthtrace-client/index.ts (new file): 32 lines

@@ -0,0 +1,32 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

export { observer } from './src/lib/agent_config';
export type { AgentConfigFields } from './src/lib/agent_config/agent_config_fields';
export { apm } from './src/lib/apm';
export type { ApmFields } from './src/lib/apm/apm_fields';
export type { Instance } from './src/lib/apm/instance';
export { MobileDevice } from './src/lib/apm/mobile_device';
export type {
  DeviceInfo,
  GeoInfo,
  NetworkConnectionInfo,
  OSInfo,
} from './src/lib/apm/mobile_device';
export { httpExitSpan } from './src/lib/apm/span';
export { DistributedTrace } from './src/lib/dsl/distributed_trace_client';
export type { Fields } from './src/lib/entity';
export type { Serializable } from './src/lib/serializable';
export { timerange } from './src/lib/timerange';
export type { Timerange } from './src/lib/timerange';
export { generateLongId, generateShortId } from './src/lib/utils/generate_id';
export { appendHash, hashKeysOf } from './src/lib/utils/hash';
export { dedot } from './src/lib/utils/dedot';
export type { ESDocumentWithOperation, SynthtraceESAction, SynthtraceGenerator } from './src/types';

export { parseInterval } from './src/lib/interval';
@@ -5,12 +5,11 @@
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
export function createPicker(fields: string[]) {
  const wildcards = fields
    .filter((field) => field.endsWith('.*'))
    .map((field) => field.replace('*', ''));

  return (value: unknown, key: string) => {
    return fields.includes(key) || wildcards.some((field) => key.startsWith(field));
  };
}
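A minimal usage sketch of the picker above (field names hypothetical, import path assumed): the returned predicate matches exact field names and any key under a `.*` wildcard prefix.

```ts
import { createPicker } from './create_picker'; // hypothetical relative path

const picker = createPicker(['service.name', 'cloud.*']);

picker('opbeans-go', 'service.name'); // true: exact match
picker('gcp', 'cloud.provider'); // true: key starts with the 'cloud.' wildcard prefix
picker('debian', 'host.os.platform'); // false: not selected
```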
packages/kbn-apm-synthtrace-client/jest.config.js

module.exports = {
  preset: '@kbn/test',
  rootDir: '../..',
  roots: ['<rootDir>/packages/kbn-apm-synthtrace-client'],
  setupFiles: [],
  setupFilesAfterEnv: [],
};
packages/kbn-apm-synthtrace-client/kibana.jsonc (new file): 6 lines

@@ -0,0 +1,6 @@
{
  "type": "shared-common",
  "id": "@kbn/apm-synthtrace-client",
  "devOnly": true,
  "owner": "@elastic/apm-ui"
}
packages/kbn-apm-synthtrace-client/package.json (new file): 7 lines

@@ -0,0 +1,7 @@
{
  "name": "@kbn/apm-synthtrace-client",
  "version": "0.1.0",
  "description": "Elastic APM trace data generator",
  "license": "SSPL-1.0 OR Elastic License 2.0",
  "private": true
}
@@ -5,8 +5,9 @@
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import { cluster } from './cluster';

export const stackMonitoring = {
  cluster,

module.exports = {
  rules: {
    'import/no-default-export': 'off',
  },
};
@@ -14,7 +14,9 @@ export type AgentConfigFields = Pick<
  | 'processor.event'
  | 'processor.name'
  | 'metricset.name'
  | 'observer'
  | 'observer.version'
  | 'observer.type'
  | 'observer.version_major'
  | 'ecs.version'
  | 'event.ingested'
> &
@@ -46,49 +46,66 @@ export interface GeoLocation {
  type: string;
}

export type ApmFields = Fields &
type ExperimentalFields = Partial<{
  'metricset.interval': string;
  'transaction.duration.summary': string;
}>;

export type ApmFields = Fields<{
  'metricset.id': string;
}> &
  Partial<{
    'timestamp.us'?: number;

    'agent.name': string;
    'agent.version': string;
    'client.geo.city_name': string;
    'client.geo.continent_name': string;
    'client.geo.country_iso_code': string;
    'client.geo.country_name': string;
    'client.geo.location': GeoLocation;
    'client.geo.region_iso_code': string;
    'client.geo.region_name': string;
    'client.geo.location': GeoLocation;
    'client.ip': string;
    'cloud.provider': string;
    'cloud.project.name': string;
    'cloud.service.name': string;
    'cloud.account.id': string;
    'cloud.account.name': string;
    'cloud.availability_zone': string;
    'cloud.machine.type': string;
    'cloud.project.id': string;
    'cloud.project.name': string;
    'cloud.provider': string;
    'cloud.region': string;
    'cloud.service.name': string;
    'container.id': string;
    'destination.address': string;
    'destination.port': number;
    'device.id': string;
    'device.manufacturer': string;
    'device.model.identifier': string;
    'device.model.name': string;
    'device.manufacturer': string;
    'ecs.version': string;
    'event.outcome': string;
    'event.name': string;
    'event.ingested': number;
    'error.id': string;
    'error.exception': ApmException[];
    'error.grouping_name': string;
    'error.grouping_key': string;
    'faas.id': string;
    'faas.name': string;
    'error.grouping_name': string;
    'error.id': string;
    'event.ingested': number;
    'event.name': string;
    'event.outcome': string;
    'event.outcome_numeric':
      | number
      | {
          sum: number;
          value_count: number;
        };
    'faas.coldstart': boolean;
    'faas.execution': string;
    'faas.id': string;
    'faas.name': string;
    'faas.trigger.type': string;
    'faas.trigger.request_id': string;
    'host.name': string;
    'faas.version': string;
    'host.architecture': string;
    'host.hostname': string;
    'host.name': string;
    'host.os.full': string;
    'host.os.name': string;
    'host.os.platform': string;
@@ -96,59 +113,64 @@ export type ApmFields = Fields &
    'host.os.version': string;
    'http.request.method': string;
    'http.response.status_code': number;
    'kubernetes.pod.uid': string;
    'kubernetes.pod.name': string;
    'kubernetes.pod.uid': string;
    'metricset.name': string;
    observer: Observer;
    'network.connection.type': string;
    'network.connection.subtype': string;
    'network.carrier.name': string;
    'network.carrier.icc': string;
    'network.carrier.mcc': string;
    'network.carrier.mnc': string;
    'network.carrier.icc': string;
    'network.carrier.name': string;
    'network.connection.subtype': string;
    'network.connection.type': string;
    'observer.type': string;
    'observer.version_major': number;
    'observer.version': string;
    'parent.id': string;
    'processor.event': string;
    'processor.name': string;
    'session.id': string;
    'trace.id': string;
    'transaction.duration.us': number;
    'transaction.id': string;
    'transaction.name': string;
    'transaction.type': string;
    'transaction.id': string;
    'transaction.duration.us': number;
    'transaction.duration.histogram': {
      values: number[];
      counts: number[];
    };
    'transaction.sampled': true;
    'service.name': string;
    'service.version': string;
    'service.environment': string;
    'service.framework.name': string;
    'service.framework.version': string;
    'service.language.name': string;
    'service.language.version': string;
    'service.name': string;
    'service.node.name': string;
    'service.runtime.name': string;
    'service.runtime.version': string;
    'service.framework.name': string;
    'service.framework.version': string;
    'service.target.name': string;
    'service.target.type': string;
    'service.version': string;
    'span.action': string;
    'span.destination.service.resource': string;
    'span.destination.service.response_time.count': number;
    'span.destination.service.response_time.sum.us': number;
    'span.duration.us': number;
    'span.id': string;
    'span.name': string;
    'span.type': string;
    'span.subtype': string;
    'span.duration.us': number;
    'span.destination.service.resource': string;
    'span.destination.service.response_time.sum.us': number;
    'span.destination.service.response_time.count': number;
    'span.self_time.count': number;
    'span.self_time.sum.us': number;
    'span.subtype': string;
    'span.type': string;
    'transaction.result': string;
    'transaction.sampled': true;
    'span.links': Array<{
      trace: { id: string };
      span: { id: string };
    }>;
    'url.original': string;
  }> &
  ApmApplicationMetricFields;
  ApmApplicationMetricFields &
  ExperimentalFields;

export type SpanParams = {
  spanName: string;
@@ -9,13 +9,7 @@ import { service } from './service';
import { mobileApp } from './mobile_app';
import { browser } from './browser';
import { serverlessFunction } from './serverless_function';
import { getTransactionMetrics } from './processors/get_transaction_metrics';
import { getSpanDestinationMetrics } from './processors/get_span_destination_metrics';
import { getChromeUserAgentDefaults } from './defaults/get_chrome_user_agent_defaults';
import { getBreakdownMetrics } from './processors/get_breakdown_metrics';
import { getApmWriteTargets } from './utils/get_apm_write_targets';
import { ApmSynthtraceEsClient } from './client/apm_synthtrace_es_client';
import { ApmSynthtraceKibanaClient } from './client/apm_synthtrace_kibana_client';

import type { ApmException } from './apm_fields';
@@ -23,14 +17,8 @@ export const apm = {
  service,
  mobileApp,
  browser,
  getTransactionMetrics,
  getSpanDestinationMetrics,
  getChromeUserAgentDefaults,
  getBreakdownMetrics,
  getApmWriteTargets,
  ApmSynthtraceEsClient,
  ApmSynthtraceKibanaClient,
  serverlessFunction,
};

export type { ApmSynthtraceEsClient, ApmException };
export type { ApmException };
packages/kbn-apm-synthtrace-client/src/lib/apm/processors/get_breakdown_metrics.ts (new file)

@@ -0,0 +1,150 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import { pick } from 'lodash';
import { hashKeysOf } from '../../utils/hash';
import { ApmFields } from '../apm_fields';

const KEY_FIELDS: Array<keyof ApmFields> = [
  'container.id',
  'kubernetes.pod.name',
  'kubernetes.pod.uid',
  'agent.name',
  'agent.version',
  'cloud.account.id',
  'cloud.account.name',
  'cloud.availability_zone',
  'cloud.machine.type',
  'cloud.project.id',
  'cloud.project.name',
  'cloud.provider',
  'cloud.region',
  'cloud.service.name',
  'service.name',
  'service.environment',
  'service.framework.name',
  'service.language.name',
  'service.language.version',
  'service.name',
  'service.node.name',
  'service.runtime.name',
  'service.runtime.version',
  'host.architecture',
  'host.hostname',
  'host.name',
  'host.os.platform',
  'transaction.type',
  'transaction.name',
  'span.type',
  'span.subtype',
];

export function getBreakdownMetrics(events: ApmFields[]): ApmFields[] {
  const [transaction] = events;

  const metricsets: Map<string, ApmFields> = new Map();

  const eventsById: Record<string, ApmFields> = {};
  const activityByParentId: Record<string, Array<{ from: number; to: number }>> = {};
  for (const event of events) {
    const id =
      event['processor.event'] === 'transaction' ? event['transaction.id'] : event['span.id'];
    eventsById[id!] = event;

    const parentId = event['parent.id'];

    if (!parentId) {
      continue;
    }

    if (!activityByParentId[parentId]) {
      activityByParentId[parentId] = [];
    }

    const from = event['@timestamp']! * 1000;
    const to =
      from +
      (event['processor.event'] === 'transaction'
        ? event['transaction.duration.us']!
        : event['span.duration.us']!);

    activityByParentId[parentId].push({ from, to });
  }

  // eslint-disable-next-line guard-for-in
  for (const id in eventsById) {
    const event = eventsById[id];
    const activities = activityByParentId[id] || [];

    const timeStart = event['@timestamp']! * 1000;

    let selfTime = 0;
    let lastMeasurement = timeStart;
    const changeTimestamps = Array.from(
      new Set([
        timeStart,
        ...activities.flatMap((activity) => [activity.from, activity.to]),
        timeStart +
          (event['processor.event'] === 'transaction'
            ? event['transaction.duration.us']!
            : event['span.duration.us']!),
      ])
    );

    for (const timestamp of changeTimestamps) {
      const hasActiveChildren = activities.some(
        (activity) => activity.from < timestamp && activity.to >= timestamp
      );

      if (!hasActiveChildren) {
        selfTime += timestamp - lastMeasurement;
      }

      lastMeasurement = timestamp;
    }

    const key = {
      ...pick(event, KEY_FIELDS),
      'transaction.type': transaction['transaction.type'],
      'transaction.name': transaction['transaction.name'],
    };

    const metricsetId = hashKeysOf(key, KEY_FIELDS);

    let metricset = metricsets.get(metricsetId);

    if (!metricset) {
      metricset = {
        ...key,
        '@timestamp': Math.floor(event['@timestamp']! / (30 * 1000)) * 30 * 1000,
        'processor.event': 'metric',
        'processor.name': 'metric',
        'metricset.name': `span_breakdown`,
        'span.self_time.count': 0,
        'span.self_time.sum.us': 0,
        // store the generated metricset id for performance reasons (used in the breakdown metrics aggregator)
        meta: {
          'metricset.id': metricsetId,
        },
      };

      if (event['processor.event'] === 'transaction') {
        metricset['span.type'] = 'app';
      } else {
        metricset['span.type'] = event['span.type'];
        metricset['span.subtype'] = event['span.subtype'];
      }

      metricsets.set(metricsetId, metricset);
    }

    metricset['span.self_time.count']!++;
    metricset['span.self_time.sum.us']! += selfTime;
  }

  return Array.from(metricsets.values());
}
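A worked sketch of the self-time computation above (event values hypothetical): a 100ms transaction with one 40ms child span yields 60ms of transaction self time, because the interval where the child is active does not count toward the parent.

```ts
import { getBreakdownMetrics } from './get_breakdown_metrics';

// Hypothetical events: '@timestamp' is in ms and durations are in us,
// matching how the function above converts timestamps.
const metricsets = getBreakdownMetrics([
  {
    '@timestamp': 1000,
    'processor.event': 'transaction',
    'transaction.id': 'tx-1',
    'transaction.type': 'request',
    'transaction.duration.us': 100_000,
  },
  {
    '@timestamp': 1010,
    'processor.event': 'span',
    'span.id': 'span-1',
    'parent.id': 'tx-1',
    'span.type': 'db',
    'span.duration.us': 40_000,
  },
]);

// Two metricsets result: 'span.type': 'app' with 'span.self_time.sum.us': 60000
// for the transaction, and 'span.type': 'db' with 'span.self_time.sum.us': 40000
// for the span (which has no children, so its self time equals its duration).
```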
@@ -5,8 +5,6 @@
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import url from 'url';
import { BaseSpan } from './base_span';
import { generateShortId } from '../utils/generate_id';
import { ApmFields, SpanParams } from './apm_fields';
@@ -49,7 +47,7 @@ export function httpExitSpan({
  // host: 'opbeans-go:3000',
  // hostname: 'opbeans-go',
  // port: '3000',
  const destination = new url.URL(destinationUrl);
  const destination = new URL(destinationUrl);

  const spanType = 'external';
  const spanSubtype = 'http';
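A usage sketch (the DSL chain is assumed from the README example later in this diff; service and URL values are hypothetical): the helper derives the destination fields and span type/subtype from the parsed URL.

```ts
import { apm, httpExitSpan } from '@kbn/apm-synthtrace-client';

const instance = apm
  .service({ name: 'synth-go', environment: 'production', agentName: 'go' })
  .instance('instance-a');

// An HTTP exit span to another service: 'destination.address'/'destination.port'
// come from the URL, with 'span.type': 'external' and 'span.subtype': 'http'.
instance
  .span(httpExitSpan({ spanName: 'GET /api/products', destinationUrl: 'http://opbeans-go:3000' }))
  .timestamp(Date.now())
  .duration(100);
```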
@@ -10,6 +10,7 @@ import { ApmError } from './apm_error';
import { BaseSpan } from './base_span';
import { generateShortId } from '../utils/generate_id';
import { ApmFields } from './apm_fields';
import { getBreakdownMetrics } from './processors/get_breakdown_metrics';

export class Transaction extends BaseSpan {
  private _sampled: boolean = true;
@@ -64,11 +65,16 @@ export class Transaction extends BaseSpan {

    const errors = this._errors.flatMap((error) => error.serialize());

    const directChildren = this.getChildren().map((child) => child.fields);

    const events = [transaction];

    const breakdownMetrics = getBreakdownMetrics(events.concat(directChildren));

    if (this._sampled) {
      events.push(...spans);
    }

    return events.concat(errors);
    return events.concat(errors).concat(breakdownMetrics);
  }
}
@@ -6,9 +6,9 @@
 * Side Public License, v 1.
 */

export interface Fields {
export type Fields<TMeta extends Record<string, any> | undefined = undefined> = {
  '@timestamp'?: number;
}
} & (TMeta extends undefined ? {} : Partial<{ meta: TMeta }>);

export class Entity<TFields extends Fields> {
  constructor(public readonly fields: TFields) {
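A sketch of the new generic (type alias name hypothetical): `TMeta` adds an optional, typed `meta` bag, which `ApmFields` uses above to carry `'metricset.id'` without polluting the indexed fields.

```ts
import { Fields } from './entity';

type MetricFields = Fields<{ 'metricset.id': string }>;

const doc: MetricFields = {
  '@timestamp': Date.now(),
  meta: { 'metricset.id': 'abc123' }, // optional and fully typed
};
```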
packages/kbn-apm-synthtrace-client/src/lib/interval.ts (new file): 88 lines
@@ -0,0 +1,88 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import { castArray } from 'lodash';
import moment, { unitOfTime } from 'moment';
import { SynthtraceGenerator } from '../types';
import { Fields } from './entity';
import { Serializable } from './serializable';

export function parseInterval(interval: string): {
  intervalAmount: number;
  intervalUnit: unitOfTime.DurationConstructor;
} {
  const args = interval.match(/(\d+)(s|m|h|d)/);
  if (!args || args.length < 3) {
    throw new Error('Failed to parse interval');
  }
  return {
    intervalAmount: Number(args[1]),
    intervalUnit: args[2] as unitOfTime.DurationConstructor,
  };
}

interface IntervalOptions {
  from: Date;
  to: Date;
  interval: string;
  rate?: number;
}

export class Interval<TFields = Fields> {
  private readonly intervalAmount: number;
  private readonly intervalUnit: unitOfTime.DurationConstructor;

  private readonly _rate: number;
  constructor(private readonly options: IntervalOptions) {
    const { intervalAmount, intervalUnit } = parseInterval(options.interval);
    this.intervalAmount = intervalAmount;
    this.intervalUnit = intervalUnit;
    this._rate = options.rate || 1;
  }

  private getTimestamps() {
    const from = this.options.from.getTime();
    const to = this.options.to.getTime();

    let time: number = from;
    const diff = moment.duration(this.intervalAmount, this.intervalUnit).asMilliseconds();

    const timestamps: number[] = [];

    const rates = new Array(this._rate);

    while (time < to) {
      timestamps.push(...rates.fill(time));
      time += diff;
    }

    return timestamps;
  }

  *generator<TGeneratedFields = TFields>(
    map: (
      timestamp: number,
      index: number
    ) => Serializable<TGeneratedFields> | Array<Serializable<TGeneratedFields>>
  ): SynthtraceGenerator<TGeneratedFields> {
    const timestamps = this.getTimestamps();

    let index = 0;

    for (const timestamp of timestamps) {
      const events = castArray(map(timestamp, index));
      index++;
      for (const event of events) {
        yield event;
      }
    }
  }

  rate(rate: number): Interval {
    return new Interval({ ...this.options, rate });
  }
}
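A usage sketch for the file above (relative import and values hypothetical): an `Interval` yields `rate` copies of each timestamp, stepping by the parsed interval from `from` (inclusive) to `to` (exclusive).

```ts
import { Interval, parseInterval } from './interval';

parseInterval('30s'); // { intervalAmount: 30, intervalUnit: 's' }
parseInterval('1m'); // { intervalAmount: 1, intervalUnit: 'm' }

// Three one-minute steps at rate 2: each minute's timestamp is yielded twice.
const interval = new Interval({
  from: new Date('2022-01-01T00:00:00.000Z'),
  to: new Date('2022-01-01T00:03:00.000Z'),
  interval: '1m',
}).rate(2);
```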
@@ -15,20 +15,8 @@ export class Timerange {
    return new Interval({ from: this.from, to: this.to, interval });
  }

  ratePerMinute(rateInTpm: number) {
    const intervalPerSecond = Math.max(1, 60 / rateInTpm);

    // rate per second
    let interval = `${intervalPerSecond}s`;
    let rate = (rateInTpm / 60) * intervalPerSecond;

    // rate per minute
    if (!Number.isInteger(rate) || !Number.isInteger(intervalPerSecond)) {
      interval = '1m';
      rate = rate * 60;
    }

    return new Interval({ from: this.from, to: this.to, interval, yieldRate: rate });
  ratePerMinute(rate: number) {
    return this.interval(`1m`).rate(rate);
  }
}
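The rewrite simplifies `ratePerMinute`: instead of deriving a per-second interval and a corrected yield rate, it now delegates to a one-minute interval with the requested rate. A sketch (dates hypothetical):

```ts
import { timerange } from '@kbn/apm-synthtrace-client';

const from = new Date('2022-01-01T00:00:00.000Z');
const to = new Date('2022-01-01T01:00:00.000Z');

// Both expressions are now equivalent by construction:
timerange(from, to).ratePerMinute(60);
timerange(from, to).interval('1m').rate(60);
```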
packages/kbn-apm-synthtrace-client/src/lib/utils/hash.ts (new file): 22 lines
@@ -0,0 +1,22 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import { fast1a32 } from 'fnv-plus';
import { Fields } from '../entity';

export function hashKeysOf<T extends Fields>(source: T, keys: Array<keyof T>) {
  let hashed: string = '';
  for (const key of keys) {
    const value = String(source[key] || '');
    hashed = appendHash(hashed, value || '');
  }
  return hashed;
}

export function appendHash(hash: string, value: string) {
  return fast1a32(hash + ',' + value).toString();
}
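A usage sketch (field values hypothetical): `hashKeysOf` produces a stable FNV-1a-derived id from a subset of fields, which `getBreakdownMetrics` above uses to group metricsets.

```ts
import { hashKeysOf } from './hash';

const key = { 'service.name': 'synth-go', 'span.type': 'db' };
const metricsetId = hashKeysOf(key, ['service.name', 'span.type']);
// Missing keys hash as '', so documents with identical populated keys
// land in the same metricset.
```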
packages/kbn-apm-synthtrace-client/src/types/index.ts (new file): 24 lines
@@ -0,0 +1,24 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { BulkCreateOperation, BulkIndexOperation } from '@elastic/elasticsearch/lib/api/types';
import { Fields } from '../lib/entity';
import { Serializable } from '../lib/serializable';

export type SynthtraceESAction = { create: BulkCreateOperation } | { index: BulkIndexOperation };

export type ESDocumentWithOperation<TFields extends Fields> = {
  _index?: string;
  _action?: SynthtraceESAction;
} & TFields;

export type SynthtraceGenerator<TFields extends Fields> = Generator<Serializable<TFields>>;

export type SynthtraceProcessor<TFields extends Fields> = (
  fields: ESDocumentWithOperation<TFields>
) => ESDocumentWithOperation<TFields>;
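A sketch of the operation wrapper (index name hypothetical): a document routed to a specific index with an explicit bulk action, in the shape the synthtrace ES client consumes.

```ts
import { ApmFields, ESDocumentWithOperation } from '@kbn/apm-synthtrace-client';

const doc: ESDocumentWithOperation<ApmFields> = {
  _index: 'metrics-apm.internal-default', // hypothetical target index
  _action: { create: {} },
  '@timestamp': Date.now(),
  'processor.event': 'metric',
};
```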
packages/kbn-apm-synthtrace-client/tsconfig.json (new file): 15 lines
@@ -0,0 +1,15 @@
{
  "extends": "../../tsconfig.base.json",
  "compilerOptions": {
    "outDir": "target/types",
    "types": ["jest", "node"],
    "lib": [
      "ES2019"
    ],
    "esModuleInterop": true
  },
  "include": ["**/*.ts"],
  "exclude": [
    "target/**/*",
  ]
}
@@ -27,7 +27,9 @@ This library can currently be used in two ways:

```ts
import { service, timerange, toElasticsearchOutput } from '@kbn/apm-synthtrace';

const instance = service({name: 'synth-go', environment: 'production', agentName: 'go'}).instance('instance-a');
const instance = service({ name: 'synth-go', environment: 'production', agentName: 'go' }).instance(
  'instance-a'
);

const from = new Date('2021-01-01T12:00:00.000Z').getTime();
const to = new Date('2021-01-01T12:00:00.000Z').getTime();
@@ -37,7 +39,7 @@ const traceEvents = timerange(from, to)
  .rate(10)
  .flatMap((timestamp) =>
    instance
      .transaction({transactionName: 'GET /api/product/list'})
      .transaction({ transactionName: 'GET /api/product/list' })
      .timestamp(timestamp)
      .duration(1000)
      .success()
@@ -111,50 +113,40 @@ The following options are supported:

### Connection options

| Option       | Type      | Default    | Description                                                                                              |
| ------------ | --------- | :--------- | -------------------------------------------------------------------------------------------------------- |
| `--target`   | [string]  |            | Elasticsearch target                                                                                     |
| `--kibana`   | [string]  |            | Kibana target, used to bootstrap datastreams/mappings/templates/settings                                 |
| `--cloudId`  | [string]  |            | Provide connection information and will force APM on the cloud to migrate to run as a Fleet integration  |
| `--local`    | [boolean] |            | Shortcut during development, assumes `yarn es snapshot` and `yarn start` are running                     |
| `--username` | [string]  | `elastic`  | Basic authentication username                                                                            |
| `--password` | [string]  | `changeme` | Basic authentication password                                                                            |

| Option              | Type     | Default | Description                                                                                  |
| ------------------- | -------- | :------ | -------------------------------------------------------------------------------------------- |
| `--target`          | [string] |         | Elasticsearch target                                                                         |
| `--kibana`          | [string] |         | Kibana target, used to bootstrap datastreams/mappings/templates/settings                     |
| `--versionOverride` | [string] |         | String to be used for `observer.version`. Defaults to the version of the installed package.  |

Note:

- If you only specify `--target`, Synthtrace cannot automatically set up APM.
- If you specify both `--target` and `--kibana`, the tool will automatically attempt to install the appropriate APM package.
- For Cloud it's easiest to specify `--cloudId`, as it will unpack the ES/Kibana targets and migrate cloud over to managed APM automatically.
- If you only specify `--kibana` and it's using a cloud hostname, a very naive `--target` to Elasticsearch will be inferred.
- If `--target` is not set, Synthtrace will try to detect a locally running Elasticsearch and Kibana.
- For Elastic Cloud URLs, `--target` will be used to infer the location of the Cloud instance of Kibana.
- The latest version of the APM integration will automatically be installed and used for `observer.version` when ingesting APM data. In some cases, you'll want to use `--versionOverride` to set `observer.version` explicitly.
### Scenario options

| Option                 | Type      | Default | Description                                                                                                                                   |
| ---------------------- | --------- | :------ | --------------------------------------------------------------------------------------------------------------------------------------------- |
| `--from`               | [date]    | `now()` | The start of the time window                                                                                                                  |
| `--to`                 | [date]    |         | The end of the time window                                                                                                                    |
| `--maxDocs`            | [number]  |         | The maximum number of documents we are allowed to generate                                                                                    |
| `--maxDocsConfidence`  | [number]  | `1`     | Expert setting: `--maxDocs` relies on accurate tpm reporting of generators; setting this to >1 will widen the estimated data generation range |
| `--live`               | [boolean] |         | Generate and index data continuously                                                                                                          |
| `--dryRun`             | [boolean] |         | Enumerates the stream without sending events to Elasticsearch                                                                                 |
| `--scenarioOpts`       |           |         | Raw options specific to the scenario                                                                                                          |
| `--forceLegacyIndices` | [boolean] | `false` | Force writing to legacy indices                                                                                                               |

| Option           | Type      | Default | Description                          |
| ---------------- | --------- | :------ | ------------------------------------ |
| `--from`         | [date]    | `now()` | The start of the time window         |
| `--to`           | [date]    |         | The end of the time window           |
| `--live`         | [boolean] |         | Generate and index data continuously |
| `--scenarioOpts` |           |         | Raw options specific to the scenario |

Note:

- The default `--to` is `15m`, unless `--maxDocs` is specified, in which case `--to` is calculated based on the scenario's TPM.
- You can combine `--from`, `--maxDocs`, and `--to` with `--live` to back-fill some data.
- The default `--to` is `15m`.
- You can combine `--from` and `--to` with `--live` to back-fill some data.
### Setup options

| Option            | Type      | Default | Description                                                                                                |
| ----------------- | --------- | :------ | ---------------------------------------------------------------------------------------------------------- |
| `--numShards`     | [number]  |         | Updates the component templates to update the number of primary shards; requires `cloudId` to be provided |
| `--clean`         | [boolean] | `false` | Clean APM data before indexing new data                                                                    |
| `--workers`       | [number]  |         | Amount of Node.js worker threads                                                                           |
| `--logLevel`      | [enum]    | `info`  | Log level                                                                                                  |
| `--gcpRepository` | [string]  |         | Allows you to register a GCP repository in <client_name>:<bucket>[:base_path] format                       |
| `-p`              | [string]  |         | Specify multiple sets of stream aggregators to be included in the StreamProcessor                          |

| Option       | Type      | Default | Description                             |
| ------------ | --------- | :------ | --------------------------------------- |
| `--clean`    | [boolean] | `false` | Clean APM data before indexing new data |
| `--workers`  | [number]  |         | Amount of Node.js worker threads        |
| `--logLevel` | [enum]    | `info`  | Log level                               |

## Testing
@@ -6,16 +6,7 @@
 * Side Public License, v 1.
 */

export { timerange } from './src/lib/timerange';
export { apm } from './src/lib/apm';
export { dedot } from './src/lib/utils/dedot';
export { stackMonitoring } from './src/lib/stack_monitoring';
export { observer } from './src/lib/agent_config';
export { cleanWriteTargets } from './src/lib/utils/clean_write_targets';
export { createLogger, LogLevel } from './src/lib/utils/create_logger';

export type { Fields } from './src/lib/entity';
export type { ApmFields } from './src/lib/apm/apm_fields';
export type { ApmException, ApmSynthtraceEsClient } from './src/lib/apm';
export type { EntityIterable } from './src/lib/entity_iterable';
export { EntityArrayIterable } from './src/lib/entity_iterable';
export { ApmSynthtraceEsClient } from './src/lib/apm/client/apm_synthtrace_es_client';
export { ApmSynthtraceKibanaClient } from './src/lib/apm/client/apm_synthtrace_kibana_client';
@@ -1,5 +1,5 @@
{
  "type": "shared-common",
  "type": "shared-server",
  "id": "@kbn/apm-synthtrace",
  "devOnly": true,
  "owner": "@elastic/apm-ui"
@@ -6,16 +6,12 @@
 * Side Public License, v 1.
 */
import datemath from '@kbn/datemath';
import yargs from 'yargs/yargs';
import { Argv } from 'yargs';
import yargs from 'yargs/yargs';
import { intervalToMs } from './utils/interval_to_ms';
import { parseRunCliFlags } from './utils/parse_run_cli_flags';
import { startHistoricalDataUpload } from './utils/start_historical_data_upload';
import { startLiveDataUpload } from './utils/start_live_data_upload';
import { parseRunCliFlags } from './utils/parse_run_cli_flags';
import { getCommonServices } from './utils/get_common_services';
import { ApmSynthtraceKibanaClient } from '../lib/apm/client/apm_synthtrace_kibana_client';
import { StreamAggregator } from '../lib/stream_aggregator';
import { ServicMetricsAggregator } from '../lib/apm/aggregators/service_metrics_aggregator';

function options(y: Argv) {
  return y
@@ -32,31 +28,6 @@ function options(y: Argv)
      describe: 'Kibana target, used to bootstrap datastreams/mappings/templates/settings',
      string: true,
    })
    .option('apm', {
      describe:
        'APM Server target. Send data to APM over the intake API instead of generating ES documents',
      string: true,
    })
    .option('cloudId', {
      describe:
        'Provide connection information and will force APM on the cloud to migrate to run as a Fleet integration',
      string: true,
    })
    .option('local', {
      describe:
        'Shortcut during development, assumes `yarn es snapshot` and `yarn start` are running',
      boolean: true,
    })
    .option('username', {
      describe: 'Basic authentication username',
      string: true,
      default: 'elastic',
    })
    .option('password', {
      describe: 'Basic authentication password',
      string: true,
      default: 'changeme',
    })
    .option('from', {
      description: 'The start of the time window',
    })
@@ -67,25 +38,6 @@
      description: 'Generate and index data continuously',
      boolean: true,
    })
    .option('dryRun', {
      description: 'Enumerates the stream without sending events to Elasticsearch',
      boolean: true,
    })
    .option('maxDocs', {
      description: 'The maximum number of documents we are allowed to generate',
      number: true,
    })
    .option('maxDocsConfidence', {
      description:
        'Expert setting: --maxDocs relies on accurate tpm reporting of generators setting this to >1 will widen the estimated data generation range',
      number: true,
      default: 1,
    })
    .option('numShards', {
      description:
        'Updates the component templates to update the number of primary shards, requires cloudId to be provided',
      number: true,
    })
    .option('clean', {
      describe: 'Clean APM indices before indexing new data',
      default: false,
@@ -95,164 +47,60 @@
      describe: 'Amount of Node.js worker threads',
      number: true,
    })
    .option('concurrency', {
      describe: 'Concurrency of Elasticsearch client bulk indexing',
      number: true,
      default: 1,
    })
    .option('versionOverride', {
      describe: 'Package/observer version override',
      string: true,
    })
    .option('logLevel', {
      describe: 'Log level',
      default: 'info',
    })
    .option('forceLegacyIndices', {
      describe: 'Force writing to legacy indices',
      boolean: true,
    })
    .option('skipPackageInstall', {
      describe: 'Skip automatically installing the package',
      boolean: true,
      default: false,
    })
    .option('scenarioOpts', {
      describe: 'Options specific to the scenario',
      coerce: (arg) => {
        return arg as Record<string, any> | undefined;
      },
    })
    .option('gcpRepository', {
      describe:
        'Allows you to register a GCP repository in <client_name>:<bucket>[:base_path] format',
      string: true,
    })
    .option('streamProcessors', {
      describe: 'Specify multiple sets of stream aggregators to be included in the StreamProcessor',
      string: true,
      array: true,
      alias: 'p',
    })
    .conflicts('target', 'cloudId')
    .conflicts('kibana', 'cloudId')
    .conflicts('local', 'target')
    .conflicts('local', 'kibana')
    .conflicts('local', 'cloudId');
    .showHelpOnFail(false);
}

async function run(argv: RunCliFlags) {
  const runOptions = parseRunCliFlags(argv);

  const toMs = datemath.parse(String(argv.to ?? 'now'))!.valueOf();
  const to = new Date(toMs);

  const defaultTimeRange = '1m';

  const fromMs = argv.from
    ? datemath.parse(String(argv.from))!.valueOf()
    : toMs - intervalToMs(defaultTimeRange);
  const from = new Date(fromMs);

  const live = argv.live;

  if (live) {
    await startLiveDataUpload({ runOptions, start: from });
  } else {
    await startHistoricalDataUpload({ runOptions, from, to });
  }
}

export type RunCliFlags = ReturnType<typeof options>['argv'];

export function runSynthtrace() {
  yargs(process.argv.slice(2))
    .command(
      '*',
      'Generate data and index into Elasticsearch',
      options,
      async (argv: RunCliFlags) => {
        if (argv.local) {
          argv.target = 'http://localhost:9200';
        }
        if (argv.kibana && !argv.target) {
          const url = new URL(argv.kibana);
          // super naive inference of ES target based on public kibana Cloud endpoint
          if (url.hostname.match(/\.kb\./)) {
            argv.target = argv.kibana.replace(/\.kb\./, '.es.');
          }
        }

        const runOptions = parseRunCliFlags(argv);

        const { logger, apmEsClient, apmIntakeClient } = getCommonServices(runOptions);

        const toMs = datemath.parse(String(argv.to ?? 'now'))!.valueOf();
        const to = new Date(toMs);
        const defaultTimeRange = !runOptions.maxDocs ? '15m' : '520w';
        const fromMs = argv.from
          ? datemath.parse(String(argv.from))!.valueOf()
          : toMs - intervalToMs(defaultTimeRange);
        const from = new Date(fromMs);

        const live = argv.live;

        if (runOptions.dryRun) {
          await startHistoricalDataUpload(apmEsClient, logger, runOptions, from, to, '8.0.0');
          return;
        }

        // we need to know the running version to generate events that satisfy the min version requirements
        let version = await apmEsClient.runningVersion();
        logger.info(`Discovered Elasticsearch running version: ${version}`);
        version = version.replace('-SNAPSHOT', '');

        // We automatically set up managed APM either by migrating on cloud or installing the package locally
        if (runOptions.cloudId || argv.local || argv.kibana) {
          const kibanaClient = new ApmSynthtraceKibanaClient(logger);
          if (runOptions.cloudId) {
            await kibanaClient.migrateCloudToManagedApm(
              runOptions.cloudId,
              runOptions.username,
              runOptions.password
            );
          } else {
            let kibanaUrl: string | null = argv.kibana ?? null;
            if (argv.local) {
              kibanaUrl = await kibanaClient.discoverLocalKibana();
            }
            if (!kibanaUrl) throw Error('kibanaUrl could not be determined');
            if (!argv.skipPackageInstall) {
              await kibanaClient.installApmPackage(
                kibanaUrl,
                version,
                runOptions.username,
                runOptions.password
              );
            }
          }
        }

        if (runOptions.cloudId && runOptions.numShards && runOptions.numShards > 0) {
          await apmEsClient.updateComponentTemplates(runOptions.numShards);
        }
        const aggregators: StreamAggregator[] = [];
        const registry = new Map<string, () => StreamAggregator[]>([
          ['service', () => [new ServicMetricsAggregator()]],
        ]);
        if (runOptions.streamProcessors && runOptions.streamProcessors.length > 0) {
          for (const processorName of runOptions.streamProcessors) {
            const factory = registry.get(processorName);
            if (factory) {
              aggregators.push(...factory());
            } else {
              throw new Error(
                `No processor named ${processorName} configured on known processor registry`
              );
            }
          }
        }
        if (argv.clean) {
          if (argv.apm) {
            await apmEsClient.clean(['metrics-apm.service-*']);
          } else {
            await apmEsClient.clean(aggregators.map((a) => a.getDataStreamName() + '-*'));
          }
        }
        if (runOptions.gcpRepository) {
          await apmEsClient.registerGcpRepository(runOptions.gcpRepository);
        }

        logger.info(
          `Starting data generation\n: ${JSON.stringify(
            {
              ...runOptions,
              from: from.toISOString(),
              to: to.toISOString(),
            },
            null,
            2
          )}`
        );

        for (const aggregator of aggregators) await apmEsClient.createDataStream(aggregator);

        if (runOptions.maxDocs !== 0)
          await startHistoricalDataUpload(apmEsClient, logger, runOptions, from, to, version);

        if (live) {
          await startLiveDataUpload(apmEsClient, apmIntakeClient, logger, runOptions, to, version);
        }
      }
    )
    .command('*', 'Generate data and index into Elasticsearch', options, (argv: RunCliFlags) => {
      run(argv).catch((err) => {
        // eslint-disable-next-line no-console
        console.error(err);
        process.exit(1);
      });
    })
    .parse();
}
@@ -6,11 +6,17 @@
 * Side Public License, v 1.
 */

import { SynthtraceGenerator, Timerange } from '@kbn/apm-synthtrace-client';
import { Readable } from 'stream';
import { ApmSynthtraceEsClient } from '../lib/apm/client/apm_synthtrace_es_client';
import { Logger } from '../lib/utils/create_logger';
import { RunOptions } from './utils/parse_run_cli_flags';
import { EntityIterable } from '../lib/entity_iterable';

type Generate<TFields> = (range: { from: Date; to: Date }) => EntityIterable<TFields>;
export type Scenario<TFields> = (options: RunOptions) => Promise<{
type Generate<TFields> = (options: {
  range: Timerange;
}) => SynthtraceGenerator<TFields> | Array<SynthtraceGenerator<TFields>> | Readable;

export type Scenario<TFields> = (options: RunOptions & { logger: Logger }) => Promise<{
  bootstrap?: (options: { apmEsClient: ApmSynthtraceEsClient }) => Promise<void>;
  generate: Generate<TFields>;
  mapToIndex?: (data: Record<string, any>) => string;
}>;
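A minimal scenario sketch against the new `Scenario<TFields>` shape (file layout, import path, and option values hypothetical; the DSL chain mirrors the README example earlier in this diff):

```ts
import { apm, ApmFields } from '@kbn/apm-synthtrace-client';
import { Scenario } from '../cli/scenario'; // assumed relative path from a scenario file

const scenario: Scenario<ApmFields> = async ({ logger }) => {
  logger.info('generating 10 transactions per minute for one service');

  return {
    generate: ({ range }) =>
      range
        .interval('1m')
        .rate(10)
        .generator((timestamp) =>
          apm
            .service({ name: 'synth-go', environment: 'production', agentName: 'go' })
            .instance('instance-a')
            .transaction({ transactionName: 'GET /api/product/list' })
            .timestamp(timestamp)
            .duration(1000)
            .success()
        ),
  };
};

export default scenario;
```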
packages/kbn-apm-synthtrace/src/cli/utils/bootstrap.ts (new file): 49 lines
@@ -0,0 +1,49 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { createLogger } from '../../lib/utils/create_logger';
import { getEsClient } from './get_es_client';
import { getKibanaClient } from './get_kibana_client';
import { getServiceUrls } from './get_service_urls';
import { RunOptions } from './parse_run_cli_flags';

export async function bootstrap(runOptions: RunOptions) {
  const logger = createLogger(runOptions.logLevel);

  const { kibanaUrl, esUrl } = await getServiceUrls({ ...runOptions, logger });

  const kibanaClient = getKibanaClient({
    target: kibanaUrl,
    logger,
  });

  const latestPackageVersion = await kibanaClient.fetchLatestApmPackageVersion();

  const version = runOptions.versionOverride || latestPackageVersion;

  const apmEsClient = getEsClient({
    target: esUrl,
    logger,
    concurrency: runOptions.concurrency,
    version,
  });

  await kibanaClient.installApmPackage(latestPackageVersion);

  if (runOptions.clean) {
    await apmEsClient.clean();
  }

  return {
    logger,
    apmEsClient,
    version,
    kibanaUrl,
    esUrl,
  };
}
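A hypothetical consumer of `bootstrap()` (wiring simplified; the function name `runScenario` is illustrative, not from this PR):

```ts
import { bootstrap } from './bootstrap';
import { RunOptions } from './parse_run_cli_flags';

export async function runScenario(runOptions: RunOptions) {
  // One call resolves service URLs, installs the APM package, and returns
  // ready-to-use clients plus the resolved observer version.
  const { logger, apmEsClient, version } = await bootstrap(runOptions);
  logger.info(`Using APM package version: ${version}`);
  // ...generate events and hand them to apmEsClient for indexing...
}
```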
@@ -1,57 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { Client, ClientOptions } from '@elastic/elasticsearch';
import { ApmSynthtraceApmClient } from '../../lib/apm/client/apm_synthtrace_apm_client';
import { ApmSynthtraceEsClient } from '../../lib/apm/client/apm_synthtrace_es_client';
import { createLogger, Logger } from '../../lib/utils/create_logger';
import { RunOptions } from './parse_run_cli_flags';

export function getLogger({ logLevel }: RunOptions) {
  return createLogger(logLevel);
}

export function getCommonServices(
  { target, cloudId, apm, username, password, logLevel, forceLegacyIndices }: RunOptions,
  logger?: Logger
) {
  if (!target && !cloudId) {
    throw Error('target or cloudId needs to be specified');
  }
  const options: ClientOptions = !!target ? { node: target } : { cloud: { id: cloudId! } };
  options.auth = {
    username,
    password,
  };
  // Useful when debugging through mitmproxy
  /*
  options.Connection = HttpConnection;
  options.proxy = 'http://localhost:8080';
  options.tls = {
    rejectUnauthorized: false,
  };
  */
  const client = new Client(options);

  logger = logger ?? createLogger(logLevel);

  const apmEsClient = new ApmSynthtraceEsClient(client, logger, {
    forceLegacyIndices,
    refreshAfterIndex: false,
  });
  const apmIntakeClient = apm ? new ApmSynthtraceApmClient(apm, logger) : null;

  return {
    logger,
    apmEsClient,
    apmIntakeClient,
  };
}

export type RunServices = ReturnType<typeof getCommonServices>;
packages/kbn-apm-synthtrace/src/cli/utils/get_es_client.ts (new file): 36 lines
@@ -0,0 +1,36 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { Client } from '@elastic/elasticsearch';
import { ApmSynthtraceEsClient } from '../../lib/apm/client/apm_synthtrace_es_client';
import { Logger } from '../../lib/utils/create_logger';
import { RunOptions } from './parse_run_cli_flags';

export function getEsClient({
  target,
  logger,
  version,
  concurrency,
}: Pick<RunOptions, 'concurrency'> & {
  version: string;
  target: string;
  logger: Logger;
}) {
  const client = new Client({
    node: target,
  });

  const apmEsClient = new ApmSynthtraceEsClient({
    client,
    logger,
    version,
    concurrency,
  });

  return apmEsClient;
}
packages/kbn-apm-synthtrace/src/cli/utils/get_kibana_client.ts (new file): 19 lines

@@ -0,0 +1,19 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { ApmSynthtraceKibanaClient } from '../../lib/apm/client/apm_synthtrace_kibana_client';
import { Logger } from '../../lib/utils/create_logger';

export function getKibanaClient({ target, logger }: { target: string; logger: Logger }) {
  const kibanaClient = new ApmSynthtraceKibanaClient({
    logger,
    target,
  });

  return kibanaClient;
}
@@ -6,9 +6,9 @@
 * Side Public License, v 1.
 */

import { Fields } from '@kbn/apm-synthtrace-client';
import { Logger } from '../../lib/utils/create_logger';
import { Scenario } from '../scenario';
import { Fields } from '../../lib/entity';

export function getScenario({ file, logger }: { file: string; logger: Logger }) {
  logger.debug(`Loading scenario from ${file}`);
packages/kbn-apm-synthtrace/src/cli/utils/get_service_urls.ts (new file): 116 lines
@@ -0,0 +1,116 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import fetch from 'node-fetch';
import { format, parse, Url } from 'url';
import { Logger } from '../../lib/utils/create_logger';
import { RunOptions } from './parse_run_cli_flags';

async function discoverAuth(parsedTarget: Url) {
  const possibleCredentials = [`admin:changeme`, `elastic:changeme`];
  for (const auth of possibleCredentials) {
    const url = format({
      ...parsedTarget,
      auth,
    });
    let status: number;
    try {
      const response = await fetch(url);
      status = response.status;
    } catch (err) {
      status = 0;
    }

    if (status === 200) {
      return auth;
    }
  }

  throw new Error(`Failed to authenticate user for ${format(parsedTarget)}`);
}

async function getKibanaUrl({ target, logger }: { target: string; logger: Logger }) {
  try {
    logger.debug(`Checking Kibana URL ${target} for a redirect`);

    const unredirectedResponse = await fetch(target, {
      method: 'HEAD',
      follow: 1,
      redirect: 'manual',
    });

    const discoveredKibanaUrl =
      unredirectedResponse.headers.get('location')?.replace('/spaces/enter', '') || target;

    const parsedTarget = parse(target);

    const parsedDiscoveredUrl = parse(discoveredKibanaUrl);

    const discoveredKibanaUrlWithAuth = format({
      ...parsedDiscoveredUrl,
      auth: parsedTarget.auth,
    });

    const redirectedResponse = await fetch(discoveredKibanaUrlWithAuth, {
      method: 'HEAD',
    });

    if (redirectedResponse.status !== 200) {
      throw new Error(
        `Expected HTTP 200 from ${discoveredKibanaUrlWithAuth}, got ${redirectedResponse.status}`
      );
    }

    logger.info(`Discovered kibana running at: ${discoveredKibanaUrlWithAuth}`);

    return discoveredKibanaUrlWithAuth.replace(/\/$/, '');
  } catch (error) {
    throw new Error(`Could not connect to Kibana: ` + error.message);
  }
}

export async function getServiceUrls({ logger, target, kibana }: RunOptions & { logger: Logger }) {
  if (!target) {
    // assume things are running locally
    kibana = kibana || 'http://localhost:5601';
    target = 'http://localhost:9200';
  }

  if (!target) {
    throw new Error('Could not determine an Elasticsearch target');
  }

  const parsedTarget = parse(target);

  let auth = parsedTarget.auth;

  if (!parsedTarget.auth) {
    auth = await discoverAuth(parsedTarget);
  }

  const formattedEsUrl = format({
    ...parsedTarget,
    auth,
  });

  const suspectedKibanaUrl = kibana || target.replace('.es', '.kb');

  const parsedKibanaUrl = parse(suspectedKibanaUrl);

  const kibanaUrlWithAuth = format({
    ...parsedKibanaUrl,
    auth,
  });

  const validatedKibanaUrl = await getKibanaUrl({ target: kibanaUrlWithAuth, logger });

  return {
    kibanaUrl: validatedKibanaUrl,
    esUrl: formattedEsUrl,
  };
}
packages/kbn-apm-synthtrace/src/cli/utils/logger_proxy.ts (new file): 41 lines
@@ -0,0 +1,41 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import util from 'util';
import { parentPort, isMainThread, workerData } from 'worker_threads';
import { createLogger, Logger, LogLevel } from '../../lib/utils/create_logger';
import { logPerf } from '../../lib/utils/log_perf';
import { WorkerData } from './synthtrace_worker';

const { workerId } = isMainThread ? { workerId: -1 } : (workerData as WorkerData);

function getLogMethod(log: LogLevel) {
  return (...args: any) => {
    parentPort?.postMessage({
      log,
      args: [`[${workerId}]`].concat(
        args.map((arg: any) =>
          typeof arg === 'string' || typeof arg === 'number'
            ? arg
            : util.inspect(arg, { depth: 10 })
        )
      ),
    });
  };
}

// logging proxy to the main thread, ensures we see real-time logging
export const loggerProxy: Logger = isMainThread
  ? createLogger(LogLevel.trace)
  : {
      perf: <T extends any>(name: string, cb: () => T): T => {
        return logPerf(loggerProxy, LogLevel.trace, name, cb);
      },
      debug: getLogMethod(LogLevel.debug),
      info: getLogMethod(LogLevel.info),
      error: getLogMethod(LogLevel.error),
    };
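A note on the contract this proxy relies on: each worker-side log call becomes one structured message, e.g. { log: LogLevel.info, args: ['[3]', 'indexed 10000 docs'] } (values illustrative), which the main thread replays through its own logger so worker output appears in real time. The consuming switch over LogLevel appears in start_historical_data_upload below.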
@@ -63,22 +63,13 @@ export function parseRunCliFlags(flags: RunCliFlags) {
  return {
    ...pick(
      flags,
      'maxDocs',
      'maxDocsConfidence',
      'target',
      'apm',
      'cloudId',
      'username',
      'password',
      'workers',
      'flushSizeBulk',
      'flushSize',
      'numShards',
      'scenarioOpts',
      'forceLegacyIndices',
      'dryRun',
      'gcpRepository',
      'streamProcessors'
      'kibana',
      'concurrency',
      'versionOverride',
      'clean'
    ),
    logLevel: parsedLogLevel,
    file: parsedFile,
@@ -5,104 +5,56 @@
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import moment from 'moment';
import { Worker } from 'worker_threads';
import Path from 'path';
import { range } from 'lodash';
import pLimit from 'p-limit';
import moment from 'moment';
import { cpus } from 'os';
import Path from 'path';
import { Worker } from 'worker_threads';
import { LogLevel } from '../../..';
import { bootstrap } from './bootstrap';
import { RunOptions } from './parse_run_cli_flags';
import { getScenario } from './get_scenario';
import { ApmSynthtraceEsClient, LogLevel } from '../../..';
import { Logger } from '../../lib/utils/create_logger';
import { WorkerData } from './synthtrace_worker';

export async function startHistoricalDataUpload(
  esClient: ApmSynthtraceEsClient,
  logger: Logger,
  runOptions: RunOptions,
  from: Date,
  to: Date,
  version: string
) {
  // if we want to generate a maximum number of documents, reverse generation to descend.
  [from, to] = runOptions.maxDocs ? [to, from] : [from, to];

  const file = runOptions.file;
  const scenario = await logger.perf('get_scenario', () => getScenario({ file, logger }));
  const { generate } = await scenario(runOptions);
export async function startHistoricalDataUpload({
  runOptions,
  from,
  to,
}: {
  runOptions: RunOptions;
  from: Date;
  to: Date;
}) {
  const { logger, esUrl, version } = await bootstrap(runOptions);

  const cores = cpus().length;
  // settle on a reasonable max concurrency, arbitrarily capping at 10.
  let maxConcurrency = Math.min(10, cores - 1);
  // maxWorkers to be spawned is double that of maxConcurrency. We estimate the number of ranges over
  // maxConcurrency; if that is too conservative this provides more available workers to complete the job.
  // If any worker finds that work is already completed it will spin down immediately.
  let maxWorkers = maxConcurrency * 2;
  logger.info(
    `Discovered ${cores} cores, splitting work over ${maxWorkers} workers with limited concurrency: ${maxConcurrency}`
  );

  // if --workers N is specified it should take precedence over inferred maximum workers
  if (runOptions.workers) {
    // ensure maxWorkers is at least 1
    maxWorkers = Math.max(1, runOptions.workers);
    // ensure max concurrency is at least 1, or the ceil of --workers N / 2
    maxConcurrency = Math.ceil(Math.max(1, maxWorkers / 2));
    logger.info(
      `updating maxWorkers to ${maxWorkers} and maxConcurrency to ${maxConcurrency} because it was explicitly set through --workers`
    );
  }
  let workers = Math.min(runOptions.workers ?? 10, cores - 1);

  const rangeEnd = to;

  const events = logger.perf('generate_scenario', () => generate({ from, to }));
  const ratePerMinute = events.estimatedRatePerMinute();
  logger.info(
    `Scenario is generating ${ratePerMinute.toLocaleString()} events per minute interval`
  );
  let rangeEnd = to;
  if (runOptions.maxDocs) {
    // estimate a more accurate range end for when --maxDocs is specified
    rangeEnd = moment(from)
      // estimatedRatePerMinute() is not exact if the generator is yielding variable documents;
      // the rate is calculated by peeking the first yielded event and its children.
      // for really complex cases manually specifying --to is encouraged.
      .subtract((runOptions.maxDocs / ratePerMinute) * runOptions.maxDocsConfidence, 'm')
      .toDate();
  }
  const diff = moment(from).diff(rangeEnd);

  const d = moment.duration(Math.abs(diff), 'ms');
  logger.info(
    `Range: ${d.years()} years ${d.days()} days, ${d.hours()} hours ${d.minutes()} minutes ${d.seconds()} seconds`
  );

  // make sure ranges cover at least 100k documents
  const minIntervalSpan = moment.duration(100000 / ratePerMinute, 'm');
  const minIntervalSpan = moment.duration(60, 'm');

  const minNumberOfRanges = d.asMilliseconds() / minIntervalSpan.asMilliseconds();
  if (minNumberOfRanges < maxWorkers) {
    maxWorkers = Math.max(1, Math.floor(minNumberOfRanges));
    maxConcurrency = Math.max(1, maxWorkers / 2);
  if (minNumberOfRanges < workers) {
    workers = Math.max(1, Math.floor(minNumberOfRanges));
    if (runOptions.workers) {
      logger.info(
        `Ignoring --workers ${runOptions.workers} since each worker would not see enough data`
      );
    }
    logger.info(
      `updating maxWorkers to ${maxWorkers} and maxConcurrency to ${maxConcurrency} to ensure each worker does enough work`
    );
    logger.info(`updating maxWorkers to ${workers} to ensure each worker does enough work`);
  }

  logger.info(`Generating data from ${from.toISOString()} to ${rangeEnd.toISOString()}`);

  type WorkerMessages =
    | { log: LogLevel; args: any[] }
    | { workerIndex: number; processedDocuments: number }
    | { workerIndex: number; firstTimestamp: Date }
    | { workerIndex: number; lastTimestamp: Date };

  interface WorkerTotals {
    total: number;
    bucketFrom: Date;
    bucketTo: Date;
    firstTimestamp?: Date;
    lastTimestamp?: Date;
  interface WorkerMessages {
    log: LogLevel;
    args: any[];
  }

  function rangeStep(interval: number) {

@@ -112,8 +64,8 @@ export async function startHistoricalDataUpload(

  // precalculate intervals to spawn workers over.
  // abs() the difference to make add/subtract explicit in rangeStep() in favor of subtracting a negative number
  const intervalSpan = Math.abs(diff / maxWorkers);
  const intervals = range(0, maxWorkers)
  const intervalSpan = Math.abs(diff / workers);
  const intervals = range(0, workers)
    .map((i) => intervalSpan * i)
    .map((interval, index) => ({
      workerIndex: index,

@@ -121,13 +73,6 @@

      bucketTo: rangeStep(interval + intervalSpan),
    }));

  // precalculate interval state for each worker to report on.
  let totalProcessed = 0;
  const workerProcessed = range(0, maxWorkers).reduce<Record<number, WorkerTotals>>((p, c, i) => {
    p[i] = { total: 0, bucketFrom: intervals[i].bucketFrom, bucketTo: intervals[i].bucketTo };
    return p;
  }, {});

  function runService({
    bucketFrom,
    bucketTo,

@@ -138,56 +83,34 @@

    workerIndex: number;
  }) {
    return new Promise((resolve, reject) => {
      logger.info(`Setting up Worker: ${workerIndex}`);
      if (runOptions.maxDocs && totalProcessed >= runOptions.maxDocs + 10000) {
        logger.info(
          `Worker ${workerIndex} has no need to run since ${totalProcessed} documents were already processed`
        );
        return resolve(null);
      }
      const progressToConsole = runOptions?.maxDocs
        ? Math.min(2000000, runOptions.maxDocs / 20)
        : 2000000;
      logger.debug(`Setting up Worker: ${workerIndex}`);
      const workerData: WorkerData = {
        runOptions,
        bucketFrom,
        bucketTo,
        workerId: workerIndex.toString(),
        esUrl,
        version,
      };
      const worker = new Worker(Path.join(__dirname, './worker.js'), {
        workerData: {
          runOptions,
          bucketFrom,
          bucketTo,
          workerIndex,
          version,
        },
        workerData,
      });
      worker.on('message', (message: WorkerMessages) => {
        if ('workerIndex' in message) {
          if ('processedDocuments' in message) {
            totalProcessed += message.processedDocuments;
            workerProcessed[workerIndex].total += message.processedDocuments;
            const check = Math.round(totalProcessed / 10000) * 10000;
            if (check % progressToConsole === 0) {
              logger.info(`processed: ${totalProcessed} documents`);
            }
          }
          if ('firstTimestamp' in message)
            workerProcessed[message.workerIndex].firstTimestamp = message.firstTimestamp;
          if ('lastTimestamp' in message)
            workerProcessed[message.workerIndex].lastTimestamp = message.lastTimestamp;
        } else {
          switch (message.log) {
            case LogLevel.debug:
              logger.debug.apply({}, message.args);
              return;
            case LogLevel.info:
              logger.info.apply({}, message.args);
              return;
            case LogLevel.trace:
              logger.debug.apply({}, message.args);
              return;
            case LogLevel.error:
              logger.error.apply({}, message.args);
              return;
            default:
              logger.info(message);
          }
        switch (message.log) {
          case LogLevel.debug:
            logger.debug.apply({}, message.args);
            return;
          case LogLevel.info:
            logger.info.apply({}, message.args);
            return;
          case LogLevel.trace:
            logger.debug.apply({}, message.args);
            return;
          case LogLevel.error:
            logger.error.apply({}, message.args);
            return;
          default:
            logger.info(message);
        }
      });
      worker.on('error', (message) => {

@@ -201,22 +124,11 @@

      }
      resolve(null);
    });
    worker.postMessage('setup');
    worker.postMessage('start');
  });
}

  const limiter = pLimit(Math.max(1, Math.floor(intervals.length / 2)));
  const workers = range(0, intervals.length).map((index) => () => runService(intervals[index]));
  return Promise.all(workers.map((worker) => limiter(() => worker())))
    .then(async () => {
      if (!runOptions.dryRun) {
        await esClient.refresh(runOptions.apm ? ['metrics-apm.service-*'] : []);
      }
    })
    .then(() => {
      // eslint-disable-next-line no-console
      console.table(workerProcessed);
      logger.info(`Finished producing ${totalProcessed} events`);
    });
  const workerServices = range(0, intervals.length).map((index) => runService(intervals[index]));

  return Promise.all(workerServices);
}
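A worked example of the partitioning above, using made-up numbers: with workers = 4 over an 8 hour range, intervalSpan = |diff / workers| = 2h, so rangeStep() yields contiguous buckets [from, from+2h), [from+2h, from+4h), and so on. And because minIntervalSpan is 60 minutes, a 3 hour range clamps workers down to floor(180 / 60) = 3 so that every spawned worker still covers a full bucket.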
@@ -6,81 +6,78 @@
 * Side Public License, v 1.
 */

import { partition } from 'lodash';
import { timerange } from '@kbn/apm-synthtrace-client';
import { castArray } from 'lodash';
import { PassThrough, Readable, Writable } from 'stream';
import { isGeneratorObject } from 'util/types';
import { awaitStream } from '../../lib/utils/wait_until_stream_finished';
import { bootstrap } from './bootstrap';
import { getScenario } from './get_scenario';
import { RunOptions } from './parse_run_cli_flags';
import { ApmFields } from '../../lib/apm/apm_fields';
import { ApmSynthtraceEsClient } from '../../lib/apm';
import { Logger } from '../../lib/utils/create_logger';
import { EntityArrayIterable } from '../../lib/entity_iterable';
import { StreamProcessor } from '../../lib/stream_processor';
import { ApmSynthtraceApmClient } from '../../lib/apm/client/apm_synthtrace_apm_client';

export async function startLiveDataUpload(
  esClient: ApmSynthtraceEsClient,
  apmIntakeClient: ApmSynthtraceApmClient | null,
  logger: Logger,
  runOptions: RunOptions,
  start: Date,
  version: string
) {
export async function startLiveDataUpload({
  runOptions,
  start,
}: {
  runOptions: RunOptions;
  start: Date;
}) {
  const file = runOptions.file;

  const scenario = await getScenario({ file, logger });
  const { generate, mapToIndex } = await scenario(runOptions);
  const { logger, apmEsClient } = await bootstrap(runOptions);

  const scenario = await getScenario({ file, logger });
  const { generate } = await scenario({ ...runOptions, logger });

  let queuedEvents: ApmFields[] = [];
  let requestedUntil: Date = start;
  const bucketSizeInMs = 1000 * 60;
  let requestedUntil = start;

  const stream = new PassThrough({
    objectMode: true,
  });

  apmEsClient.index(stream);

  function closeStream() {
    stream.end(() => {
      process.exit(0);
    });
  }

  process.on('SIGINT', closeStream);
  process.on('SIGTERM', closeStream);
  process.on('SIGQUIT', closeStream);

  async function uploadNextBatch() {
    const end = new Date();
    if (end > requestedUntil) {
    const now = Date.now();

    if (now > requestedUntil.getTime()) {
      const bucketFrom = requestedUntil;
      const bucketTo = new Date(requestedUntil.getTime() + bucketSizeInMs);
      // TODO this materializes into an array; the assumption is that the live buffer will fit in memory
      const nextEvents = logger.perf('execute_scenario', () =>
        generate({ from: bucketFrom, to: bucketTo }).toArray()
      );

      logger.info(
        `Requesting ${new Date(bucketFrom).toISOString()} to ${new Date(
          bucketTo
        ).toISOString()}, events: ${nextEvents.length}`
        `Requesting ${new Date(bucketFrom).toISOString()} to ${new Date(bucketTo).toISOString()}`
      );
      queuedEvents.push(...nextEvents);

      const next = logger.perf('execute_scenario', () =>
        generate({ range: timerange(bucketFrom.getTime(), bucketTo.getTime()) })
      );

      const concatenatedStream = castArray(next)
        .reverse()
        .reduce<Writable>((prev, current) => {
          const currentStream = isGeneratorObject(current) ? Readable.from(current) : current;
          return currentStream.pipe(prev);
        }, new PassThrough({ objectMode: true }));

      concatenatedStream.pipe(stream, { end: false });

      await awaitStream(concatenatedStream);

      await apmEsClient.refresh();

      requestedUntil = bucketTo;
    }

    const [eventsToUpload, eventsToRemainInQueue] = partition(
      queuedEvents,
      (event) => event['@timestamp'] !== undefined && event['@timestamp'] <= end.getTime()
    );

    logger.info(`Uploading until ${new Date(end).toISOString()}, events: ${eventsToUpload.length}`);

    queuedEvents = eventsToRemainInQueue;
    const streamProcessor = new StreamProcessor({
      version,
      logger,
      processors: StreamProcessor.apmProcessors,
      maxSourceEvents: runOptions.maxDocs,
      name: `Live index`,
    });
    await logger.perf('index_live_scenario', async () => {
      const events = new EntityArrayIterable(eventsToUpload);
      const streamToBulkOptions = {
        concurrency: runOptions.workers,
        maxDocs: runOptions.maxDocs,
        mapToIndex,
        dryRun: false,
      };
      if (apmIntakeClient) {
        await apmIntakeClient.index(events, streamToBulkOptions, streamProcessor);
      } else {
        await esClient.index(events, streamToBulkOptions, streamProcessor);
      }
    });
  }

  do {
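One subtlety in the reduce() above, worth calling out: stream.pipe(dest) returns dest, so each generator (wrapped with Readable.from() when isGeneratorObject() matches) is piped into the same object-mode PassThrough, which is what awaitStream() then waits on. A sketch of the equivalent wiring for two illustrative generators a and b:

// const out = new PassThrough({ objectMode: true });
// Readable.from(b).pipe(out); // reduce() visits the reversed array first
// Readable.from(a).pipe(out);
// out.pipe(stream, { end: false });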
@@ -5,100 +5,82 @@
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
// import pLimit from 'p-limit';
import { workerData, parentPort } from 'worker_threads';
import { RunOptions } from './parse_run_cli_flags';
import { parentPort, workerData } from 'worker_threads';
import pidusage from 'pidusage';
import { memoryUsage } from 'process';
import { timerange } from '@kbn/apm-synthtrace-client';
import { getEsClient } from './get_es_client';
import { getScenario } from './get_scenario';
import { StreamToBulkOptions } from '../../lib/apm/client/apm_synthtrace_es_client';
import { getCommonServices } from './get_common_services';
import { LogLevel } from '../../lib/utils/create_logger';
import { StreamProcessor } from '../../lib/stream_processor';
import { Scenario } from '../scenario';
import { EntityIterable, Fields } from '../../..';
import { StreamAggregator } from '../../lib/stream_aggregator';
import { ServicMetricsAggregator } from '../../lib/apm/aggregators/service_metrics_aggregator';

// logging proxy to the main thread, ensures we see real-time logging
const l = {
  perf: <T extends any>(name: string, cb: () => T): T => {
    return cb();
  },
  debug: (...args: any[]) => parentPort?.postMessage({ log: LogLevel.debug, args }),
  info: (...args: any[]) => parentPort?.postMessage({ log: LogLevel.info, args }),
  error: (...args: any[]) => parentPort?.postMessage({ log: LogLevel.error, args }),
};
import { loggerProxy } from './logger_proxy';
import { RunOptions } from './parse_run_cli_flags';

export interface WorkerData {
  bucketFrom: Date;
  bucketTo: Date;
  runOptions: RunOptions;
  workerIndex: number;
  workerId: string;
  esUrl: string;
  version: string;
}

const { bucketFrom, bucketTo, runOptions, workerIndex, version } = workerData as WorkerData;
const { bucketFrom, bucketTo, runOptions, esUrl, version } = workerData as WorkerData;

const { logger, apmEsClient, apmIntakeClient } = getCommonServices(runOptions, l);
const file = runOptions.file;
let scenario: Scenario<Fields>;
let events: EntityIterable<Fields>;
let streamToBulkOptions: StreamToBulkOptions;
let streamProcessor: StreamProcessor;

async function setup() {
  scenario = await logger.perf('get_scenario', () => getScenario({ file, logger }));
  const { generate, mapToIndex } = await scenario(runOptions);

  events = logger.perf('generate_scenario', () => generate({ from: bucketFrom, to: bucketTo }));
  streamToBulkOptions = {
    maxDocs: runOptions.maxDocs,
    mapToIndex,
    dryRun: !!runOptions.dryRun,
  };
  streamToBulkOptions.itemStartStopCallback = (item, done) => {
    if (!item) return;
    if (!done) {
      parentPort?.postMessage({ workerIndex, firstTimestamp: item['@timestamp'] });
    } else {
      parentPort?.postMessage({ workerIndex, lastTimestamp: item['@timestamp'] });
    }
  };
  const aggregators: StreamAggregator[] = [new ServicMetricsAggregator()];
  // If we are sending data to apm-server we do not have to create any aggregates in the stream processor
  streamProcessor = new StreamProcessor({
async function start() {
  const logger = loggerProxy;
  const apmEsClient = getEsClient({
    concurrency: runOptions.concurrency,
    target: esUrl,
    logger,
    version,
    processors: apmIntakeClient ? [] : StreamProcessor.apmProcessors,
    streamAggregators: apmIntakeClient ? [] : aggregators,
    maxSourceEvents: runOptions.maxDocs,
    logger: l,
    processedCallback: (processedDocuments) => {
      parentPort!.postMessage({ workerIndex, processedDocuments });
    },
    name: `Worker ${workerIndex}`,
  });
}

async function doWork() {
  const file = runOptions.file;

  const scenario = await logger.perf('get_scenario', () => getScenario({ file, logger }));

  logger.info(`Running scenario from ${bucketFrom.toISOString()} to ${bucketTo.toISOString()}`);

  const { generate, bootstrap } = await scenario({ ...runOptions, logger });

  if (bootstrap) {
    await bootstrap({ apmEsClient });
  }

  logger.debug('Generating scenario');

  const generators = logger.perf('generate_scenario', () =>
    generate({ range: timerange(bucketFrom, bucketTo) })
  );

  logger.debug('Indexing scenario');

  function mb(value: number): string {
    return Math.round(value / 1024 ** 2).toString() + 'mb';
  }

  setInterval(async () => {
    const stats = await pidusage(process.pid);
    const mem = memoryUsage();
    logger.info(`cpu: ${stats.cpu}, memory: ${mb(mem.heapUsed)}/${mb(mem.heapTotal)}`);
  }, 5000);

  await logger.perf('index_scenario', async () => {
    if (apmIntakeClient) {
      await apmIntakeClient.index(events, streamToBulkOptions, streamProcessor);
    } else {
      await apmEsClient.index(events, streamToBulkOptions, streamProcessor);
    }
    await apmEsClient.index(generators);
    await apmEsClient.refresh();
  });
}

parentPort!.on('message', async (message) => {
  if (message === 'setup') {
    await setup();
parentPort!.on('message', (message) => {
  if (message !== 'start') {
    return;
  }
  if (message === 'start') {
    try {
      await doWork();

  start()
    .then(() => {
      process.exit(0);
    } catch (error) {
      l.info(error);
      process.exit(2);
    }
  }
    })
    .catch((err) => {
      loggerProxy.error(err);
      process.exit(1);
    });
});
@@ -5,15 +5,7 @@
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import { ApmFields } from '@kbn/apm-synthtrace-client';
import { createMetricAggregatorFactory } from '../../utils/create_metric_aggregator_factory';

import { Serializable } from '../serializable';
import { StackMonitoringFields } from './stack_monitoring_fields';
import { KibanaStats } from './kibana_stats';

export class Kibana extends Serializable<StackMonitoringFields> {
  stats() {
    return new KibanaStats({
      ...this.fields,
    });
  }
}
export const createApmMetricAggregator = createMetricAggregatorFactory<ApmFields>();
@@ -0,0 +1,45 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import { ApmFields } from '@kbn/apm-synthtrace-client';
import { identity, negate } from 'lodash';
import { createFilterTransform, fork } from '../../utils/stream_utils';
import { createApmMetricAggregator } from './create_apm_metric_aggregator';

const filter = (event: ApmFields) =>
  event['processor.event'] === 'metric' && event['metricset.name'] === 'span_breakdown';

export function createBreakdownMetricsAggregator(flushInterval: string) {
  const dropProcessedEventsStream = createFilterTransform(negate(filter));

  const aggregatorStream = createApmMetricAggregator(
    {
      filter,
      getAggregateKey: (event) => {
        return event.meta!['metricset.id'];
      },
      flushInterval,
      init: (event) => {
        return {
          ...event,
          meta: {},
          'span.self_time.count': 0,
          'span.self_time.sum.us': 0,
        };
      },
    },
    (metric, event) => {
      metric['span.self_time.count'] += event['span.self_time.count']!;
      metric['span.self_time.sum.us'] += event['span.self_time.sum.us']!;
    },
    identity
  );

  const mergedStreams = fork(dropProcessedEventsStream, aggregatorStream);

  return mergedStreams;
}
@@ -0,0 +1,86 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import { pick } from 'lodash';
import { hashKeysOf, ApmFields } from '@kbn/apm-synthtrace-client';
import { createLosslessHistogram } from '../../utils/create_lossless_histogram';
import { createApmMetricAggregator } from './create_apm_metric_aggregator';

const KEY_FIELDS: Array<keyof ApmFields> = [
  'agent.name',
  'service.environment',
  'service.name',
  'service.node.name',
  'transaction.type',
];

export function createServiceMetricsAggregator(flushInterval: string) {
  return createApmMetricAggregator(
    {
      filter: (event) => true,
      getAggregateKey: (event) => {
        // see https://github.com/elastic/apm-server/blob/main/x-pack/apm-server/aggregation/txmetrics/aggregator.go
        return hashKeysOf(event, KEY_FIELDS);
      },
      flushInterval,
      init: (event) => {
        const set = pick(event, KEY_FIELDS);

        return {
          ...set,
          'metricset.name': 'service',
          'processor.event': 'metric',
          'processor.name': 'metric',
          'transaction.duration.histogram': createLosslessHistogram(),
          'transaction.duration.summary': {
            min: 0,
            max: 0,
            value_count: 0,
            sum: 0,
          },
          'event.outcome_numeric': {
            sum: 0,
            value_count: 0,
          },
        };
      },
    },
    (metric, event) => {
      if (event['processor.event'] === 'transaction') {
        const duration = event['transaction.duration.us']!;

        metric['transaction.duration.histogram'].record(duration);

        if (event['event.outcome'] === 'success' || event['event.outcome'] === 'failure') {
          metric['event.outcome_numeric'].value_count += 1;
        }

        if (event['event.outcome'] === 'success') {
          metric['event.outcome_numeric'].sum += 1;
        }

        const summary = metric['transaction.duration.summary'];

        summary.min = Math.min(duration, metric['transaction.duration.summary'].min);
        summary.max = Math.max(duration, metric['transaction.duration.summary'].max);
        summary.sum += duration;
        summary.value_count += 1;
      }
    },
    (metric) => {
      const serialized = metric['transaction.duration.histogram'].serialize();
      metric['transaction.duration.histogram'] = {
        // @ts-expect-error
        values: serialized.values,
        counts: serialized.counts,
      };
      // @ts-expect-error
      metric._doc_count = serialized.total;
      return metric;
    }
  );
}
@@ -0,0 +1,57 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import { pick } from 'lodash';
import { ApmFields, hashKeysOf } from '@kbn/apm-synthtrace-client';
import { createApmMetricAggregator } from './create_apm_metric_aggregator';

const KEY_FIELDS: Array<keyof ApmFields> = [
  'agent.name',
  'service.name',
  'service.environment',
  'span.destination.service.resource',
  'event.outcome',
  'span.name',
  'service.target.name',
  'service.target.type',
];

export function createSpanMetricsAggregator(flushInterval: string) {
  return createApmMetricAggregator(
    {
      filter: (event) =>
        event['processor.event'] === 'span' && !!event['span.destination.service.resource'],
      getAggregateKey: (event) => {
        // see https://github.com/elastic/apm-server/blob/main/x-pack/apm-server/aggregation/spanmetrics/aggregator.go
        const key = hashKeysOf(event, KEY_FIELDS);
        return key;
      },
      flushInterval,
      init: (event) => {
        const set = pick(event, KEY_FIELDS);

        return {
          ...set,
          'metricset.name': 'service_destination',
          'processor.event': 'metric',
          'processor.name': 'metric',
          'span.destination.service.response_time.count': 0,
          'span.destination.service.response_time.sum.us': 0,
        };
      },
    },
    (metric, event) => {
      metric['span.destination.service.response_time.count'] += 1;
      metric['span.destination.service.response_time.sum.us'] += event['span.duration.us']!;
    },
    (metric) => {
      // @ts-expect-error
      metric._doc_count = metric['span.destination.service.response_time.count'];
      return metric;
    }
  );
}
@@ -0,0 +1,92 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import { ApmFields, appendHash, hashKeysOf } from '@kbn/apm-synthtrace-client';
import { pick } from 'lodash';
import { createLosslessHistogram } from '../../utils/create_lossless_histogram';
import { createApmMetricAggregator } from './create_apm_metric_aggregator';

const KEY_FIELDS: Array<keyof ApmFields> = [
  'transaction.name',
  'transaction.result',
  'transaction.type',
  'event.outcome',

  'agent.name',
  'service.environment',
  'service.name',
  'service.version',
  'service.node.name',
  'service.runtime.name',
  'service.runtime.version',

  'service.language.name',
  'service.language.version',

  'host.hostname',
  'host.name',
  'host.os.platform',
  'container.id',
  'kubernetes.pod.name',

  'cloud.provider',
  'cloud.region',
  'cloud.availability_zone',
  'cloud.service.name',
  'cloud.account.id',
  'cloud.account.name',
  'cloud.project.id',
  'cloud.project.name',
  'cloud.machine.type',

  'faas.coldstart',
  'faas.id',
  'faas.trigger.type',
  'faas.name',
  'faas.version',
];

export function createTransactionMetricsAggregator(flushInterval: string) {
  return createApmMetricAggregator(
    {
      filter: (event) => event['processor.event'] === 'transaction',
      getAggregateKey: (event) => {
        // see https://github.com/elastic/apm-server/blob/main/x-pack/apm-server/aggregation/txmetrics/aggregator.go
        let key = hashKeysOf(event, KEY_FIELDS);
        key = appendHash(key, event['parent.id'] ? '1' : '0');
        return key;
      },
      flushInterval,
      init: (event) => {
        const set = pick(event, KEY_FIELDS);

        return {
          ...set,
          'metricset.name': 'transaction',
          'processor.event': 'metric',
          'processor.name': 'metric',
          'transaction.root': !event['parent.id'],
          'transaction.duration.histogram': createLosslessHistogram(),
        };
      },
    },
    (metric, event) => {
      metric['transaction.duration.histogram'].record(event['transaction.duration.us']!);
    },
    (metric) => {
      const serialized = metric['transaction.duration.histogram'].serialize();
      metric['transaction.duration.histogram'] = {
        // @ts-expect-error
        values: serialized.values,
        counts: serialized.counts,
      };
      // @ts-expect-error
      metric._doc_count = serialized.total;
      return metric;
    }
  );
}
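For orientation, the flush callback above rewrites the histogram into the exploded shape Elasticsearch expects for histogram fields; with made-up numbers:

// createLosslessHistogram().serialize() might yield { values: [1000, 2500], counts: [3, 1], total: 4 },
// which is stored as 'transaction.duration.histogram': { values, counts } with _doc_count: 4,
// so that aggregations weight the document by the number of raw transactions it represents.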
@@ -1,215 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { random } from 'lodash';
import { Client } from '@elastic/elasticsearch';
import { ApmFields } from '../apm_fields';
import { Fields } from '../../entity';
import { StreamAggregator } from '../../stream_aggregator';

type AggregationState = {
  count: number;
  min: number;
  max: number;
  sum: number;
  timestamp: number;
  failure_count: number;
  success_count: number;
} & Pick<ApmFields, 'service.name' | 'service.environment' | 'transaction.type'>;

export type ServiceFields = Fields &
  Pick<
    ApmFields,
    | 'timestamp.us'
    | 'ecs.version'
    | 'metricset.name'
    | 'observer'
    | 'processor.event'
    | 'processor.name'
    | 'service.name'
    | 'service.version'
    | 'service.environment'
  > &
  Partial<{
    _doc_count: number;
    transaction: {
      failure_count: number;
      success_count: number;
      type: string;
      'duration.summary': {
        min: number;
        max: number;
        sum: number;
        value_count: number;
      };
    };
  }>;

export class ServicMetricsAggregator implements StreamAggregator<ApmFields> {
  public readonly name;

  constructor() {
    this.name = 'service-latency';
  }

  getDataStreamName(): string {
    return 'metrics-apm.service';
  }

  getMappings(): Record<string, any> {
    return {
      properties: {
        '@timestamp': {
          type: 'date',
          format: 'date_optional_time||epoch_millis',
        },
        transaction: {
          type: 'object',
          properties: {
            type: { type: 'keyword', time_series_dimension: true },
            duration: {
              type: 'object',
              properties: {
                summary: {
                  type: 'aggregate_metric_double',
                  metrics: ['min', 'max', 'sum', 'value_count'],
                  default_metric: 'sum',
                  time_series_metric: 'gauge',
                },
              },
            },
            failure_count: {
              type: 'long',
            },
            success_count: {
              type: 'long',
            },
          },
        },
        service: {
          type: 'object',
          properties: {
            name: { type: 'keyword', time_series_dimension: true },
            environment: { type: 'keyword', time_series_dimension: true },
          },
        },
      },
    };
  }

  getDimensions(): string[] {
    return ['service.name', 'service.environment', 'transaction.type'];
  }

  getWriteTarget(document: Record<string, any>): string | null {
    const eventType = document.metricset?.name;
    if (eventType === 'service') return 'metrics-apm.service-default';
    return null;
  }

  private state: Record<string, AggregationState> = {};

  private processedComponent: number = 0;

  process(event: ApmFields): Fields[] | null {
    if (!event['@timestamp']) return null;
    const service = event['service.name']!;
    const environment = event['service.environment'] ?? 'production';
    const transactionType = event['transaction.type'] ?? 'request';
    const key = `${service}-${environment}-${transactionType}`;
    const addToState = (timestamp: number) => {
      if (!this.state[key]) {
        this.state[key] = {
          timestamp,
          count: 0,
          min: 0,
          max: 0,
          sum: 0,
          'service.name': service,
          'service.environment': environment,
          'transaction.type': transactionType,
          failure_count: 0,
          success_count: 0,
        };
      }

      const state = this.state[key];

      const duration = Number(event['transaction.duration.us']);

      if (duration >= 0) {
        state.count++;

        state.sum += duration;
        if (duration > state.max) state.max = duration;
        if (duration < state.min) state.min = Math.min(0, duration);

        switch (event['event.outcome']) {
          case 'failure':
            state.failure_count++;
            break;
          case 'success':
            state.success_count++;
            break;
        }
      }
    };

    // ensure we flush current state first if event falls out of the current max window age
    if (this.state[key]) {
      const diff = Math.abs(event['@timestamp'] - this.state[key].timestamp);
      if (diff >= 1000 * 60) {
        const fields = this.createServiceFields(key);
        delete this.state[key];
        addToState(event['@timestamp']);
        return [fields];
      }
    }

    addToState(event['@timestamp']);
    // if cardinality is too high force emit of current state
    if (Object.keys(this.state).length === 1000) {
      return this.flush();
    }

    return null;
  }

  flush(): Fields[] {
    const fields = Object.keys(this.state).map((key) => this.createServiceFields(key));
    this.state = {};
    return fields;
  }

  private createServiceFields(key: string): ServiceFields {
    this.processedComponent = ++this.processedComponent % 1000;
    const component = Date.now() % 100;
    const state = this.state[key];
    return {
      _doc_count: state.count,
      '@timestamp': state.timestamp + random(0, 100) + component + this.processedComponent,
      'metricset.name': 'service',
      'processor.event': 'metric',
      'service.name': state['service.name'],
      'service.environment': state['service.environment'],
      transaction: {
        'duration.summary': {
          min: state.min,
          max: state.max,
          sum: state.sum,
          value_count: state.count,
        },
        success_count: state.success_count,
        failure_count: state.failure_count,
        type: state['transaction.type'] ?? 'request',
      },
    };
  }

  async bootstrapElasticsearch(esClient: Client): Promise<void> {}
}
@@ -1,344 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import Client from 'elastic-apm-http-client';
import Util from 'util';
import { Logger } from '../../utils/create_logger';
import { ApmFields } from '../apm_fields';
import { EntityIterable } from '../../entity_iterable';
import { StreamProcessor } from '../../stream_processor';
import { EntityStreams } from '../../entity_streams';
import { Fields } from '../../entity';
import { Span } from './intake_v2/span';
import { Error } from './intake_v2/error';
import { Metadata } from './intake_v2/metadata';
import { Transaction } from './intake_v2/transaction';

export interface StreamToBulkOptions<TFields extends Fields = ApmFields> {
  concurrency?: number;
  // the maximum number of documents to process
  maxDocs?: number;
  // the number of documents at which to flush the bulk operation, defaults to 10k
  flushInterval?: number;
  mapToIndex?: (document: Record<string, any>) => string;
  dryRun: boolean;
  itemStartStopCallback?: (item: TFields | null, done: boolean) => void;
}

export interface ApmSynthtraceApmClientOptions {
  forceLegacyIndices?: boolean;
  // defaults to true if unspecified
  refreshAfterIndex?: boolean;
}

interface ClientState {
  client: Client;
  enqueued: number;
  sendSpan: (s: Span) => Promise<void>;
  sendTransaction: (s: Transaction) => Promise<void>;
  sendError: (e: Error) => Promise<void>;
  flush: (o: any) => Promise<void>;
}
interface ClientStats {
  numEvents: number;
  numEventsDropped: number;
  numEventsEnqueued: number;
  numEventsSent: number;
  slowWriteBatch: number;
  backoffReconnectCount: number;
}
export class ApmSynthtraceApmClient {
  private readonly _serviceClients: Map<string, ClientState> = new Map<string, ClientState>();
  constructor(
    private readonly apmTarget: string,
    private readonly logger: Logger,
    options?: ApmSynthtraceApmClientOptions
  ) {}

  map(fields: ApmFields): [Span | Transaction, Error[]] {
    const set = <T extends keyof ApmFields, K>(
      key: T,
      context: NonNullable<K>,
      setter: (context: NonNullable<K>, value: NonNullable<ApmFields[T]>) => void
    ) => {
      if (fields[key]) {
        setter(context, fields[key]!);
      }
    };
    const metadata: Metadata = {
      service: {
        name: fields['service.name'] ?? 'unknown',
        agent: {
          name: fields['agent.name'] ?? 'unknown',
          version: fields['agent.version'] ?? 'unknown',
        },
      },
    };

    const system = (metadata.system = metadata.system ?? {});
    const container = (system.container = system.container ?? {});
    const kubernetes = (system.kubernetes = system.kubernetes ?? {});
    const pod = (kubernetes.pod = kubernetes.pod ?? {});
    set('container.id', container, (c, v) => (c.id = v));
    set('host.name', system, (c, v) => (c.hostname = v));
    set('host.hostname', system, (c, v) => (c.configured_hostname = v));
    set('kubernetes.pod.name', pod, (c, v) => (c.name = v));
    set('kubernetes.pod.uid', pod, (c, v) => (c.uid = v));

    const e: Span | Transaction = fields['span.id']
      ? {
          kind: 'span',
          duration: fields['span.duration.us'] ?? 0,
          id: fields['span.id'] ?? '',
          name: fields['span.name'] ?? 'unknown',
          parent_id: fields['parent.id'] ?? '',
          type: fields['span.type'] ?? '',
          timestamp: Math.trunc((fields['@timestamp'] ?? 0) * 1000),
          trace_id: fields['trace.id'] ?? '',
        }
      : {
          kind: 'transaction',
          timestamp: Math.trunc((fields['@timestamp'] ?? 0) * 1000),
          duration: fields['transaction.duration.us'] ?? 0,
          id: fields['transaction.id'] ?? '',
          type: fields['transaction.type'] ?? '',
          trace_id: fields['trace.id'] ?? '',
          span_count: { dropped: null, started: 0 },
        };

    set('trace.id', e, (c, v) => (c.trace_id = v));
    set('parent.id', e, (c, v) => (c.parent_id = v));
    set(
      'span.links',
      e,
      (c, v) => (c.links = v.map((l) => ({ span_id: l.span?.id, trace_id: l.span?.id })))
    );

    e.context = {};
    const service = (e.context.service = e.context.service ?? {});
    set('service.name', service, (c, v) => (c.name = v));
    set('service.version', service, (c, v) => (c.version = v));
    set('service.environment', service, (c, v) => (c.environment = v));
    const node = (service.node = service.node ?? {});
    set('service.node.name', node, (c, v) => (c.configured_name = v));
    const agent = (service.agent = service.agent ?? {});
    set('agent.name', agent, (c, v) => (c.name = v));
    set('agent.version', agent, (c, v) => (c.version = v));
    const runtime = (service.runtime = service.runtime ?? {});
    set('service.runtime.name', runtime, (c, v) => (c.name = v));
    set('service.runtime.version', runtime, (c, v) => (c.version = v));
    const framework = (service.framework = service.framework ?? {});
    set('service.framework.name', framework, (c, v) => (c.name = v));

    set(
      'event.outcome',
      e,
      (c, v) => (c.outcome = v === 'failure' ? 'failure' : v === 'success' ? 'success' : 'unknown')
    );

    if (e.kind === 'span') {
      set('span.duration.us', e, (c, v) => (c.duration = v / 1000));
      set('span.type', e, (c, v) => (c.type = v));
      set('span.subtype', e, (c, v) => (c.subtype = v));

      const destination = (e.context.destination = e.context.destination ?? {});
      const destinationService = (destination.service = destination.service ?? { resource: '' });
      set('span.destination.service.resource', destinationService, (c, v) => (c.resource = v));
    }
    if (e.kind === 'transaction') {
      set('transaction.name', e, (c, v) => (c.name = v));
      set('transaction.type', e, (c, v) => (c.type = v));
      set('transaction.id', e, (c, v) => (c.id = v));
      set('transaction.duration.us', e, (c, v) => (c.duration = v / 1000));
      set('transaction.sampled', e, (c, v) => (c.sampled = v));
    }

    let errors: Error[] = [];
    if (fields['error.id']) {
      const exceptions = fields['error.exception'] ?? [];
      errors = exceptions.map((ex) => {
        const err: Error = {
          id: '0',
          timestamp: Math.trunc((fields['@timestamp'] ?? 0) * 1000),
          context: e.context,
        };
        set('error.id', err, (c, v) => (c.id = v));
        set('parent.id', err, (c, v) => (c.parent_id = v));
        set('trace.id', err, (c, v) => (c.trace_id = v));
        set('transaction.id', err, (c, v) => (c.transaction_id = v));
        set('error.grouping_name', err, (c, v) => (c.culprit = v));
        err.exception = {
          message: ex.message,
          type: 'Exception',
        };
        if (!err.parent_id) err.parent_id = err.transaction_id ?? err.trace_id;
        return err;
      });
    }

    // TODO include more event context
    // 'cloud.provider': string;
    // 'cloud.project.name': string;
    // 'cloud.service.name': string;
    // 'cloud.availability_zone': string;
    // 'cloud.machine.type': string;
    // 'cloud.region': string;
    // 'host.os.platform': string;
    // 'faas.id': string;
    // 'faas.coldstart': boolean;
    // 'faas.execution': string;
    // 'faas.trigger.type': string;
    // 'faas.trigger.request_id': string;

    return [e, errors];
  }

  async index<TFields>(
    events: EntityIterable<TFields> | Array<EntityIterable<TFields>>,
    options?: StreamToBulkOptions,
    streamProcessor?: StreamProcessor
  ) {
    const dataStream = Array.isArray(events) ? new EntityStreams(events) : events;
    const sp =
      streamProcessor != null
        ? streamProcessor
        : new StreamProcessor({
            processors: [],
            maxSourceEvents: options?.maxDocs,
            logger: this.logger,
          });

    let yielded = 0;
    let fields: ApmFields | null = null;
    // intentionally leaks `fields` so it can be pushed to callback events
    const sideEffectYield = () =>
      sp.streamToDocumentAsync((e) => {
        fields = e;
        return this.map(e);
      }, dataStream);

    if (options?.dryRun) {
      await this.logger.perf('enumerate_scenario', async () => {
        // @ts-ignore
        // We just want to enumerate
        for await (const item of sideEffectYield()) {
          if (yielded === 0) {
            options.itemStartStopCallback?.apply(this, [fields, false]);
            yielded++;
          }
        }
        options.itemStartStopCallback?.apply(this, [fields, true]);
      });
      return;
    }
    const queueSize = 10000;
    for await (const [item, _] of sideEffectYield()) {
      if (item == null) continue;

      const service = item.context?.service?.name ?? 'unknown';
      const hostName = fields ? fields['host.name'] : 'unknown';
      // TODO evaluate if we really need service-specific clients
      // const lookup = `${service}::${hostName}`;
      const lookup = `constant_key::1`;
      if (!this._serviceClients.has(lookup)) {
        const client = new Client({
          userAgent: `apm-agent-synthtrace/${sp.version}`,
          serverUrl: this.apmTarget,
          maxQueueSize: queueSize,
          bufferWindowSize: queueSize / 2,

          serviceName: service,
          serviceNodeName: service,
          agentName: 'synthtrace',
          agentVersion: sp.version,
          serviceVersion: item.context?.service?.version ?? sp.version,
          frameworkName: item.context?.service?.framework?.name ?? undefined,
          frameworkVersion: item.context?.service?.framework?.version ?? undefined,
          hostname: hostName,
        });
        this._serviceClients.set(lookup, {
          client,
          enqueued: 0,
          sendSpan: Util.promisify(client.sendSpan).bind(client),
          sendTransaction: Util.promisify(client.sendTransaction).bind(client),
          sendError: Util.promisify(client.sendError).bind(client),
          flush: Util.promisify(client.flush).bind(client),
        });
      }
      const clientState = this._serviceClients.get(lookup)!;

      if (yielded === 0) {
        options?.itemStartStopCallback?.apply(this, [fields, false]);
      }
      if (item.kind === 'span') {
        clientState.sendSpan(item);
      } else if (item.kind === 'transaction') {
        clientState.sendTransaction(item);
      }
      yielded++;
      clientState.enqueued++;
      /* TODO finish implementing sending errors
      errors.forEach((e) => {
        clientState.sendError(e);
        clientState.enqueued++;
      });*/
      if (clientState.enqueued % queueSize === 0) {
        this.logger.debug(
          ` -- ${sp.name} Flushing client: ${lookup} after enqueueing ${clientState.enqueued}`
        );
        await clientState.flush({});
      }
    }
    for (const [, state] of this._serviceClients) {
      await state.flush({});
    }
    // this attempts to group similar service names together for cleaner reporting
    const totals = Array.from(this._serviceClients).reduce((p, c, i, a) => {
      const serviceName = c[0].split('::')[0].replace(/-\d+$/, '');
      if (!p.has(serviceName)) {
        p.set(serviceName, { enqueued: 0, sent: 0, names: new Set<string>() });
      }
      const s = p.get(serviceName)!;
      s.enqueued += c[1].enqueued;
      s.sent += c[1].client.sent;
      s.names.add(c[0]);
      const stats = c[1].client._getStats();
      if (!s.stats) {
        s.stats = stats;
      } else {
        s.stats.backoffReconnectCount += stats.backoffReconnectCount;
        s.stats.numEvents += stats.numEvents;
        s.stats.numEventsSent += stats.numEventsSent;
        s.stats.numEventsDropped += stats.numEventsDropped;
        s.stats.numEventsEnqueued += stats.numEventsEnqueued;
        s.stats.slowWriteBatch += stats.slowWriteBatch;
      }
      return p;
    }, new Map<string, { enqueued: number; sent: number; names: Set<string>; stats?: ClientStats }>());
    for (const [serviceGroup, state] of totals) {
      // only report details if there is a discrepancy in the bookkeeping of synthtrace and the client
      if (
        state.stats &&
        (state.stats.numEventsDropped > 0 || state.enqueued !== state.stats.numEventsSent)
      ) {
        this.logger.info(
          ` -- ${serviceGroup} (${state.names.size} services) sent: ${state.sent}, enqueued: ${state.enqueued}`
        );
        this.logger.info(` -- ${serviceGroup} (${state.names.size} services) client stats`);
        this.logger.info(` -- numEvents: ${state.stats.numEvents}`);
        this.logger.info(` -- numEventsSent: ${state.stats.numEventsSent}`);
        this.logger.info(` -- numEventsEnqueued: ${state.stats.numEventsEnqueued}`);
        this.logger.info(` -- numEventsDropped: ${state.stats.numEventsDropped}`);
        this.logger.info(` -- backoffReconnectCount: ${state.stats.backoffReconnectCount}`);
        this.logger.info(` -- slowWriteBatch: ${state.stats.slowWriteBatch}`);
      }
    }

    options?.itemStartStopCallback?.apply(this, [fields, true]);
  }
}
|
|
@@ -1,277 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { Client } from '@elastic/elasticsearch';
import { IndicesIndexSettings } from '@elastic/elasticsearch/lib/api/types';
import { cleanWriteTargets } from '../../utils/clean_write_targets';
import { getApmWriteTargets } from '../utils/get_apm_write_targets';
import { Logger } from '../../utils/create_logger';
import { ApmFields } from '../apm_fields';
import { EntityIterable } from '../../entity_iterable';
import { StreamProcessor } from '../../stream_processor';
import { EntityStreams } from '../../entity_streams';
import { Fields } from '../../entity';
import { StreamAggregator } from '../../stream_aggregator';

export interface StreamToBulkOptions<TFields extends Fields = ApmFields> {
  concurrency?: number;
  // the maximum number of documents to process
  maxDocs?: number;
  // the number of documents at which to flush the bulk operation; defaults to 10k
  flushInterval?: number;
  mapToIndex?: (document: Record<string, any>) => string;
  dryRun: boolean;
  itemStartStopCallback?: (item: TFields | null, done: boolean) => void;
}

export interface ApmSynthtraceEsClientOptions {
  forceLegacyIndices?: boolean;
  // defaults to true if unspecified
  refreshAfterIndex?: boolean;
}

export class ApmSynthtraceEsClient {
  private readonly forceLegacyIndices: boolean;
  private readonly refreshAfterIndex: boolean;
  constructor(
    private readonly client: Client,
    private readonly logger: Logger,
    options?: ApmSynthtraceEsClientOptions
  ) {
    this.forceLegacyIndices = options?.forceLegacyIndices ?? false;
    this.refreshAfterIndex = options?.refreshAfterIndex ?? true;
  }

  private getWriteTargets() {
    return getApmWriteTargets({
      client: this.client,
      forceLegacyIndices: this.forceLegacyIndices,
    });
  }

  async runningVersion() {
    const info = await this.client.info();
    return info.version.number;
  }

  async clean(dataStreams?: string[]) {
    return this.getWriteTargets().then(async (writeTargets) => {
      const indices = Object.values(writeTargets);
      this.logger.info(`Attempting to clean: ${indices} + ${dataStreams ?? []}`);
      if (this.forceLegacyIndices) {
        return cleanWriteTargets({
          client: this.client,
          targets: indices,
          logger: this.logger,
        });
      }
      for (const name of indices.concat(dataStreams ?? [])) {
        const dataStream = await this.client.indices.getDataStream({ name }, { ignore: [404] });
        if (dataStream.data_streams && dataStream.data_streams.length > 0) {
          this.logger.debug(`Deleting datastream: ${name}`);
          await this.client.indices.deleteDataStream({ name });
        }
      }
      return;
    });
  }

  async updateComponentTemplates(numberOfPrimaryShards: number) {
    const response = await this.client.cluster.getComponentTemplate({ name: '*apm*@custom' });
    for (const componentTemplate of response.component_templates) {
      if (componentTemplate.component_template._meta?.package?.name !== 'apm') continue;

      componentTemplate.component_template.template.settings = {
        index: {
          number_of_shards: numberOfPrimaryShards,
        },
      };

      const putTemplate = await this.client.cluster.putComponentTemplate({
        name: componentTemplate.name,
        ...componentTemplate.component_template,
      });
      this.logger.info(
        `- updated component template ${componentTemplate.name}, acknowledged: ${putTemplate.acknowledged}`
      );
    }
  }

  async registerGcpRepository(connectionString: string) {
    // <client_name>:<bucket>[:base_path]
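    // e.g. 'gcs-client:apm-snapshots:synthtrace' (illustrative client name, bucket and base path)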
    const [clientName, bucket, basePath] = connectionString.split(':');
    if (!clientName)
      throw new Error(
        `client name is mandatory for gcp repository registration: ${connectionString}`
      );
    if (!bucket)
      throw new Error(`bucket is mandatory for gcp repository registration: ${connectionString}`);

    const name = `gcp-repository-${clientName}`;
    this.logger.info(`Registering gcp repository ${name}`);
    const putRepository = await this.client.snapshot.createRepository({
      name,
      type: 'gcs',
      settings: {
        // @ts-ignore
        // missing from es types
        bucket,
        client: clientName,
        base_path: basePath,
      },
    });
    this.logger.info(putRepository);

    this.logger.info(`Verifying gcp repository ${name}`);
    const verifyRepository = await this.client.snapshot.verifyRepository({ name });
    this.logger.info(verifyRepository);
  }

  async refresh(dataStreams?: string[]) {
    const writeTargets = await this.getWriteTargets();

    const indices = Object.values(writeTargets).concat(dataStreams ?? []);
    this.logger.info(`Indexed all data attempting to refresh: ${indices}`);

    return this.client.indices.refresh({
      index: indices,
      allow_no_indices: true,
      ignore_unavailable: true,
    });
  }

  async index<TFields>(
    events: EntityIterable<TFields> | Array<EntityIterable<TFields>>,
    options?: StreamToBulkOptions,
    streamProcessor?: StreamProcessor
  ) {
    const dataStream = Array.isArray(events) ? new EntityStreams(events) : events;
    const sp =
      streamProcessor != null
        ? streamProcessor
        : new StreamProcessor({
            processors: StreamProcessor.apmProcessors,
            maxSourceEvents: options?.maxDocs,
            logger: this.logger,
          });

    let item: Record<any, any> | null = null;
    let yielded = 0;
    if (options?.dryRun) {
      await this.logger.perf('enumerate_scenario', async () => {
        // @ts-ignore
        // We just want to enumerate
        for await (item of sp.streamToDocumentAsync((e) => sp.toDocument(e), dataStream)) {
          if (yielded === 0) {
            options.itemStartStopCallback?.apply(this, [item, false]);
            yielded++;
          }
        }
        options.itemStartStopCallback?.apply(this, [item, true]);
      });
      return;
    }

    const writeTargets = await this.getWriteTargets();
    // TODO logger.perf
    await this.client.helpers.bulk<ApmFields>({
      concurrency: options?.concurrency ?? 10,
      refresh: false,
      refreshOnCompletion: false,
      flushBytes: 500000,
      // TODO https://github.com/elastic/elasticsearch-js/issues/1610
      // having to map here is awkward, it'd be better to map just before serialization.
      datasource: sp.streamToDocumentAsync((e) => sp.toDocument(e), dataStream),
      onDrop: (doc) => {
        this.logger.info(JSON.stringify(doc, null, 2));
      },
      // TODO bug in client not passing generic to BulkHelperOptions<>
      // https://github.com/elastic/elasticsearch-js/issues/1611
      onDocument: (doc: unknown) => {
        item = doc as Record<string, any>;
        if (yielded === 0) {
          options?.itemStartStopCallback?.apply(this, [item, false]);
          yielded++;
        }
        let index = options?.mapToIndex ? options?.mapToIndex(item) : null;
        if (!index) {
          index = !this.forceLegacyIndices
            ? sp.getDataStreamForEvent(item, writeTargets)
            : StreamProcessor.getIndexForEvent(item, writeTargets);
        }
        return { create: { _index: index } };
      },
    });
    options?.itemStartStopCallback?.apply(this, [item, true]);

    if (this.refreshAfterIndex) {
      await this.refresh();
    }
  }

  async createDataStream(aggregator: StreamAggregator) {
    const datastreamName = aggregator.getDataStreamName();
    const mappings = aggregator.getMappings();
    const dimensions = aggregator.getDimensions();

    const indexSettings: IndicesIndexSettings = { lifecycle: { name: 'metrics' } };
    if (dimensions.length > 0) {
      indexSettings.mode = 'time_series';
      indexSettings.routing_path = dimensions;
    }

    await this.client.cluster.putComponentTemplate({
      name: `${datastreamName}-mappings`,
      template: {
        mappings,
      },
      _meta: {
        description: `Mappings for ${datastreamName}-*`,
      },
    });
    this.logger.info(`Created mapping component template for ${datastreamName}-*`);

    await this.client.cluster.putComponentTemplate({
      name: `${datastreamName}-settings`,
      template: {
        settings: {
          index: indexSettings,
        },
      },
      _meta: {
        description: `Settings for ${datastreamName}-*`,
      },
    });
    this.logger.info(`Created settings component template for ${datastreamName}-*`);

    await this.client.indices.putIndexTemplate({
      name: `${datastreamName}-index_template`,
      index_patterns: [`${datastreamName}-*`],
      data_stream: {},
      composed_of: [`${datastreamName}-mappings`, `${datastreamName}-settings`],
      priority: 500,
    });
    this.logger.info(`Created index template for ${datastreamName}-*`);

    const dataStreamWithNamespace = datastreamName + '-default';
    const getDataStreamResponse = await this.client.indices.getDataStream(
      {
        name: dataStreamWithNamespace,
      },
      { ignore: [404] }
    );
    if (getDataStreamResponse.data_streams && getDataStreamResponse.data_streams.length === 0) {
      await this.client.indices.createDataStream({ name: dataStreamWithNamespace });
      this.logger.info(`Created data stream: ${dataStreamWithNamespace}.`);
    } else {
      this.logger.info(`Data stream: ${dataStreamWithNamespace} already exists.`);
    }

    await aggregator.bootstrapElasticsearch(this.client);
  }
}
@@ -0,0 +1,24 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { Transform } from 'stream';
import { ApmFields } from '@kbn/apm-synthtrace-client';

export function getApmServerMetadataTransform(version: string) {
  const versionMajor = Number(version.split('.')[0]);

  return new Transform({
    objectMode: true,
    transform(document: ApmFields, encoding, callback) {
      document['observer.type'] = 'synthtrace';
      document['observer.version'] = version;
      document['observer.version_major'] = versionMajor;
      callback(null, document);
    },
  });
}
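For orientation, a sketch of the transform's effect (the version string is illustrative):

// Given getApmServerMetadataTransform('8.6.0'), every document flowing through gains:
//   'observer.type'          -> 'synthtrace'
//   'observer.version'       -> '8.6.0'
//   'observer.version_major' -> 8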
@@ -0,0 +1,43 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { ApmFields, dedot } from '@kbn/apm-synthtrace-client';
import { Transform } from 'stream';

export function getDedotTransform(keepFlattenedFields: boolean = false) {
  return new Transform({
    objectMode: true,
    transform(document: ApmFields, encoding, callback) {
      let target: Record<string, any>;

      if (keepFlattenedFields) {
        // No need to dedot metric events beyond document.observer;
        // use this mode when you want to reduce CPU time.
        // @ts-expect-error
        document.observer = {
          type: document['observer.type'],
          version: document['observer.version'],
          version_major: document['observer.version_major'],
        };
        delete document['observer.type'];
        delete document['observer.version'];
        delete document['observer.version_major'];

        target = document['processor.event'] === 'metric' ? document : dedot(document, {});
      } else {
        target = dedot(document, {});
      }

      delete target.meta;
      target['@timestamp'] = new Date(target['@timestamp']!).toISOString();

      callback(null, target);
    },
  });
}
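A hedged sketch of the dedot step (field values are illustrative; `dedot` is assumed to expand dotted keys into nested objects, as its use above implies):

// Input:  { 'service.name': 'opbeans-java', 'processor.event': 'transaction', '@timestamp': 0 }
// Output: { service: { name: 'opbeans-java' }, processor: { event: 'transaction' },
//           '@timestamp': '1970-01-01T00:00:00.000Z' }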
@@ -0,0 +1,21 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { ApmFields } from '@kbn/apm-synthtrace-client';
import { Transform } from 'stream';

export function getIntakeDefaultsTransform() {
  return new Transform({
    objectMode: true,
    transform(document: ApmFields, encoding, callback) {
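      // Default the service node name: fall back to the container id, then the host name.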
      document['service.node.name'] =
        document['service.node.name'] || document['container.id'] || document['host.name'];
      callback(null, document);
    },
  });
}
@@ -0,0 +1,50 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { ApmFields, ESDocumentWithOperation } from '@kbn/apm-synthtrace-client';
import { Transform } from 'stream';

export function getRoutingTransform() {
  return new Transform({
    objectMode: true,
    transform(document: ESDocumentWithOperation<ApmFields>, encoding, callback) {
      let index: string | undefined;

      switch (document['processor.event']) {
        case 'transaction':
        case 'span':
          index =
            document['agent.name'] === 'rum-js' ? 'traces-apm.rum-default' : 'traces-apm-default';
          break;

        case 'error':
          index = 'logs-apm.error-default';
          break;

        case 'metric':
          const metricsetName = document['metricset.name'];

          if (metricsetName === 'app') {
            index = `metrics-apm.app.${document['service.name']}-default`;
          } else {
            index = `metrics-apm.internal-default`;
          }
          break;
      }

      if (!index) {
        const error = new Error('Cannot determine index for event');
        Object.assign(error, { document });
        throw error;
      }

      document._index = index;

      callback(null, document);
    },
  });
}
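In summary, the switch above routes documents to data streams as follows:

// processor.event            -> target data stream
// transaction, span          -> traces-apm-default ('traces-apm.rum-default' when agent.name === 'rum-js')
// error                      -> logs-apm.error-default
// metric (metricset 'app')   -> metrics-apm.app.<service.name>-default
// metric (anything else)     -> metrics-apm.internal-default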
@@ -0,0 +1,44 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { ApmFields, Serializable } from '@kbn/apm-synthtrace-client';
import { Transform } from 'stream';

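// Converts Serializable<ApmFields> chunks into individual ApmFields documents while
// respecting backpressure: when the readable side refuses more data, the remaining
// events are parked in `buffer` and the pending write callback is held in `cb` until
// the consumer drains the stream and read() is called again.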
export function getSerializeTransform() {
  const buffer: ApmFields[] = [];

  let cb: (() => void) | undefined;

  function push(stream: Transform, events: ApmFields[], callback?: () => void) {
    let event: ApmFields | undefined;
    while ((event = events.shift())) {
      if (!stream.push(event)) {
        buffer.push(...events);
        cb = callback;
        return;
      }
    }
    callback?.();
  }

  return new Transform({
    objectMode: true,
    read() {
      if (cb) {
        const nextCallback = cb;
        cb = undefined;
        const nextEvents = [...buffer];
        buffer.length = 0;
        push(this, nextEvents, nextCallback);
      }
    },
    write(chunk: Serializable<ApmFields>, encoding, callback) {
      push(this, chunk.serialize(), callback);
    },
  });
}
@@ -0,0 +1,209 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { Client, estypes } from '@elastic/elasticsearch';
import {
  ApmFields,
  ESDocumentWithOperation,
  SynthtraceESAction,
  SynthtraceGenerator,
} from '@kbn/apm-synthtrace-client';
import { castArray } from 'lodash';
import { PassThrough, pipeline, Readable, Transform } from 'stream';
import { isGeneratorObject } from 'util/types';
import { ValuesType } from 'utility-types';
import { Logger } from '../../../utils/create_logger';
import { fork, sequential } from '../../../utils/stream_utils';
import { createBreakdownMetricsAggregator } from '../../aggregators/create_breakdown_metrics_aggregator';
import { createSpanMetricsAggregator } from '../../aggregators/create_span_metrics_aggregator';
import { createTransactionMetricsAggregator } from '../../aggregators/create_transaction_metrics_aggregator';
import { getApmServerMetadataTransform } from './get_apm_server_metadata_transform';
import { getDedotTransform } from './get_dedot_transform';
import { getIntakeDefaultsTransform } from './get_intake_defaults_transform';
import { getRoutingTransform } from './get_routing_transform';
import { getSerializeTransform } from './get_serialize_transform';

export interface ApmSynthtraceEsClientOptions {
  version: string;
  concurrency?: number;
  refreshAfterIndex?: boolean;
}

type MaybeArray<T> = T | T[];

const DATA_STREAMS = ['traces-apm*', 'metrics-apm*', 'logs-apm*'];

export enum ComponentTemplateName {
  LogsApp = 'logs-apm.app@custom',
  LogsError = 'logs-apm.error@custom',
  MetricsApp = 'metrics-apm.app@custom',
  MetricsInternal = 'metrics-apm.internal@custom',
  TracesApm = 'traces-apm@custom',
  TracesApmRum = 'traces-apm.rum@custom',
  TracesApmSampled = 'traces-apm.sampled@custom',
}

export class ApmSynthtraceEsClient {
  private readonly client: Client;
  private readonly logger: Logger;

  private readonly concurrency: number;

  private readonly refreshAfterIndex: boolean;

  private readonly version: string;

  private pipelineCallback: (base: Readable) => NodeJS.WritableStream = this.getDefaultPipeline();

  constructor(options: { client: Client; logger: Logger } & ApmSynthtraceEsClientOptions) {
    this.client = options.client;
    this.logger = options.logger;
    this.concurrency = options.concurrency ?? 1;
    this.refreshAfterIndex = options.refreshAfterIndex ?? false;
    this.version = options.version;
  }

  async clean() {
    this.logger.info(`Cleaning APM data streams ${DATA_STREAMS.join(', ')}`);

    for (const name of DATA_STREAMS) {
      const dataStream = await this.client.indices.getDataStream({ name }, { ignore: [404] });
      if (dataStream.data_streams && dataStream.data_streams.length > 0) {
        this.logger.debug(`Deleting datastream: ${name}`);
        await this.client.indices.deleteDataStream({ name });
      }
    }
  }

  async updateComponentTemplate(
    name: ComponentTemplateName,
    modify: (
      template: ValuesType<
        estypes.ClusterGetComponentTemplateResponse['component_templates']
      >['component_template']['template']
    ) => estypes.ClusterPutComponentTemplateRequest['template']
  ) {
    const response = await this.client.cluster.getComponentTemplate({
      name,
    });

    const template = response.component_templates[0];

    await this.client.cluster.putComponentTemplate({
      name,
      template: {
        ...modify(template.component_template.template),
      },
    });

    this.logger.info(`Updated component template: ${name}`);
  }

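  // Example call (illustrative values): raise the shard count on the traces template.
  //
  //   await esClient.updateComponentTemplate(ComponentTemplateName.TracesApm, (template) => ({
  //     ...template,
  //     settings: { index: { number_of_shards: 4 } },
  //   }));
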
  async refresh(dataStreams: string[] = DATA_STREAMS) {
    this.logger.info(`Refreshing ${dataStreams.join(',')}`);

    return this.client.indices.refresh({
      index: dataStreams,
      allow_no_indices: true,
      ignore_unavailable: true,
    });
  }

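  // The default pipeline runs, in order: serialization (optional) -> intake defaults ->
  // a fork into the transaction and span metric aggregators -> breakdown metrics ->
  // APM server observer metadata -> data stream routing -> dedotting.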
  getDefaultPipeline(includeSerialization: boolean = true) {
    return (base: Readable) => {
      const aggregators = [
        createTransactionMetricsAggregator('1m'),
        createSpanMetricsAggregator('1m'),
      ];

      const serializationTransform = includeSerialization ? [getSerializeTransform()] : [];

      return pipeline(
        // @ts-expect-error Some weird stuff here with the type definition for pipeline. We have tests!
        base,
        ...serializationTransform,
        getIntakeDefaultsTransform(),
        fork(new PassThrough({ objectMode: true }), ...aggregators),
        createBreakdownMetricsAggregator('30s'),
        getApmServerMetadataTransform(this.version),
        getRoutingTransform(),
        getDedotTransform(),
        (err) => {
          if (err) {
            this.logger.error(err);
          }
        }
      );
    };
  }

  pipeline(cb: (base: Readable) => NodeJS.WritableStream) {
    this.pipelineCallback = cb;
  }

  getVersion() {
    return this.version;
  }

  async index(streamOrGenerator: MaybeArray<Readable | SynthtraceGenerator<ApmFields>>) {
    this.logger.debug(`Bulk indexing ${castArray(streamOrGenerator).length} stream(s)`);

    const allStreams = castArray(streamOrGenerator).map((obj) => {
      const base = isGeneratorObject(obj) ? Readable.from(obj) : obj;

      return this.pipelineCallback(base);
    }) as Transform[];

    let count: number = 0;

    const stream = sequential(...allStreams);

    await this.client.helpers.bulk({
      concurrency: this.concurrency,
      refresh: false,
      refreshOnCompletion: false,
      flushBytes: 250000,
      datasource: stream,
      filter_path: 'errors,items.*.error,items.*.status',
      onDocument: (doc: ESDocumentWithOperation<ApmFields>) => {
        let action: SynthtraceESAction;
        count++;

        if (count % 100000 === 0) {
          this.logger.info(`Indexed ${count} documents`);
        } else if (count % 1000 === 0) {
          this.logger.debug(`Indexed ${count} documents`);
        }

        if (doc._action) {
          action = doc._action!;
          delete doc._action;
        } else if (doc._index) {
          action = { create: { _index: doc._index } };
          delete doc._index;
        } else {
          this.logger.debug(doc);
          throw new Error(
            `Could not determine operation: _index and _action not defined in document`
          );
        }

        return action;
      },
      onDrop: (doc) => {
        this.logger.error(`Dropped document: ${JSON.stringify(doc, null, 2)}`);
      },
    });

    this.logger.info(`Produced ${count} events`);

    if (this.refreshAfterIndex) {
      await this.refresh();
    }
  }
}
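Typical wiring of the new client, sketched in comments (node URL, logger and generator are assumptions, not part of this diff):

// const esClient = new ApmSynthtraceEsClient({
//   client: new Client({ node: 'http://localhost:9200' }), // illustrative ES node
//   logger, // a Logger instance, assumed to be in scope
//   version: '8.6.0', // the observer version stamped on documents
// });
// await esClient.clean();
// await esClient.index(generator); // a SynthtraceGenerator<ApmFields> or a Readable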
@@ -10,94 +10,57 @@ import fetch from 'node-fetch';
import { Logger } from '../../utils/create_logger';

export class ApmSynthtraceKibanaClient {
  constructor(private readonly logger: Logger) {}
  private readonly logger: Logger;
  private target: string;

  async migrateCloudToManagedApm(cloudId: string, username: string, password: string) {
    await this.logger.perf('migrate_apm_on_cloud', async () => {
      this.logger.info('attempting to migrate cloud instance over to managed APM');
      const cloudUrls = Buffer.from(cloudId.split(':')[1], 'base64').toString().split('$');
      const kibanaCloudUrl = `https://${cloudUrls[2]}.${cloudUrls[0]}`;
      const response = await fetch(
        kibanaCloudUrl + '/internal/apm/fleet/cloud_apm_package_policy',
        {
          method: 'POST', // *GET, POST, PUT, DELETE, etc.
          headers: {
            Authorization: 'Basic ' + Buffer.from(username + ':' + password).toString('base64'),
            Accept: 'application/json',
            'Content-Type': 'application/json',
            'kbn-xsrf': 'kibana',
          },
        }
      );
      const responseJson = await response.json();
      if (responseJson.message) {
        this.logger.info(`Cloud Instance already migrated to managed APM: ${responseJson.message}`);
      }
      if (responseJson.cloudApmPackagePolicy) {
        this.logger.info(
          `Cloud Instance migrated to managed APM: ${responseJson.cloudApmPackagePolicy.package.version}`
        );
      }
    });
  constructor(options: { logger: Logger; target: string }) {
    this.logger = options.logger;
    this.target = options.target;
  }

  async discoverLocalKibana() {
    return await fetch('http://localhost:5601', {
      method: 'HEAD',
      follow: 1,
      redirect: 'manual',
    }).then((res) => {
      const kibanaUrl = res.headers.get('location');
      this.logger.info(`Discovered local kibana running at: ${kibanaUrl}`);
      return kibanaUrl;
    });
  }

  async fetchLatestApmPackageVersion(
    kibanaUrl: string,
    version: string,
    username: string,
    password: string
  ) {
    const url = `${kibanaUrl}/api/fleet/epm/packages/apm`;
    const response = await fetch(url, {
  async fetchLatestApmPackageVersion() {
    this.logger.debug(`Fetching latest APM package version`);
    const fleetPackageApiUrl = `${this.target}/api/fleet/epm/packages/apm`;
    const response = await fetch(fleetPackageApiUrl, {
      method: 'GET',
      headers: kibanaHeaders(username, password),
      headers: kibanaHeaders(),
    });
    const json = (await response.json()) as { item: { latestVersion: string } };
    const { latestVersion } = json.item;
    return latestVersion;

    const responseJson = await response.json();

    if (response.status !== 200) {
      throw new Error(
        `Failed to fetch latest APM package version, received HTTP ${response.status} and message: ${responseJson.message}`
      );
    }

    const { latestVersion } = responseJson.item;
    return latestVersion as string;
  }

  async installApmPackage(kibanaUrl: string, version: string, username: string, password: string) {
    const packageVersion = await this.fetchLatestApmPackageVersion(
      kibanaUrl,
      version,
      username,
      password
    );
    const response = await fetch(`${kibanaUrl}/api/fleet/epm/packages/apm/${packageVersion}`, {
  async installApmPackage(packageVersion: string) {
    this.logger.debug(`Installing APM package ${packageVersion}`);

    const response = await fetch(`${this.target}/api/fleet/epm/packages/apm/${packageVersion}`, {
      method: 'POST',
      headers: kibanaHeaders(username, password),
      headers: kibanaHeaders(),
      body: '{"force":true}',
    });

    const responseJson = await response.json();

    if (responseJson.statusCode) {
      throw Error(
        `unable to install apm package ${packageVersion}. Received status code: ${responseJson.statusCode} and message: ${responseJson.message}`
    if (!responseJson.items) {
      throw new Error(
        `Failed to install APM package version ${packageVersion}, received HTTP ${response.status} and message: ${responseJson.message}`
      );
    }
    if (responseJson.items) {
      this.logger.info(`Installed apm package ${packageVersion}`);
    } else this.logger.error(responseJson);

    this.logger.info(`Installed APM package ${packageVersion}`);
  }
}

function kibanaHeaders(username: string, password: string) {
function kibanaHeaders() {
  return {
    Authorization: 'Basic ' + Buffer.from(username + ':' + password).toString('base64'),
    Accept: 'application/json',
    'Content-Type': 'application/json',
    'kbn-xsrf': 'kibana',
@@ -1,513 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

/**
 * errorEvent represents an error or a logged error message, captured by an APM agent in a monitored service.
 */
export interface Error {
  /**
   * Context holds arbitrary contextual information for the event.
   */
  context?: null | {
    /**
     * Cloud holds fields related to the cloud or infrastructure the events are coming from.
     */
    cloud?: null | {
      /**
       * Origin contains the self-nested field groups for cloud.
       */
      origin?: null | {
        /**
         * The cloud account or organization id used to identify different entities in a multi-tenant environment.
         */
        account?: null | {
          /**
           * The cloud account or organization id used to identify different entities in a multi-tenant environment.
           */
          id?: null | string;
          [k: string]: unknown;
        };
        /**
         * Name of the cloud provider.
         */
        provider?: null | string;
        /**
         * Region in which this host, resource, or service is located.
         */
        region?: null | string;
        /**
         * The cloud service name is intended to distinguish services running on different platforms within a provider.
         */
        service?: null | {
          /**
           * The cloud service name is intended to distinguish services running on different platforms within a provider.
           */
          name?: null | string;
          [k: string]: unknown;
        };
        [k: string]: unknown;
      };
      [k: string]: unknown;
    };
    /**
     * Custom can contain additional metadata to be stored with the event. The format is unspecified and can be deeply nested objects. The information will not be indexed or searchable in Elasticsearch.
     */
    custom?: null | {
      [k: string]: unknown;
    };
    /**
     * Message holds details related to message receiving and publishing if the captured event integrates with a messaging system
     */
    message?: null | {
      /**
       * Age of the message. If the monitored messaging framework provides a timestamp for the message, agents may use it. Otherwise, the sending agent can add a timestamp in milliseconds since the Unix epoch to the message's metadata to be retrieved by the receiving agent. If a timestamp is not available, agents should omit this field.
       */
      age?: null | {
        /**
         * Age of the message in milliseconds.
         */
        ms?: null | number;
        [k: string]: unknown;
      };
      /**
       * Body of the received message, similar to an HTTP request body
       */
      body?: null | string;
      /**
       * Headers received with the message, similar to HTTP request headers.
       */
      headers?: null | {
        /**
         * This interface was referenced by `undefined`'s JSON-Schema definition
         * via the `patternProperty` "[.*]*$".
         */
        [k: string]: null | string[] | string;
      };
      /**
       * Queue holds information about the message queue where the message is received.
       */
      queue?: null | {
        /**
         * Name holds the name of the message queue where the message is received.
         */
        name?: null | string;
        [k: string]: unknown;
      };
      /**
       * RoutingKey holds the optional routing key of the received message as set on the queuing system, such as in RabbitMQ.
       */
      routing_key?: null | string;
      [k: string]: unknown;
    };
    /**
     * Page holds information related to the current page and page referers. It is only sent from RUM agents.
     */
    page?: null | {
      /**
       * Referer holds the URL of the page that 'linked' to the current page.
       */
      referer?: null | string;
      /**
       * URL of the current page
       */
      url?: null | string;
      [k: string]: unknown;
    };
    /**
     * Request describes the HTTP request information in case the event was created as a result of an HTTP request.
     */
    request?: null | {
      /**
       * Body only contains the request body, not the query string information. It can either be a dictionary (for standard HTTP requests) or a raw request body.
       */
      body?:
        | null
        | string
        | {
            [k: string]: unknown;
          };
      /**
       * Cookies used by the request, parsed as key-value objects.
       */
      cookies?: null | {
        [k: string]: unknown;
      };
      /**
       * Env holds environment variable information passed to the monitored service.
       */
      env?: null | {
        [k: string]: unknown;
      };
      /**
       * Headers includes any HTTP headers sent by the requester. Cookies will be taken by headers if supplied.
       */
      headers?: null | {
        /**
         * This interface was referenced by `undefined`'s JSON-Schema definition
         * via the `patternProperty` "[.*]*$".
         */
        [k: string]: null | string[] | string;
      };
      /**
       * HTTPVersion holds information about the used HTTP version.
       */
      http_version?: null | string;
      /**
       * Method holds information about the method of the HTTP request.
       */
      method: string;
      /**
       * Socket holds information related to the recorded request, such as whether or not data were encrypted and the remote address.
       */
      socket?: null | {
        /**
         * Encrypted indicates whether a request was sent as TLS/HTTPS request. DEPRECATED: this field will be removed in a future release.
         */
        encrypted?: null | boolean;
        /**
         * RemoteAddress holds the network address sending the request. It should be obtained through standard APIs and not be parsed from any headers like 'Forwarded'.
         */
        remote_address?: null | string;
        [k: string]: unknown;
      };
      /**
       * URL holds information such as the raw URL, scheme, host and path.
       */
      url?: null | {
        /**
         * Full, possibly agent-assembled URL of the request, e.g. https://example.com:443/search?q=elasticsearch#top.
         */
        full?: null | string;
        /**
         * Hash of the request URL, e.g. 'top'
         */
        hash?: null | string;
        /**
         * Hostname information of the request, e.g. 'example.com'.
         */
        hostname?: null | string;
        /**
         * Path of the request, e.g. '/search'
         */
        pathname?: null | string;
        /**
         * Port of the request, e.g. '443'. Can be sent as string or int.
         */
        port?: null | string | number;
        /**
         * Protocol information for the recorded request, e.g. 'https:'.
         */
        protocol?: null | string;
        /**
         * Raw unparsed URL of the HTTP request line, e.g https://example.com:443/search?q=elasticsearch. This URL may be absolute or relative. For more details, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2.
         */
        raw?: null | string;
        /**
         * Search contains the query string information of the request. It is expected to have values delimited by ampersands.
         */
        search?: null | string;
        [k: string]: unknown;
      };
      [k: string]: unknown;
    };
    /**
     * Response describes the HTTP response information in case the event was created as a result of an HTTP request.
     */
    response?: null | {
      /**
       * DecodedBodySize holds the size of the decoded payload.
       */
      decoded_body_size?: null | number;
      /**
       * EncodedBodySize holds the size of the encoded payload.
       */
      encoded_body_size?: null | number;
      /**
       * Finished indicates whether the response was finished or not.
       */
      finished?: null | boolean;
      /**
       * Headers holds the http headers sent in the http response.
       */
      headers?: null | {
        /**
         * This interface was referenced by `undefined`'s JSON-Schema definition
         * via the `patternProperty` "[.*]*$".
         */
        [k: string]: null | string[] | string;
      };
      /**
       * HeadersSent indicates whether http headers were sent.
       */
      headers_sent?: null | boolean;
      /**
       * StatusCode sent in the http response.
       */
      status_code?: null | number;
      /**
       * TransferSize holds the total size of the payload.
       */
      transfer_size?: null | number;
      [k: string]: unknown;
    };
    /**
     * Service related information can be sent per event. Information provided here will override the more generic information retrieved from metadata, missing service fields will be retrieved from the metadata information.
     */
    service?: null | {
      /**
       * Agent holds information about the APM agent capturing the event.
       */
      agent?: null | {
        /**
         * EphemeralID is a free format ID used for metrics correlation by agents
         */
        ephemeral_id?: null | string;
        /**
         * Name of the APM agent capturing information.
         */
        name?: null | string;
        /**
         * Version of the APM agent capturing information.
         */
        version?: null | string;
        [k: string]: unknown;
      };
      /**
       * Environment in which the monitored service is running, e.g. `production` or `staging`.
       */
      environment?: null | string;
      /**
       * Framework holds information about the framework used in the monitored service.
       */
      framework?: null | {
        /**
         * Name of the used framework
         */
        name?: null | string;
        /**
         * Version of the used framework
         */
        version?: null | string;
        [k: string]: unknown;
      };
      /**
       * ID holds a unique identifier for the service.
       */
      id?: null | string;
      /**
       * Language holds information about the programming language of the monitored service.
       */
      language?: null | {
        /**
         * Name of the used programming language
         */
        name?: null | string;
        /**
         * Version of the used programming language
         */
        version?: null | string;
        [k: string]: unknown;
      };
      /**
       * Name of the monitored service.
       */
      name?: null | string;
      /**
       * Node must be a unique meaningful name of the service node.
       */
      node?: null | {
        /**
         * Name of the service node
         */
        configured_name?: null | string;
        [k: string]: unknown;
      };
      /**
       * Origin contains the self-nested field groups for service.
       */
      origin?: null | {
        /**
         * Immutable id of the service emitting this event.
         */
        id?: null | string;
        /**
         * Immutable name of the service emitting this event.
         */
        name?: null | string;
        /**
         * The version of the service the data was collected from.
         */
        version?: null | string;
        [k: string]: unknown;
      };
      /**
       * Runtime holds information about the language runtime running the monitored service
       */
      runtime?: null | {
        /**
         * Name of the language runtime
         */
        name?: null | string;
        /**
         * Version of the language runtime
         */
        version?: null | string;
        [k: string]: unknown;
      };
      /**
       * Target holds information about the outgoing service in case of an outgoing event
       */
      target?: (
        | {
            type: string;
            [k: string]: unknown;
          }
        | {
            name: string;
            [k: string]: unknown;
          }
      ) &
        (
          | ((
              | {
                  type: string;
                  [k: string]: unknown;
                }
              | {
                  name: string;
                  [k: string]: unknown;
                }
            ) &
              null)
          | (
              | {
                  type: string;
                  [k: string]: unknown;
                }
              | {
                  name: string;
                  [k: string]: unknown;
                }
            )
        );
      /**
       * Version of the monitored service.
       */
      version?: null | string;
      [k: string]: unknown;
    };
    /**
     * Tags are a flat mapping of user-defined tags. On the agent side, tags are called labels. Allowed value types are string, boolean and number values. Tags are indexed and searchable.
     */
    tags?: null | {
      [k: string]: null | string | boolean | number;
    };
    /**
     * User holds information about the correlated user for this event. If user data are provided here, all user related information from metadata is ignored, otherwise the metadata's user information will be stored with the event.
     */
    user?: null | {
      /**
       * Domain of the logged in user
       */
      domain?: null | string;
      /**
       * Email of the user.
       */
      email?: null | string;
      /**
       * ID identifies the logged in user, e.g. can be the primary key of the user
       */
      id?: null | string | number;
      /**
       * Name of the user.
       */
      username?: null | string;
      [k: string]: unknown;
    };
    [k: string]: unknown;
  };
  /**
   * Culprit identifies the function call which was the primary perpetrator of this event.
   */
  culprit?: null | string;
  /**
   * Exception holds information about the original error. The information is language specific.
   */
  exception?: { message: string; type: string };
  /**
   * ID holds the hex encoded 128 random bits ID of the event.
   */
  id: string;
  /**
   * Log holds additional information added when the error is logged.
   */
  log?: null | {
    /**
     * Level represents the severity of the recorded log.
     */
    level?: null | string;
    /**
     * LoggerName holds the name of the used logger instance.
     */
    logger_name?: null | string;
    /**
     * Message of the logged error. In case a parameterized message is captured, Message should contain the same information, but with any placeholders being replaced.
     */
    message: string;
    /**
     * ParamMessage should contain the same information as Message, but with placeholders where parameters were logged, e.g. 'error connecting to %s'. The string is not interpreted, allowing different placeholders per client language. The information might be used to group errors together.
     */
    param_message?: null | string;
    /**
     * Stacktrace information of the captured error.
     */
    stacktrace?: null | Array<
      | {
          classname: string;
          [k: string]: unknown;
        }
      | {
          filename: string;
          [k: string]: unknown;
        }
    >;
  };
  /**
   * ParentID holds the hex encoded 64 random bits ID of the parent transaction or span.
   */
  parent_id?: null | string;
  /**
   * Timestamp holds the recorded time of the event, UTC based and formatted as microseconds since Unix epoch.
   */
  timestamp?: null | number;
  /**
   * TraceID holds the hex encoded 128 random bits ID of the correlated trace.
   */
  trace_id?: null | string;
  /**
   * Transaction holds information about the correlated transaction.
   */
  transaction?: null | {
    /**
     * Name is the generic designation of a transaction in the scope of a single service, eg: 'GET /users/:id'.
     */
    name?: null | string;
    /**
     * Sampled indicates whether or not the full information for a transaction is captured. If a transaction is unsampled no spans and less context information will be reported.
     */
    sampled?: null | boolean;
    /**
     * Type expresses the correlated transaction's type as keyword that has specific relevance within the service's domain, eg: 'request', 'backgroundjob'.
     */
    type?: null | string;
  };
  /**
   * TransactionID holds the hex encoded 64 random bits ID of the correlated transaction.
   */
  transaction_id?: null | string;
}
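For orientation, a minimal object satisfying this interface's only required field (the id value is made up):

const minimalError: Error = {
  id: '0123456789abcdef0123456789abcdef', // hex encoded 128-bit event id
};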
@@ -1,312 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

export interface Metadata {
  /**
   * Cloud metadata about where the monitored service is running.
   */
  cloud?: null | {
    /**
     * Account where the monitored service is running.
     */
    account?: null | {
      /**
       * ID of the cloud account.
       */
      id?: null | string;
      /**
       * Name of the cloud account.
       */
      name?: null | string;
      [k: string]: unknown;
    };
    /**
     * AvailabilityZone where the monitored service is running, e.g. us-east-1a
     */
    availability_zone?: null | string;
    /**
     * Instance on which the monitored service is running.
     */
    instance?: null | {
      /**
       * ID of the cloud instance.
       */
      id?: null | string;
      /**
       * Name of the cloud instance.
       */
      name?: null | string;
      [k: string]: unknown;
    };
    /**
     * Machine on which the monitored service is running.
     */
    machine?: null | {
      /**
       * Type of the cloud machine.
       */
      type?: null | string;
      [k: string]: unknown;
    };
    /**
     * Project in which the monitored service is running.
     */
    project?: null | {
      /**
       * ID of the cloud project.
       */
      id?: null | string;
      /**
       * Name of the cloud project.
       */
      name?: null | string;
      [k: string]: unknown;
    };
    /**
     * Provider that is used, e.g. aws, azure, gcp, digitalocean.
     */
    provider: string;
    /**
     * Region where the monitored service is running, e.g. us-east-1
     */
    region?: null | string;
    /**
     * Service that is monitored on cloud
     */
    service?: null | {
      /**
       * Name of the cloud service, intended to distinguish services running on different platforms within a provider, eg AWS EC2 vs Lambda, GCP GCE vs App Engine, Azure VM vs App Server.
       */
      name?: null | string;
      [k: string]: unknown;
    };
    [k: string]: unknown;
  };
  /**
   * Labels are a flat mapping of user-defined tags. Allowed value types are string, boolean and number values. Labels are indexed and searchable.
   */
  labels?: null | {
    [k: string]: null | string | boolean | number;
  };
  /**
   * Network holds information about the network over which the monitored service is communicating.
   */
  network?: null | {
    connection?: null | {
      type?: null | string;
      [k: string]: unknown;
    };
    [k: string]: unknown;
  };
  /**
   * Process metadata about the monitored service.
   */
  process?: null | {
    /**
     * Argv holds the command line arguments used to start this process.
     */
    argv?: null | string[];
    /**
     * PID holds the process ID of the service.
     */
    pid: number;
    /**
     * Ppid holds the parent process ID of the service.
     */
    ppid?: null | number;
    /**
     * Title is the process title. It can be the same as process name.
     */
    title?: null | string;
    [k: string]: unknown;
  };
  /**
   * Service metadata about the monitored service.
   */
  service: {
    /**
     * Agent holds information about the APM agent capturing the event.
     */
    agent: {
      /**
       * EphemeralID is a free format ID used for metrics correlation by agents
       */
      ephemeral_id?: null | string;
      /**
       * Name of the APM agent capturing information.
       */
      name: string;
      /**
       * Version of the APM agent capturing information.
       */
      version: string;
      [k: string]: unknown;
    };
    /**
     * Environment in which the monitored service is running, e.g. `production` or `staging`.
     */
    environment?: null | string;
    /**
     * Framework holds information about the framework used in the monitored service.
     */
    framework?: null | {
      /**
       * Name of the used framework
       */
      name?: null | string;
      /**
       * Version of the used framework
       */
      version?: null | string;
      [k: string]: unknown;
    };
    /**
     * ID holds a unique identifier for the running service.
     */
    id?: null | string;
    /**
     * Language holds information about the programming language of the monitored service.
     */
    language?: null | {
      /**
       * Name of the used programming language
       */
      name: string;
      /**
       * Version of the used programming language
       */
      version?: null | string;
      [k: string]: unknown;
    };
    /**
     * Name of the monitored service.
     */
    name: string;
    /**
     * Node must be a unique meaningful name of the service node.
     */
    node?: null | {
      /**
       * Name of the service node
       */
      configured_name?: null | string;
      [k: string]: unknown;
    };
    /**
     * Runtime holds information about the language runtime running the monitored service
     */
    runtime?: null | {
      /**
       * Name of the language runtime
       */
      name: string;
      /**
       * Version of the language runtime
       */
      version: string;
      [k: string]: unknown;
    };
    /**
     * Version of the monitored service.
     */
    version?: null | string;
    [k: string]: unknown;
  };
  /**
   * System metadata
   */
  system?: null | {
    /**
     * Architecture of the system the monitored service is running on.
     */
    architecture?: null | string;
    /**
     * ConfiguredHostname is the configured name of the host the monitored service is running on. It should only be sent when configured by the user. If given, it is used as the event's hostname.
     */
    configured_hostname?: null | string;
    /**
     * Container holds the system's container ID if available.
     */
    container?: null | {
      /**
       * ID of the container the monitored service is running in.
       */
      id?: null | string;
      [k: string]: unknown;
    };
    /**
     * DetectedHostname is the hostname detected by the APM agent. It usually contains what the hostname command returns on the host machine. It will be used as the event's hostname if ConfiguredHostname is not present.
     */
    detected_hostname?: null | string;
    /**
     * Deprecated: Use ConfiguredHostname and DetectedHostname instead. DeprecatedHostname is the host name of the system the service is running on. It does not distinguish between configured and detected hostname and therefore is deprecated and only used if no other hostname information is available.
     */
    hostname?: null | string;
    /**
     * Kubernetes system information if the monitored service runs on Kubernetes.
     */
    kubernetes?: null | {
      /**
       * Namespace of the Kubernetes resource the monitored service is run on.
       */
      namespace?: null | string;
      /**
       * Node related information
       */
      node?: null | {
        /**
         * Name of the Kubernetes Node
         */
        name?: null | string;
        [k: string]: unknown;
      };
      /**
       * Pod related information
       */
      pod?: null | {
        /**
         * Name of the Kubernetes Pod
         */
        name?: null | string;
        /**
         * UID is the system-generated string uniquely identifying the Pod.
         */
        uid?: null | string;
        [k: string]: unknown;
      };
      [k: string]: unknown;
    };
    /**
     * Platform name of the system platform the monitored service is running on.
     */
    platform?: null | string;
    [k: string]: unknown;
  };
  /**
   * User metadata, which can be overwritten on a per event basis.
   */
  user?: null | {
    /**
     * Domain of the logged in user
     */
    domain?: null | string;
    /**
     * Email of the user.
     */
    email?: null | string;
    /**
     * ID identifies the logged in user, e.g. can be the primary key of the user
     */
    id?: null | string | number;
    /**
     * Name of the user.
     */
    username?: null | string;
    [k: string]: unknown;
  };
  [k: string]: unknown;
}
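A minimal object satisfying this interface's required fields (agent and service names are made up):

const minimalMetadata: Metadata = {
  service: {
    agent: { name: 'java', version: '1.0.0' }, // illustrative agent
    name: 'opbeans-java', // illustrative service name
  },
};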
@ -1,453 +0,0 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0 and the Server Side Public License, v 1; you may not use this file except
|
||||
* in compliance with, at your election, the Elastic License 2.0 or the Server
|
||||
* Side Public License, v 1.
|
||||
*/
|
||||
|
||||
export interface Span {
|
||||
kind: 'span';
|
||||
/**
|
||||
* Action holds the specific kind of event within the sub-type represented by the span (e.g. query, connect)
|
||||
*/
|
||||
action?: null | string;
|
||||
/**
|
||||
* ChildIDs holds a list of successor transactions and/or spans.
|
||||
*/
|
||||
child_ids?: null | string[];
|
||||
/**
|
||||
* Composite holds details on a group of spans represented by a single one.
|
||||
*/
|
||||
composite?: null | {
|
||||
/**
|
||||
* A string value indicating which compression strategy was used. The valid values are `exact_match` and `same_kind`.
|
||||
*/
|
||||
compression_strategy: string;
|
||||
/**
|
||||
* Count is the number of compressed spans the composite span represents. The minimum count is 2, as a composite span represents at least two spans.
|
||||
*/
|
||||
count: number;
|
||||
/**
|
||||
* Sum is the durations of all compressed spans this composite span represents in milliseconds.
|
||||
*/
|
||||
sum: number;
|
||||
[k: string]: unknown;
|
||||
};
|
||||
/**
|
||||
* Context holds arbitrary contextual information for the event.
|
||||
*/
|
||||
context?: null | {
|
||||
/**
|
||||
* Database contains contextual data for database spans
|
||||
*/
|
||||
db?: null | {
|
||||
/**
|
||||
* Instance name of the database.
|
||||
*/
|
||||
instance?: null | string;
|
||||
/**
|
||||
* Link to the database server.
|
||||
*/
|
||||
link?: null | string;
|
||||
/**
|
||||
* RowsAffected shows the number of rows affected by the statement.
|
||||
*/
|
||||
rows_affected?: null | number;
|
||||
/**
|
||||
* Statement of the recorded database event, e.g. query.
|
||||
*/
|
||||
statement?: null | string;
|
||||
/**
|
||||
* Type of the recorded database event., e.g. sql, cassandra, hbase, redis.
|
||||
*/
|
||||
type?: null | string;
|
||||
/**
|
||||
* User is the username with which the database is accessed.
|
||||
*/
|
||||
user?: null | string;
|
||||
[k: string]: unknown;
|
||||
};
|
||||
/**
|
||||
* Destination contains contextual data about the destination of spans
|
||||
*/
|
||||
destination?: null | {
|
||||
/**
|
||||
* Address is the destination network address: hostname (e.g. 'localhost'), FQDN (e.g. 'elastic.co'), IPv4 (e.g. '127.0.0.1') IPv6 (e.g. '::1')
|
||||
*/
|
||||
address?: null | string;
|
||||
/**
|
||||
* Port is the destination network port (e.g. 443)
|
||||
*/
|
||||
port?: null | number;
|
||||
/**
|
||||
* Service describes the destination service
|
||||
*/
|
||||
service?: null | {
|
||||
/**
|
||||
* Name is the identifier for the destination service, e.g. 'http://elastic.co', 'elasticsearch', 'rabbitmq' ( DEPRECATED: this field will be removed in a future release
|
||||
*/
|
||||
        name?: null | string;
        /**
         * Resource identifies the destination service resource being operated on e.g. 'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue_name' DEPRECATED: this field will be removed in a future release
         */
        resource: string;
        /**
         * Type of the destination service, e.g. db, elasticsearch. Should typically be the same as span.type. DEPRECATED: this field will be removed in a future release
         */
        type?: null | string;
        [k: string]: unknown;
      };
      [k: string]: unknown;
    };
    /**
     * HTTP contains contextual information when the span concerns an HTTP request.
     */
    http?: null | {
      /**
       * Method holds information about the method of the HTTP request.
       */
      method?: null | string;
      /**
       * Response describes the HTTP response information in case the event was created as a result of an HTTP request.
       */
      response?: null | {
        /**
         * DecodedBodySize holds the size of the decoded payload.
         */
        decoded_body_size?: null | number;
        /**
         * EncodedBodySize holds the size of the encoded payload.
         */
        encoded_body_size?: null | number;
        /**
         * Headers holds the http headers sent in the http response.
         */
        headers?: null | {
          /**
           * This interface was referenced by `undefined`'s JSON-Schema definition
           * via the `patternProperty` "[.*]*$".
           */
          [k: string]: null | string[] | string;
        };
        /**
         * StatusCode sent in the http response.
         */
        status_code?: null | number;
        /**
         * TransferSize holds the total size of the payload.
         */
        transfer_size?: null | number;
        [k: string]: unknown;
      };
      /**
       * Deprecated: Use Response.StatusCode instead. StatusCode sent in the http response.
       */
      status_code?: null | number;
      /**
       * URL is the raw url of the correlating HTTP request.
       */
      url?: null | string;
      [k: string]: unknown;
    };
    /**
     * Message holds details related to message receiving and publishing if the captured event integrates with a messaging system
     */
    message?: null | {
      /**
       * Age of the message. If the monitored messaging framework provides a timestamp for the message, agents may use it. Otherwise, the sending agent can add a timestamp in milliseconds since the Unix epoch to the message's metadata to be retrieved by the receiving agent. If a timestamp is not available, agents should omit this field.
       */
      age?: null | {
        /**
         * Age of the message in milliseconds.
         */
        ms?: null | number;
        [k: string]: unknown;
      };
      /**
       * Body of the received message, similar to an HTTP request body
       */
      body?: null | string;
      /**
       * Headers received with the message, similar to HTTP request headers.
       */
      headers?: null | {
        /**
         * This interface was referenced by `undefined`'s JSON-Schema definition
         * via the `patternProperty` "[.*]*$".
         */
        [k: string]: null | string[] | string;
      };
      /**
       * Queue holds information about the message queue where the message is received.
       */
      queue?: null | {
        /**
         * Name holds the name of the message queue where the message is received.
         */
        name?: null | string;
        [k: string]: unknown;
      };
      /**
       * RoutingKey holds the optional routing key of the received message as set on the queuing system, such as in RabbitMQ.
       */
      routing_key?: null | string;
      [k: string]: unknown;
    };
    /**
     * Service related information can be sent per span. Information provided here will override the more generic information retrieved from metadata, missing service fields will be retrieved from the metadata information.
     */
    service?: null | {
      /**
       * Agent holds information about the APM agent capturing the event.
       */
      agent?: null | {
        /**
         * EphemeralID is a free format ID used for metrics correlation by agents
         */
        ephemeral_id?: null | string;
        /**
         * Name of the APM agent capturing information.
         */
        name?: null | string;
        /**
         * Version of the APM agent capturing information.
         */
        version?: null | string;
        [k: string]: unknown;
      };
      /**
       * Environment in which the monitored service is running, e.g. `production` or `staging`.
       */
      environment?: null | string;
      /**
       * Framework holds information about the framework used in the monitored service.
       */
      framework?: null | {
        /**
         * Name of the used framework
         */
        name?: null | string;
        /**
         * Version of the used framework
         */
        version?: null | string;
        [k: string]: unknown;
      };
      /**
       * ID holds a unique identifier for the service.
       */
      id?: null | string;
      /**
       * Language holds information about the programming language of the monitored service.
       */
      language?: null | {
        /**
         * Name of the used programming language
         */
        name?: null | string;
        /**
         * Version of the used programming language
         */
        version?: null | string;
        [k: string]: unknown;
      };
      /**
       * Name of the monitored service.
       */
      name?: null | string;
      /**
       * Node must be a unique meaningful name of the service node.
       */
      node?: null | {
        /**
         * Name of the service node
         */
        configured_name?: null | string;
        [k: string]: unknown;
      };
      /**
       * Origin contains the self-nested field groups for service.
       */
      origin?: null | {
        /**
         * Immutable id of the service emitting this event.
         */
        id?: null | string;
        /**
         * Immutable name of the service emitting this event.
         */
        name?: null | string;
        /**
         * The version of the service the data was collected from.
         */
        version?: null | string;
        [k: string]: unknown;
      };
      /**
       * Runtime holds information about the language runtime running the monitored service
       */
      runtime?: null | {
        /**
         * Name of the language runtime
         */
        name?: null | string;
        /**
         * Version of the language runtime
         */
        version?: null | string;
        [k: string]: unknown;
      };
      /**
       * Target holds information about the outgoing service in case of an outgoing event
       */
      target?: (
        | {
            type: string;
            [k: string]: unknown;
          }
        | {
            name: string;
            [k: string]: unknown;
          }
      ) &
        (
          | ((
              | {
                  type: string;
                  [k: string]: unknown;
                }
              | {
                  name: string;
                  [k: string]: unknown;
                }
            ) &
              null)
          | (
              | {
                  type: string;
                  [k: string]: unknown;
                }
              | {
                  name: string;
                  [k: string]: unknown;
                }
            )
        );
      /**
       * Version of the monitored service.
       */
      version?: null | string;
      [k: string]: unknown;
    };
    /**
     * Tags are a flat mapping of user-defined tags. On the agent side, tags are called labels. Allowed value types are string, boolean and number values. Tags are indexed and searchable.
     */
    tags?: null | {
      [k: string]: null | string | boolean | number;
    };
    [k: string]: unknown;
  };
  /**
   * Duration of the span in milliseconds. When the span is a composite one, duration is the gross duration, including "whitespace" in between spans.
   */
  duration: number;
  /**
   * ID holds the hex encoded 64 random bits ID of the event.
   */
  id: string;
  /**
   * Links holds links to other spans, potentially in other traces.
   */
  links?: null | Array<{
    /**
     * SpanID holds the ID of the linked span.
     */
    span_id: string;
    /**
     * TraceID holds the ID of the linked span's trace.
     */
    trace_id: string;
    [k: string]: unknown;
  }>;
  /**
   * Name is the generic designation of a span in the scope of a transaction.
   */
  name: string;
  /**
   * OTel contains unmapped OpenTelemetry attributes.
   */
  otel?: null | {
    /**
     * Attributes hold the unmapped OpenTelemetry attributes.
     */
    attributes?: null | {
      [k: string]: unknown;
    };
    /**
     * SpanKind holds the incoming OpenTelemetry span kind.
     */
    span_kind?: null | string;
    [k: string]: unknown;
  };
  /**
   * Outcome of the span: success, failure, or unknown. Outcome may be one of a limited set of permitted values describing the success or failure of the span. It can be used for calculating error rates for outgoing requests.
   */
  outcome?: 'success' | 'failure' | 'unknown' | null;
  /**
   * ParentID holds the hex encoded 64 random bits ID of the parent transaction or span.
   */
  parent_id: string;
  /**
   * SampleRate applied to the monitored service at the time where this span was recorded.
   */
  sample_rate?: null | number;
  /**
   * Stacktrace connected to this span event.
   */
  stacktrace?: null | Array<
    | {
        classname: string;
        [k: string]: unknown;
      }
    | {
        filename: string;
        [k: string]: unknown;
      }
  >;
  /**
   * Describes the event used by the Mobile SDKs: ApplicationLifecycle, Breadcrumb, Crash, Application Opened.
   */
  event?: null | {
    name: string;
  };
  /**
   * Start is the offset relative to the transaction's timestamp identifying the start of the span, in milliseconds.
   */
  start?: null | number;
  /**
   * Subtype is a further sub-division of the type (e.g. postgresql, elasticsearch)
   */
  subtype?: null | string;
  /**
   * Sync indicates whether the span was executed synchronously or asynchronously.
   */
  sync?: null | boolean;
  /**
   * Timestamp holds the recorded time of the event, UTC based and formatted as microseconds since Unix epoch
   */
  timestamp?: null | number;
  /**
   * TraceID holds the hex encoded 128 random bits ID of the correlated trace.
   */
  trace_id: string;
  /**
   * TransactionID holds the hex encoded 64 random bits ID of the correlated transaction.
   */
  transaction_id?: null | string;
  /**
   * Type holds the span's type, and can have specific keywords within the service's domain (eg: 'request', 'backgroundjob', etc)
   */
  type: string;
  [k: string]: unknown;
}
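For orientation, a minimal value satisfying the Span interface above. All IDs and literals below are invented for illustration; only the required fields (kind, duration, id, name, parent_id, trace_id, type) plus a few common optional ones are set.

// Illustrative sketch; assumes Span is in scope.
const exampleSpan: Span = {
  kind: 'span',
  id: '0123456789abcdef', // hex encoded 64 bits
  trace_id: '0123456789abcdef0123456789abcdef', // hex encoded 128 bits
  parent_id: 'fedcba9876543210',
  name: 'GET api.example.com',
  type: 'external',
  subtype: 'http',
  duration: 12.5, // milliseconds
  outcome: 'success',
};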
@ -1,661 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

export interface Transaction {
  kind: 'transaction';
  /**
   * Context holds arbitrary contextual information for the event.
   */
  context?: null | {
    /**
     * Cloud holds fields related to the cloud or infrastructure the events are coming from.
     */
    cloud?: null | {
      /**
       * Origin contains the self-nested field groups for cloud.
       */
      origin?: null | {
        /**
         * The cloud account or organization id used to identify different entities in a multi-tenant environment.
         */
        account?: null | {
          /**
           * The cloud account or organization id used to identify different entities in a multi-tenant environment.
           */
          id?: null | string;
          [k: string]: unknown;
        };
        /**
         * Name of the cloud provider.
         */
        provider?: null | string;
        /**
         * Region in which this host, resource, or service is located.
         */
        region?: null | string;
        /**
         * The cloud service name is intended to distinguish services running on different platforms within a provider.
         */
        service?: null | {
          /**
           * The cloud service name is intended to distinguish services running on different platforms within a provider.
           */
          name?: null | string;
          [k: string]: unknown;
        };
        [k: string]: unknown;
      };
      [k: string]: unknown;
    };
    /**
     * Custom can contain additional metadata to be stored with the event. The format is unspecified and can be deeply nested objects. The information will not be indexed or searchable in Elasticsearch.
     */
    custom?: null | {
      [k: string]: unknown;
    };
    /**
     * Message holds details related to message receiving and publishing if the captured event integrates with a messaging system
     */
    message?: null | {
      /**
       * Age of the message. If the monitored messaging framework provides a timestamp for the message, agents may use it. Otherwise, the sending agent can add a timestamp in milliseconds since the Unix epoch to the message's metadata to be retrieved by the receiving agent. If a timestamp is not available, agents should omit this field.
       */
      age?: null | {
        /**
         * Age of the message in milliseconds.
         */
        ms?: null | number;
        [k: string]: unknown;
      };
      /**
       * Body of the received message, similar to an HTTP request body
       */
      body?: null | string;
      /**
       * Headers received with the message, similar to HTTP request headers.
       */
      headers?: null | {
        /**
         * This interface was referenced by `undefined`'s JSON-Schema definition
         * via the `patternProperty` "[.*]*$".
         */
        [k: string]: null | string[] | string;
      };
      /**
       * Queue holds information about the message queue where the message is received.
       */
      queue?: null | {
        /**
         * Name holds the name of the message queue where the message is received.
         */
        name?: null | string;
        [k: string]: unknown;
      };
      /**
       * RoutingKey holds the optional routing key of the received message as set on the queuing system, such as in RabbitMQ.
       */
      routing_key?: null | string;
      [k: string]: unknown;
    };
    /**
     * Page holds information related to the current page and page referers. It is only sent from RUM agents.
     */
    page?: null | {
      /**
       * Referer holds the URL of the page that 'linked' to the current page.
       */
      referer?: null | string;
      /**
       * URL of the current page
       */
      url?: null | string;
      [k: string]: unknown;
    };
    /**
     * Request describes the HTTP request information in case the event was created as a result of an HTTP request.
     */
    request?: null | {
      /**
       * Body only contains the request body, not the query string information. It can either be a dictionary (for standard HTTP requests) or a raw request body.
       */
      body?:
        | null
        | string
        | {
            [k: string]: unknown;
          };
      /**
       * Cookies used by the request, parsed as key-value objects.
       */
      cookies?: null | {
        [k: string]: unknown;
      };
      /**
       * Env holds environment variable information passed to the monitored service.
       */
      env?: null | {
        [k: string]: unknown;
      };
      /**
       * Headers includes any HTTP headers sent by the requester. Cookies will be taken by headers if supplied.
       */
      headers?: null | {
        /**
         * This interface was referenced by `undefined`'s JSON-Schema definition
         * via the `patternProperty` "[.*]*$".
         */
        [k: string]: null | string[] | string;
      };
      /**
       * HTTPVersion holds information about the used HTTP version.
       */
      http_version?: null | string;
      /**
       * Method holds information about the method of the HTTP request.
       */
      method: string;
      /**
       * Socket holds information related to the recorded request, such as whether or not data were encrypted and the remote address.
       */
      socket?: null | {
        /**
         * Encrypted indicates whether a request was sent as TLS/HTTPS request. DEPRECATED: this field will be removed in a future release.
         */
        encrypted?: null | boolean;
        /**
         * RemoteAddress holds the network address sending the request. It should be obtained through standard APIs and not be parsed from any headers like 'Forwarded'.
         */
        remote_address?: null | string;
        [k: string]: unknown;
      };
      /**
       * URL holds information such as the raw URL, scheme, host and path.
       */
      url?: null | {
        /**
         * Full, possibly agent-assembled URL of the request, e.g. https://example.com:443/search?q=elasticsearch#top.
         */
        full?: null | string;
        /**
         * Hash of the request URL, e.g. 'top'
         */
        hash?: null | string;
        /**
         * Hostname information of the request, e.g. 'example.com'.
         */
        hostname?: null | string;
        /**
         * Path of the request, e.g. '/search'
         */
        pathname?: null | string;
        /**
         * Port of the request, e.g. '443'. Can be sent as string or int.
         */
        port?: null | string | number;
        /**
         * Protocol information for the recorded request, e.g. 'https:'.
         */
        protocol?: null | string;
        /**
         * Raw unparsed URL of the HTTP request line, e.g https://example.com:443/search?q=elasticsearch. This URL may be absolute or relative. For more details, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2.
         */
        raw?: null | string;
        /**
         * Search contains the query string information of the request. It is expected to have values delimited by ampersands.
         */
        search?: null | string;
        [k: string]: unknown;
      };
      [k: string]: unknown;
    };
    /**
     * Response describes the HTTP response information in case the event was created as a result of an HTTP request.
     */
    response?: null | {
      /**
       * DecodedBodySize holds the size of the decoded payload.
       */
      decoded_body_size?: null | number;
      /**
       * EncodedBodySize holds the size of the encoded payload.
       */
      encoded_body_size?: null | number;
      /**
       * Finished indicates whether the response was finished or not.
       */
      finished?: null | boolean;
      /**
       * Headers holds the http headers sent in the http response.
       */
      headers?: null | {
        /**
         * This interface was referenced by `undefined`'s JSON-Schema definition
         * via the `patternProperty` "[.*]*$".
         */
        [k: string]: null | string[] | string;
      };
      /**
       * HeadersSent indicates whether http headers were sent.
       */
      headers_sent?: null | boolean;
      /**
       * StatusCode sent in the http response.
       */
      status_code?: null | number;
      /**
       * TransferSize holds the total size of the payload.
       */
      transfer_size?: null | number;
      [k: string]: unknown;
    };
    /**
     * Service related information can be sent per event. Information provided here will override the more generic information retrieved from metadata, missing service fields will be retrieved from the metadata information.
     */
    service?: null | {
      /**
       * Agent holds information about the APM agent capturing the event.
       */
      agent?: null | {
        /**
         * EphemeralID is a free format ID used for metrics correlation by agents
         */
        ephemeral_id?: null | string;
        /**
         * Name of the APM agent capturing information.
         */
        name?: null | string;
        /**
         * Version of the APM agent capturing information.
         */
        version?: null | string;
        [k: string]: unknown;
      };
      /**
       * Environment in which the monitored service is running, e.g. `production` or `staging`.
       */
      environment?: null | string;
      /**
       * Framework holds information about the framework used in the monitored service.
       */
      framework?: null | {
        /**
         * Name of the used framework
         */
        name?: null | string;
        /**
         * Version of the used framework
         */
        version?: null | string;
        [k: string]: unknown;
      };
      /**
       * ID holds a unique identifier for the service.
       */
      id?: null | string;
      /**
       * Language holds information about the programming language of the monitored service.
       */
      language?: null | {
        /**
         * Name of the used programming language
         */
        name?: null | string;
        /**
         * Version of the used programming language
         */
        version?: null | string;
        [k: string]: unknown;
      };
      /**
       * Name of the monitored service.
       */
      name?: null | string;
      /**
       * Node must be a unique meaningful name of the service node.
       */
      node?: null | {
        /**
         * Name of the service node
         */
        configured_name?: null | string;
        [k: string]: unknown;
      };
      /**
       * Origin contains the self-nested field groups for service.
       */
      origin?: null | {
        /**
         * Immutable id of the service emitting this event.
         */
        id?: null | string;
        /**
         * Immutable name of the service emitting this event.
         */
        name?: null | string;
        /**
         * The version of the service the data was collected from.
         */
        version?: null | string;
        [k: string]: unknown;
      };
      /**
       * Runtime holds information about the language runtime running the monitored service
       */
      runtime?: null | {
        /**
         * Name of the language runtime
         */
        name?: null | string;
        /**
         * Version of the language runtime
         */
        version?: null | string;
        [k: string]: unknown;
      };
      /**
       * Target holds information about the outgoing service in case of an outgoing event
       */
      target?: (
        | {
            type: string;
            [k: string]: unknown;
          }
        | {
            name: string;
            [k: string]: unknown;
          }
      ) &
        (
          | ((
              | {
                  type: string;
                  [k: string]: unknown;
                }
              | {
                  name: string;
                  [k: string]: unknown;
                }
            ) &
              null)
          | (
              | {
                  type: string;
                  [k: string]: unknown;
                }
              | {
                  name: string;
                  [k: string]: unknown;
                }
            )
        );
      /**
       * Version of the monitored service.
       */
      version?: null | string;
      [k: string]: unknown;
    };
    /**
     * Tags are a flat mapping of user-defined tags. On the agent side, tags are called labels. Allowed value types are string, boolean and number values. Tags are indexed and searchable.
     */
    tags?: null | {
      [k: string]: null | string | boolean | number;
    };
    /**
     * User holds information about the correlated user for this event. If user data are provided here, all user related information from metadata is ignored, otherwise the metadata's user information will be stored with the event.
     */
    user?: null | {
      /**
       * Domain of the logged in user
       */
      domain?: null | string;
      /**
       * Email of the user.
       */
      email?: null | string;
      /**
       * ID identifies the logged in user, e.g. can be the primary key of the user
       */
      id?: null | string | number;
      /**
       * Name of the user.
       */
      username?: null | string;
      [k: string]: unknown;
    };
    [k: string]: unknown;
  };
  /**
   * DroppedSpanStats holds information about spans that were dropped (for example due to transaction_max_spans or exit_span_min_duration).
   */
  dropped_spans_stats?: null | Array<{
    /**
     * DestinationServiceResource identifies the destination service resource being operated on. e.g. 'http://elastic.co:80', 'elasticsearch', 'rabbitmq/queue_name'.
     */
    destination_service_resource?: null | string;
    /**
     * Duration holds duration aggregations about the dropped span.
     */
    duration?: null | {
      /**
       * Count holds the number of times the dropped span happened.
       */
      count?: null | number;
      /**
       * Sum holds dimensions about the dropped span's duration.
       */
      sum?: null | {
        /**
         * Us represents the summation of the span duration.
         */
        us?: null | number;
        [k: string]: unknown;
      };
      [k: string]: unknown;
    };
    /**
     * Outcome of the span: success, failure, or unknown. Outcome may be one of a limited set of permitted values describing the success or failure of the span. It can be used for calculating error rates for outgoing requests.
     */
    outcome?: 'success' | 'failure' | 'unknown' | null;
    /**
     * ServiceTargetName identifies the instance name of the target service being operated on
     */
    service_target_name?: null | string;
    /**
     * ServiceTargetType identifies the type of the target service being operated on e.g. 'oracle', 'rabbitmq'
     */
    service_target_type?: null | string;
    [k: string]: unknown;
  }>;
  /**
   * Duration how long the transaction took to complete, in milliseconds with 3 decimal points.
   */
  duration: number;
  /**
   * UserExperience holds metrics for measuring real user experience. This information is only sent by RUM agents.
   */
  experience?: null | {
    /**
     * CumulativeLayoutShift holds the Cumulative Layout Shift (CLS) metric value, or a negative value if CLS is unknown. See https://web.dev/cls/
     */
    cls?: null | number;
    /**
     * FirstInputDelay holds the First Input Delay (FID) metric value, or a negative value if FID is unknown. See https://web.dev/fid/
     */
    fid?: null | number;
    /**
     * Longtask holds longtask duration/count metrics.
     */
    longtask?: null | {
      /**
       * Count is the total number of longtasks.
       */
      count: number;
      /**
       * Max longtask duration
       */
      max: number;
      /**
       * Sum of longtask durations
       */
      sum: number;
      [k: string]: unknown;
    };
    /**
     * TotalBlockingTime holds the Total Blocking Time (TBT) metric value, or a negative value if TBT is unknown. See https://web.dev/tbt/
     */
    tbt?: null | number;
    [k: string]: unknown;
  };
  /**
   * FAAS holds fields related to Function as a Service events.
   */
  faas?: null | {
    /**
     * Indicates whether a function invocation was a cold start or not.
     */
    coldstart?: null | boolean;
    /**
     * The request id of the function invocation.
     */
    execution?: null | string;
    /**
     * A unique identifier of the invoked serverless function.
     */
    id?: null | string;
    /**
     * The lambda function name.
     */
    name?: null | string;
    /**
     * Trigger attributes.
     */
    trigger?: null | {
      /**
       * The id of the origin trigger request.
       */
      request_id?: null | string;
      /**
       * The trigger type.
       */
      type?: null | string;
      [k: string]: unknown;
    };
    /**
     * The lambda function version.
     */
    version?: null | string;
    [k: string]: unknown;
  };
  /**
   * ID holds the hex encoded 64 random bits ID of the event.
   */
  id: string;
  /**
   * Links holds links to other spans, potentially in other traces.
   */
  links?: null | Array<{
    /**
     * SpanID holds the ID of the linked span.
     */
    span_id: string;
    /**
     * TraceID holds the ID of the linked span's trace.
     */
    trace_id: string;
    [k: string]: unknown;
  }>;
  /**
   * Marks capture the timing of a significant event during the lifetime of a transaction. Marks are organized into groups and can be set by the user or the agent. Marks are only reported by RUM agents.
   */
  marks?: null | {
    [k: string]: null | {
      [k: string]: null | number;
    };
  };
  /**
   * Name is the generic designation of a transaction in the scope of a single service, eg: 'GET /users/:id'.
   */
  name?: null | string;
  /**
   * OTel contains unmapped OpenTelemetry attributes.
   */
  otel?: null | {
    /**
     * Attributes hold the unmapped OpenTelemetry attributes.
     */
    attributes?: null | {
      [k: string]: unknown;
    };
    /**
     * SpanKind holds the incoming OpenTelemetry span kind.
     */
    span_kind?: null | string;
    [k: string]: unknown;
  };
  /**
   * Outcome of the transaction with a limited set of permitted values, describing the success or failure of the transaction from the service's perspective. It is used for calculating error rates for incoming requests. Permitted values: success, failure, unknown.
   */
  outcome?: 'success' | 'failure' | 'unknown' | null;
  /**
   * ParentID holds the hex encoded 64 random bits ID of the parent transaction or span.
   */
  parent_id?: null | string;
  /**
   * Result of the transaction. For HTTP-related transactions, this should be the status code formatted like 'HTTP 2xx'.
   */
  result?: null | string;
  /**
   * SampleRate applied to the monitored service at the time where this transaction was recorded. Allowed values are [0..1]. A SampleRate <1 indicates that not all spans are recorded.
   */
  sample_rate?: null | number;
  /**
   * Sampled indicates whether or not the full information for a transaction is captured. If a transaction is unsampled no spans and less context information will be reported.
   */
  sampled?: null | boolean;
  /**
   * Session holds optional transaction session information for RUM.
   */
  session?: null | {
    /**
     * ID holds a session ID for grouping a set of related transactions.
     */
    id: string;
    /**
     * Sequence holds an optional sequence number for a transaction within a session. It is not meaningful to compare sequences across two different sessions.
     */
    sequence?: null | number;
    [k: string]: unknown;
  };
  /**
   * SpanCount counts correlated spans.
   */
  span_count: {
    /**
     * Dropped is the number of correlated spans that have been dropped by the APM agent recording the transaction.
     */
    dropped?: null | number;
    /**
     * Started is the number of correlated spans that are recorded.
     */
    started: number;
    [k: string]: unknown;
  };
  /**
   * Timestamp holds the recorded time of the event, UTC based and formatted as microseconds since Unix epoch
   */
  timestamp?: null | number;
  /**
   * TraceID holds the hex encoded 128 random bits ID of the correlated trace.
   */
  trace_id: string;
  /**
   * Type expresses the transaction's type as keyword that has specific relevance within the service's domain, eg: 'request', 'backgroundjob'.
   */
  type: string;
  [k: string]: unknown;
}
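Similarly, a minimal Transaction value (invented data; note that span_count.started is required while name is optional):

const exampleTransaction: Transaction = {
  kind: 'transaction',
  id: '00112233aabbccdd',
  trace_id: '00112233aabbccdd00112233aabbccdd',
  name: 'GET /api/products',
  type: 'request',
  duration: 98.765, // milliseconds, with up to 3 decimal points
  result: 'HTTP 2xx',
  outcome: 'success',
  sampled: true,
  span_count: { started: 3, dropped: 0 },
};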
@ -1,148 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import objectHash from 'object-hash';
import { groupBy, pickBy } from 'lodash';
import { ApmFields } from '../apm_fields';
import { createPicker } from '../utils/create_picker';

const instanceFields = [
  'container.*',
  'kubernetes.*',
  'agent.*',
  'process.*',
  'cloud.*',
  'service.*',
  'host.*',
];

const instancePicker = createPicker(instanceFields);

const metricsetPicker = createPicker([
  'transaction.type',
  'transaction.name',
  'span.type',
  'span.subtype',
]);

export function getBreakdownMetrics(events: ApmFields[]) {
  const txWithSpans = groupBy(
    events.filter(
      (event) => event['processor.event'] === 'span' || event['processor.event'] === 'transaction'
    ),
    (event) => event['transaction.id']
  );

  const metricsets: Map<string, ApmFields> = new Map();

  Object.keys(txWithSpans).forEach((transactionId) => {
    const txEvents = txWithSpans[transactionId];
    const transaction = txEvents.find((event) => event['processor.event'] === 'transaction');
    if (transaction === undefined) {
      return;
    }

    const eventsById: Record<string, ApmFields> = {};
    const activityByParentId: Record<string, Array<{ from: number; to: number }>> = {};
    for (const event of txEvents) {
      const id =
        event['processor.event'] === 'transaction' ? event['transaction.id'] : event['span.id'];
      eventsById[id!] = event;

      const parentId = event['parent.id'];

      if (!parentId) {
        continue;
      }

      if (!activityByParentId[parentId]) {
        activityByParentId[parentId] = [];
      }

      const from = event['@timestamp']! * 1000;
      const to =
        from +
        (event['processor.event'] === 'transaction'
          ? event['transaction.duration.us']!
          : event['span.duration.us']!);

      activityByParentId[parentId].push({ from, to });
    }

    // eslint-disable-next-line guard-for-in
    for (const id in eventsById) {
      const event = eventsById[id];
      const activities = activityByParentId[id] || [];

      const timeStart = event['@timestamp']! * 1000;

      let selfTime = 0;
      let lastMeasurement = timeStart;
      const changeTimestamps = [
        ...new Set([
          timeStart,
          ...activities.flatMap((activity) => [activity.from, activity.to]),
          timeStart +
            (event['processor.event'] === 'transaction'
              ? event['transaction.duration.us']!
              : event['span.duration.us']!),
        ]),
      ];

      for (const timestamp of changeTimestamps) {
        const hasActiveChildren = activities.some(
          (activity) => activity.from < timestamp && activity.to >= timestamp
        );

        if (!hasActiveChildren) {
          selfTime += timestamp - lastMeasurement;
        }

        lastMeasurement = timestamp;
      }

      const instance = pickBy(event, instancePicker);

      const key = {
        '@timestamp': event['@timestamp']! - (event['@timestamp']! % (30 * 1000)),
        'transaction.type': transaction['transaction.type'],
        'transaction.name': transaction['transaction.name'],
        ...pickBy(event, metricsetPicker),
        ...instance,
      };

      const metricsetId = objectHash(key);

      let metricset = metricsets.get(metricsetId);

      if (!metricset) {
        metricset = {
          ...key,
          'processor.event': 'metric',
          'processor.name': 'metric',
          'metricset.name': `span_breakdown`,
          'span.self_time.count': 0,
          'span.self_time.sum.us': 0,
        };

        if (event['processor.event'] === 'transaction') {
          metricset['span.type'] = 'app';
        } else {
          metricset['span.type'] = event['span.type'];
          metricset['span.subtype'] = event['span.subtype'];
        }

        metricsets.set(metricsetId, metricset);
      }

      metricset['span.self_time.count']!++;
      metricset['span.self_time.sum.us']! += selfTime;
    }
  });

  return Array.from(metricsets.values());
}
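To make the self-time sweep above concrete, here is a hypothetical two-event trace and the metricsets it produces (field names follow ApmFields; the timestamps are arbitrary):

// A 100ms transaction with a single 40ms db span, starting 20ms in:
const trace: ApmFields[] = [
  {
    'processor.event': 'transaction',
    'transaction.id': 'tx-1',
    '@timestamp': 1_000, // ms; converted to µs internally
    'transaction.duration.us': 100_000,
    'transaction.type': 'request',
    'transaction.name': 'GET /orders',
  },
  {
    'processor.event': 'span',
    'span.id': 'span-1',
    'parent.id': 'tx-1',
    'transaction.id': 'tx-1',
    '@timestamp': 1_020,
    'span.duration.us': 40_000,
    'span.type': 'db',
    'span.subtype': 'postgresql',
  },
];

// getBreakdownMetrics(trace) yields two span_breakdown metricsets:
// - span.type 'app' (the transaction itself): self time 60,000µs,
//   i.e. 100ms minus the 40ms during which the child span was active;
// - span.type 'db' / span.subtype 'postgresql': self time 40,000µs,
//   the span's full duration, since it has no children of its own.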
@ -1,40 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { ApmFields } from '../apm_fields';
import { aggregate } from '../utils/aggregate';

export function getSpanDestinationMetrics(events: ApmFields[]) {
  const exitSpans = events.filter((event) => !!event['span.destination.service.resource']);

  const metricsets = aggregate(exitSpans, [
    'event.outcome',
    'agent.name',
    'service.environment',
    'service.name',
    'span.destination.service.resource',
    'span.name',
  ]);

  return metricsets.map((metricset) => {
    let count = 0;
    let sum = 0;

    for (const event of metricset.events) {
      count++;
      sum += event['span.duration.us']!;
    }

    return {
      ...metricset.key,
      ['metricset.name']: 'service_destination',
      'span.destination.service.response_time.sum.us': sum,
      'span.destination.service.response_time.count': count,
    };
  });
}
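A sketch of the aggregation above: exit spans sharing the same key fields (and minute bucket) collapse into one service_destination metricset with summed response times. All values are invented:

const exitSpans: ApmFields[] = [
  {
    '@timestamp': 0,
    'service.name': 'opbeans-node',
    'event.outcome': 'success',
    'span.name': 'SELECT FROM orders',
    'span.destination.service.resource': 'postgresql',
    'span.duration.us': 2_000,
  },
  {
    '@timestamp': 30_000, // same minute bucket as the first span
    'service.name': 'opbeans-node',
    'event.outcome': 'success',
    'span.name': 'SELECT FROM orders',
    'span.destination.service.resource': 'postgresql',
    'span.duration.us': 4_000,
  },
];

// getSpanDestinationMetrics(exitSpans) → one metricset with
// 'span.destination.service.response_time.count': 2 and
// 'span.destination.service.response_time.sum.us': 6000.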
@ -1,88 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { sortBy } from 'lodash';
import { ApmFields } from '../apm_fields';
import { aggregate } from '../utils/aggregate';

function sortAndCompressHistogram(histogram?: { values: number[]; counts: number[] }) {
  return sortBy(histogram?.values).reduce(
    (prev, current) => {
      const lastValue = prev.values[prev.values.length - 1];
      if (lastValue === current) {
        prev.counts[prev.counts.length - 1]++;
        return prev;
      }

      prev.counts.push(1);
      prev.values.push(current);

      return prev;
    },
    { values: [] as number[], counts: [] as number[] }
  );
}

export function getTransactionMetrics(events: ApmFields[]) {
  const transactions = events
    .filter((event) => event['processor.event'] === 'transaction')
    .map((transaction) => {
      return {
        ...transaction,
        ['transaction.root']: transaction['parent.id'] === undefined,
      };
    });

  const metricsets = aggregate(transactions, [
    'trace.root',
    'transaction.root',
    'transaction.name',
    'transaction.type',
    'event.outcome',
    'transaction.result',
    'agent.name',
    'service.environment',
    'service.name',
    'service.version',
    'host.name',
    'container.id',
    'kubernetes.pod.name',
    'cloud.account.id',
    'cloud.account.name',
    'cloud.machine.type',
    'cloud.project.id',
    'cloud.project.name',
    'cloud.service.name',
    'service.language.name',
    'service.language.version',
    'service.runtime.name',
    'service.runtime.version',
    'host.os.platform',
    'faas.id',
    'faas.coldstart',
    'faas.trigger.type',
  ]);

  return metricsets.map((metricset) => {
    const histogram = {
      values: [] as number[],
      counts: [] as number[],
    };

    for (const transaction of metricset.events) {
      histogram.counts.push(1);
      histogram.values.push(Number(transaction['transaction.duration.us']));
    }
    return {
      ...metricset.key,
      'metricset.name': 'transaction',
      'transaction.duration.histogram': sortAndCompressHistogram(histogram),
      _doc_count: metricset.events.length,
    };
  });
}
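The compression step is easiest to see on a tiny input; note the counts passed in are ignored and recomputed from the sorted values:

sortAndCompressHistogram({ values: [3_000, 1_000, 3_000], counts: [1, 1, 1] });
// → { values: [1000, 3000], counts: [1, 2] }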
@ -1,43 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import moment from 'moment';
import { pickBy } from 'lodash';
import objectHash from 'object-hash';
import { ApmFields } from '../apm_fields';
import { createPicker } from './create_picker';

export function aggregate(events: ApmFields[], fields: string[]) {
  const picker = createPicker(fields);

  const metricsets = new Map<string, { key: ApmFields; events: ApmFields[] }>();

  function getMetricsetKey(span: ApmFields) {
    const timestamp = moment(span['@timestamp']).valueOf();
    return {
      '@timestamp': timestamp - (timestamp % (60 * 1000)),
      ...pickBy(span, picker),
    };
  }

  for (const event of events) {
    const key = getMetricsetKey(event);
    const id = objectHash(key);

    let metricset = metricsets.get(id);
    if (!metricset) {
      metricset = {
        key: { ...key, 'processor.event': 'metric', 'processor.name': 'metric' },
        events: [],
      };
      metricsets.set(id, metricset);
    }
    metricset.events.push(event);
  }

  return Array.from(metricsets.values());
}
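Bucketing sketch: the key is the picked fields plus '@timestamp' floored to the minute, so the first two events below share a bucket and the third starts a new one:

const buckets = aggregate(
  [
    { '@timestamp': 60_000, 'service.name': 'opbeans-go' },
    { '@timestamp': 90_000, 'service.name': 'opbeans-go' },
    { '@timestamp': 120_000, 'service.name': 'opbeans-go' },
  ],
  ['service.name']
);
// buckets.length === 2: two events in the 60s bucket, one in the 120s bucket.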
@ -1,76 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { Client } from '@elastic/elasticsearch';

export interface ApmElasticsearchOutputWriteTargets {
  transaction: string;
  span: string;
  error: string;
  metric: string;
  app_metric: string;
}

export async function getApmWriteTargets({
  client,
  forceLegacyIndices,
}: {
  client: Client;
  forceLegacyIndices?: boolean;
}): Promise<ApmElasticsearchOutputWriteTargets> {
  if (!forceLegacyIndices) {
    return {
      transaction: 'traces-apm-default',
      span: 'traces-apm-default',
      metric: 'metrics-apm.internal-default',
      app_metric: 'metrics-apm.app-default',
      error: 'logs-apm.error-default',
    };
  }

  const [indicesResponse, datastreamsResponse] = await Promise.all([
    client.indices.getAlias({
      index: 'apm-*',
    }),
    client.indices.getDataStream({
      name: '*apm',
    }),
  ]);

  function getDataStreamName(filter: string) {
    return datastreamsResponse.data_streams.find((stream) => stream.name.includes(filter))?.name;
  }

  function getAlias(filter: string) {
    return Object.keys(indicesResponse)
      .map((key) => {
        return {
          key,
          writeIndexAlias: Object.entries(indicesResponse[key].aliases).find(
            ([_, alias]) => alias.is_write_index
          )?.[0],
        };
      })
      .find(({ key, writeIndexAlias }) => writeIndexAlias && key.includes(filter))
      ?.writeIndexAlias!;
  }
  const metricsTarget = getDataStreamName('metrics-apm') || getAlias('-metric');
  const targets = {
    transaction: getDataStreamName('traces-apm') || getAlias('-transaction'),
    span: getDataStreamName('traces-apm') || getAlias('-span'),
    metric: metricsTarget,
    app_metric: metricsTarget,
    error: getDataStreamName('logs-apm') || getAlias('-error'),
  };

  if (!targets.transaction || !targets.span || !targets.metric || !targets.error) {
    throw new Error('Write targets could not be determined');
  }

  return targets;
}
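Typical call site, inside an async context and assuming a reachable cluster; with data streams (the default, i.e. no forceLegacyIndices) no cluster metadata is consulted at all:

const client = new Client({ node: 'http://localhost:9200' });
const targets = await getApmWriteTargets({ client });
// targets.transaction === 'traces-apm-default'
// targets.error === 'logs-apm.error-default'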
@ -1,74 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { Interval } from './interval';
import { EntityStreams } from './entity_streams';
import { EntityIterable } from './entity_iterable';
import { Serializable } from './serializable';

export class EntityGenerator<TField> implements EntityIterable<TField> {
  private readonly _gen: () => Generator<Serializable<TField>>;
  constructor(
    private readonly interval: Interval,
    dataGenerator: (interval: Interval) => Generator<Serializable<TField>>
  ) {
    this._order = interval.from > interval.to ? 'desc' : 'asc';

    const generator = dataGenerator(this.interval);
    const peek = generator.next();
    const value = peek.value;

    let callCount = 0;
    this._gen = function* () {
      if (callCount === 0) {
        callCount++;
        yield value;
        yield* generator;
      } else {
        yield* dataGenerator(this.interval);
      }
    };

    const peekedNumberOfEvents = peek.done ? 0 : peek.value.serialize().length;
    this._ratePerMinute = interval.estimatedRatePerMinute() * peekedNumberOfEvents;
  }

  private readonly _order: 'desc' | 'asc';
  order() {
    return this._order;
  }

  toArray(): TField[] {
    return Array.from(this);
  }

  merge(...iterables: Array<EntityIterable<TField>>): EntityStreams<TField> {
    return new EntityStreams([this, ...iterables]);
  }

  private readonly _ratePerMinute: number;
  estimatedRatePerMinute() {
    return this._ratePerMinute;
  }

  *[Symbol.iterator]() {
    for (const span of this._gen()) {
      for (const fields of span.serialize()) {
        yield fields;
      }
    }
  }

  async *[Symbol.asyncIterator]() {
    for (const span of this._gen()) {
      for (const fields of span.serialize()) {
        yield fields;
      }
    }
  }
}
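Note the replay trick above: the constructor consumes one element to estimate the rate per minute, the first iteration replays that peeked value plus the original generator, and every later iteration re-invokes dataGenerator, so the iterable can be consumed more than once. A hypothetical use (interval and serializableDoc are stand-ins for values built elsewhere):

const gen = new EntityGenerator(interval, function* () {
  yield serializableDoc; // some Serializable<TField> built elsewhere
});
const first = gen.toArray();
const second = gen.toArray(); // regenerated, same data as `first`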
@ -1,62 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { ApmFields } from './apm/apm_fields';
import { EntityStreams } from './entity_streams';
import { Fields } from './entity';

export interface EntityIterable<TFields extends Fields = ApmFields>
  extends Iterable<TFields>,
    AsyncIterable<TFields> {
  order(): 'desc' | 'asc';

  estimatedRatePerMinute(): number;

  toArray(): ApmFields[];

  merge(...iterables: Array<EntityIterable<TFields>>): EntityStreams<TFields>;
}

export class EntityArrayIterable<TFields extends Fields = ApmFields>
  implements EntityIterable<TFields>
{
  constructor(private fields: TFields[]) {
    const timestamps = fields.filter((f) => f['@timestamp']).map((f) => f['@timestamp']!);
    this._order = timestamps.length > 1 ? (timestamps[0] > timestamps[1] ? 'desc' : 'asc') : 'asc';
    // sort numerically: the default Array#sort would compare epoch timestamps as strings
    const sorted = timestamps.sort((a, b) => a - b);
    const [first, last] = [sorted[0], sorted.slice(-1)[0]];
    const numberOfMinutes = Math.ceil(Math.abs(last - first) / (1000 * 60)) % 60;
    this._ratePerMinute = sorted.length / numberOfMinutes;
  }

  private readonly _order: 'desc' | 'asc';
  order() {
    return this._order;
  }

  private readonly _ratePerMinute: number;
  estimatedRatePerMinute() {
    return this._ratePerMinute;
  }

  async *[Symbol.asyncIterator](): AsyncIterator<TFields> {
    // yield the entities themselves; returning the sync iterator from an async
    // generator would end the async iteration immediately without emitting fields
    yield* this.fields;
  }

  [Symbol.iterator](): Iterator<TFields> {
    return this.fields[Symbol.iterator]();
  }

  merge(...iterables: Array<EntityIterable<TFields>>): EntityStreams<TFields> {
    return new EntityStreams<TFields>([this, ...iterables]);
  }

  toArray(): TFields[] {
    return this.fields;
  }
}
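Wrapping a plain array (hypothetical minimal fields): order is inferred from the first two timestamps and the rate from the overall time range.

const iterable = new EntityArrayIterable([
  { '@timestamp': 1_000 },
  { '@timestamp': 61_000 },
]);
iterable.order(); // 'asc'
iterable.estimatedRatePerMinute(); // 2 events over one minute → 2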
@ -1,54 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { EntityIterable } from './entity_iterable';
import { merge } from './utils/merge_iterable';

export class EntityStreams<TFields> implements EntityIterable<TFields> {
  constructor(private readonly dataGenerators: Array<EntityIterable<TFields>>) {
    const orders = new Set<'desc' | 'asc'>(dataGenerators.map((d) => d.order()));
    if (orders.size > 1) throw Error('Can only combine intervals with the same order()');
    this._order = orders.has('asc') ? 'asc' : 'desc';

    this._ratePerMinute = dataGenerators
      .map((d) => d.estimatedRatePerMinute())
      .reduce((a, b) => a + b, 0);
  }

  private readonly _order: 'desc' | 'asc';
  order() {
    return this._order;
  }

  private readonly _ratePerMinute: number;
  estimatedRatePerMinute() {
    return this._ratePerMinute;
  }

  toArray(): TFields[] {
    return Array.from(this);
  }

  merge(...iterables: Array<EntityIterable<TFields>>): EntityStreams<TFields> {
    return new EntityStreams([...this.dataGenerators, ...iterables]);
  }

  *[Symbol.iterator](): Iterator<TFields> {
    const iterator = merge(this.dataGenerators);
    for (const fields of iterator) {
      yield fields;
    }
  }

  async *[Symbol.asyncIterator](): AsyncIterator<TFields> {
    const iterator = merge(this.dataGenerators);
    for await (const fields of iterator) {
      yield fields;
    }
  }
}
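Merging sketch (iterableA and iterableB are hypothetical inputs): all merged iterables must report the same order(), and the combined rate is the sum of the parts.

const merged = new EntityStreams([iterableA, iterableB]);
merged.estimatedRatePerMinute(); // iterableA's rate + iterableB's rate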
@ -1,122 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import moment, { unitOfTime } from 'moment';
import { random } from 'lodash';
import { EntityIterable } from './entity_iterable';
import { EntityGenerator } from './entity_generator';
import { Serializable } from './serializable';

export function parseInterval(interval: string): {
  intervalAmount: number;
  intervalUnit: unitOfTime.DurationConstructor;
} {
  const args = interval.match(/(\d+)(s|m|h|d)/);
  if (!args || args.length < 3) {
    throw new Error('Failed to parse interval');
  }
  return {
    intervalAmount: Number(args[1]),
    intervalUnit: args[2] as unitOfTime.DurationConstructor,
  };
}

export interface IntervalOptions {
  from: Date;
  to: Date;
  interval: string;
  yieldRate?: number;

  intervalUpper?: number;
  rateUpper?: number;
}

export class Interval implements Iterable<number> {
  constructor(public readonly options: IntervalOptions) {
    const { intervalAmount, intervalUnit } = parseInterval(options.interval);
    this.intervalAmount = intervalAmount;
    this.intervalUnit = intervalUnit;
    this.from = this.options.from;
    this.to = this.options.to;
  }
  public readonly from: Date;
  public readonly to: Date;

  private readonly intervalAmount: number;
  private readonly intervalUnit: unitOfTime.DurationConstructor;
  generator<TField>(
    map: (timestamp: number, index?: number) => Serializable<TField> | Array<Serializable<TField>>
  ): EntityIterable<TField> {
    return new EntityGenerator(this, function* (i) {
      let index = 0;
      for (const x of i) {
        const data = map(x, index);
        if (Array.isArray(data)) {
          yield* data;
        } else {
          yield data;
        }
        index++;
      }
    });
  }
  rate(rate: number): Interval {
    return new Interval({ ...this.options, yieldRate: rate });
  }

  randomize(rateUpper: number, intervalUpper: number): Interval {
    return new Interval({ ...this.options, intervalUpper, rateUpper });
  }

  estimatedRatePerMinute(): number {
    const rate = this.options.rateUpper
      ? Math.max(1, this.options.rateUpper)
      : this.options.yieldRate ?? 1;

    const interval = this.options.intervalUpper ? this.options.intervalUpper : this.intervalAmount;
    const first = moment();
    const last = moment(first).subtract(interval, this.intervalUnit);
    const numberOfMinutes =
      (Math.abs(last.toDate().getTime() - first.toDate().getTime()) / (1000 * 60)) % 60;
    return rate / numberOfMinutes;
  }

  private yieldRateTimestamps(timestamp: number) {
    const rate = this.options.rateUpper
      ? random(this.options.yieldRate ?? 1, Math.max(1, this.options.rateUpper))
      : this.options.yieldRate ?? 1;
    return new Array<number>(rate).fill(timestamp);
  }

  private *_generate(): Iterable<number> {
    if (this.from > this.to) {
      let now = this.from;
      do {
        yield* this.yieldRateTimestamps(now.getTime());
        const amount = this.interval();
        now = new Date(moment(now).subtract(amount, this.intervalUnit).valueOf());
      } while (now > this.to);
    } else {
      let now = this.from;
      do {
        yield* this.yieldRateTimestamps(now.getTime());
        const amount = this.interval();
        now = new Date(moment(now).add(amount, this.intervalUnit).valueOf());
      } while (now < this.to);
    }
  }

  private interval() {
    return this.options.intervalUpper
      ? random(this.intervalAmount, this.options.intervalUpper)
      : this.intervalAmount;
  }

  [Symbol.iterator]() {
    return this._generate()[Symbol.iterator]();
  }
}
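Quick sanity check of the timestamp generation (dates invented):

parseInterval('30s'); // { intervalAmount: 30, intervalUnit: 's' }

const timestamps = [
  ...new Interval({
    from: new Date('2022-01-01T00:00:00Z'),
    to: new Date('2022-01-01T00:02:00Z'),
    interval: '30s',
  }).rate(2),
];
// Ticks at 00:00:00, 00:00:30, 00:01:00 and 00:01:30 (the `to` bound is
// excluded), each repeated twice by rate(2) → 8 timestamps in total.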
@ -1,38 +0,0 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0 and the Server Side Public License, v 1; you may not use this file except
|
||||
* in compliance with, at your election, the Elastic License 2.0 or the Server
|
||||
* Side Public License, v 1.
|
||||
*/
|
||||
|
||||
import { Entity } from '../entity';
|
||||
import { generateShortId } from '../utils/generate_id';
|
||||
import { Kibana } from './kibana';
|
||||
import { StackMonitoringFields } from './stack_monitoring_fields';
|
||||
import { ClusterStats } from './cluster_stats';
|
||||
|
||||
export class Cluster extends Entity<StackMonitoringFields> {
|
||||
kibana(name: string, index: string = '.kibana') {
|
||||
return new Kibana({
|
||||
cluster_uuid: this.fields.cluster_uuid,
|
||||
'kibana_stats.kibana.name': name,
|
||||
'kibana_stats.kibana.uuid': generateShortId(),
|
||||
'kibana_stats.kibana.index': index,
|
||||
type: 'kibana_stats',
|
||||
});
|
||||
}
|
||||
|
||||
stats() {
|
||||
return new ClusterStats({
|
||||
...this.fields,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
export function cluster(name: string) {
|
||||
return new Cluster({
|
||||
cluster_name: name,
|
||||
cluster_uuid: generateShortId(),
|
||||
});
|
||||
}
|
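For context on what this deletion removes, a hedged sketch of how these stack-monitoring entities composed before the refactor. The wiring is illustrative, and the assumption that the Kibana entity exposes a stats() builder mirrors Cluster#stats() rather than code shown in this diff:

// Illustrative only; timestamps and counts are arbitrary.
const monitoredCluster = cluster('cluster-01');

const clusterStatsDoc = monitoredCluster.stats().timestamp(Date.now()).indices(42);

// Assumed: Kibana#stats() returns a KibanaStats builder (not shown in this diff).
const kibanaStatsDoc = monitoredCluster
  .kibana('kibana-01')
  .stats()
  .timestamp(Date.now())
  .requests(0, 100);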
@@ -1,30 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { Serializable } from '../serializable';
import { StackMonitoringFields } from './stack_monitoring_fields';

export class ClusterStats extends Serializable<StackMonitoringFields> {
  constructor(fields: StackMonitoringFields) {
    super(fields);

    this.fields.type = 'cluster_stats';
    this.fields['license.status'] = 'active';
  }

  timestamp(timestamp: number): this {
    super.timestamp(timestamp);
    this.fields['cluster_stats.timestamp'] = new Date(timestamp).toISOString();
    return this;
  }

  indices(count: number): this {
    this.fields['cluster_stats.indices.count'] = count;
    return this;
  }
}
@@ -1,26 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { Serializable } from '../serializable';
import { StackMonitoringFields } from './stack_monitoring_fields';

export class KibanaStats extends Serializable<StackMonitoringFields> {
  timestamp(timestamp: number): this {
    this.fields['kibana_stats.timestamp'] = new Date(timestamp).toISOString();
    this.fields['kibana_stats.response_times.max'] = 250;
    this.fields['kibana_stats.kibana.status'] = 'green';
    this.fields.timestamp = timestamp;
    return this;
  }

  requests(disconnects: number, total: number): this {
    this.fields['kibana_stats.requests.disconnects'] = disconnects;
    this.fields['kibana_stats.requests.total'] = total;
    return this;
  }
}
@@ -1,30 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { Fields } from '../entity';

export type StackMonitoringFields = Fields &
  Partial<{
    cluster_name: string;
    cluster_uuid: string;
    type: string;

    'cluster_stats.timestamp': string;
    'cluster_stats.indices.count': number;
    'license.status': string;

    'kibana_stats.kibana.name': string;
    'kibana_stats.kibana.uuid': string;
    'kibana_stats.kibana.status': string;
    'kibana_stats.kibana.index': string;
    'kibana_stats.requests.disconnects': number;
    'kibana_stats.requests.total': number;
    'kibana_stats.timestamp': string;
    'kibana_stats.response_times.max': number;
    timestamp: number;
  }>;
@@ -1,27 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import { Client } from '@elastic/elasticsearch';
import { ApmFields, Fields } from '../..';

export interface StreamAggregator<TFields extends Fields = ApmFields> {
  name: string;

  getWriteTarget(document: Record<string, any>): string | null;

  process(event: TFields): Fields[] | null;

  flush(): Fields[];

  bootstrapElasticsearch(esClient: Client): Promise<void>;

  getDataStreamName(): string;

  getDimensions(): string[];

  getMappings(): Record<string, any>;
}
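To make the removed contract concrete, here is a hypothetical minimal implementation: it counts transaction events per service and only emits documents at flush time. Every name, field, and mapping below is invented for illustration and is not part of this diff:

// Hypothetical sketch of implementing the removed StreamAggregator interface.
class ServiceTxCountAggregator implements StreamAggregator<ApmFields> {
  name = 'service-tx-count';
  private counts = new Map<string, number>();

  getWriteTarget(document: Record<string, any>): string | null {
    // Assumes the caller passes the (possibly dedotted) document.
    return document['metricset.name'] === 'service_tx_count' ? this.getDataStreamName() : null;
  }

  process(event: ApmFields): Fields[] | null {
    if (event['processor.event'] !== 'transaction') return null;
    const service = event['service.name'] ?? 'unknown';
    this.counts.set(service, (this.counts.get(service) ?? 0) + 1);
    return null; // nothing to emit until flush
  }

  flush(): Fields[] {
    const docs = Array.from(this.counts, ([service, count]) => ({
      'metricset.name': 'service_tx_count',
      'service.name': service,
      count,
    }));
    this.counts.clear();
    return docs as Fields[];
  }

  async bootstrapElasticsearch(esClient: Client): Promise<void> {
    // e.g. ensure templates exist; intentionally a no-op in this sketch
  }

  getDataStreamName(): string {
    return 'metrics-service_tx_count';
  }

  getDimensions(): string[] {
    return ['service.name'];
  }

  getMappings(): Record<string, any> {
    return { properties: { count: { type: 'long' } } };
  }
}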
@@ -1,259 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import moment from 'moment';
import { ApmFields } from './apm/apm_fields';
import { EntityIterable } from './entity_iterable';
import { getTransactionMetrics } from './apm/processors/get_transaction_metrics';
import { getSpanDestinationMetrics } from './apm/processors/get_span_destination_metrics';
import { getBreakdownMetrics } from './apm/processors/get_breakdown_metrics';
import { parseInterval } from './interval';
import { dedot } from './utils/dedot';
import { ApmElasticsearchOutputWriteTargets } from './apm/utils/get_apm_write_targets';
import { Logger } from './utils/create_logger';
import { Fields } from './entity';
import { StreamAggregator } from './stream_aggregator';

export interface StreamProcessorOptions<TFields extends Fields = ApmFields> {
  version?: string;
  processors?: Array<(events: TFields[]) => TFields[]>;
  streamAggregators?: Array<StreamAggregator<TFields>>;
  flushInterval?: string;
  // defaults to 10k
  maxBufferSize?: number;
  // the maximum source events to process, not the maximum documents outputted by the processor
  maxSourceEvents?: number;
  logger?: Logger;
  name?: string;
  // called every time maxBufferSize is processed
  processedCallback?: (processedDocuments: number) => void;
}

export class StreamProcessor<TFields extends Fields = ApmFields> {
  public static readonly apmProcessors = [
    getTransactionMetrics,
    getSpanDestinationMetrics,
    getBreakdownMetrics,
  ];
  public static defaultFlushInterval: number = 10000;
  private readonly processors: Array<(events: TFields[]) => TFields[]>;
  private readonly streamAggregators: Array<StreamAggregator<TFields>>;

  constructor(private readonly options: StreamProcessorOptions<TFields>) {
    const { intervalAmount, intervalUnit } = this.options.flushInterval
      ? parseInterval(this.options.flushInterval)
      : parseInterval('1m');
    this.intervalAmount = intervalAmount;
    this.intervalUnit = intervalUnit;
    this.name = this.options?.name ?? 'StreamProcessor';
    this.version = this.options.version ?? '8.0.0';
    this.versionMajor = Number.parseInt(this.version.split('.')[0], 10);
    this.processors = options.processors ?? [];
    this.streamAggregators = options.streamAggregators ?? [];
  }
  private readonly intervalAmount: number;
  private readonly intervalUnit: any;
  public readonly name: string;
  public readonly version: string;
  private readonly versionMajor: number;

  // TODO move away from chunking and feed this data one by one to processors
  *stream(...eventSources: Array<EntityIterable<TFields>>): Generator<ApmFields, any, any> {
    const maxBufferSize = this.options.maxBufferSize ?? StreamProcessor.defaultFlushInterval;
    const maxSourceEvents = this.options.maxSourceEvents;
    let localBuffer = [];
    let flushAfter: number | null = null;
    let sourceEventsYielded = 0;
    for (const eventSource of eventSources) {
      const order = eventSource.order();
      this.options.logger?.debug(`order: ${order}`);
      for (const event of eventSource) {
        const eventDate = event['@timestamp'] as number;
        localBuffer.push(event);
        if (flushAfter === null && eventDate !== null) {
          flushAfter = this.calculateFlushAfter(eventDate, order);
        }

        yield StreamProcessor.enrich(event, this.version, this.versionMajor);
        sourceEventsYielded++;
        for (const aggregator of this.streamAggregators) {
          const aggregatedEvents = aggregator.process(event);
          if (aggregatedEvents) {
            yield* aggregatedEvents.map((d) =>
              StreamProcessor.enrich(d, this.version, this.versionMajor)
            );
          }
        }

        if (sourceEventsYielded % maxBufferSize === 0) {
          if (this.options?.processedCallback) {
            this.options.processedCallback(maxBufferSize);
          }
        }
        if (maxSourceEvents && sourceEventsYielded % maxBufferSize === 0) {
          this.options.logger?.debug(`${this.name} yielded ${sourceEventsYielded} events`);
        }
        if (maxSourceEvents && sourceEventsYielded >= maxSourceEvents) {
          // yielded the maximum source events, we still want the local buffer to generate derivative documents
          break;
        }
        if (
          localBuffer.length === maxBufferSize ||
          (flushAfter != null &&
            ((order === 'asc' && eventDate > flushAfter) ||
              (order === 'desc' && eventDate < flushAfter)))
        ) {
          const e = new Date(eventDate).toISOString();
          const f = new Date(flushAfter!).toISOString();
          this.options.logger?.debug(
            `${this.name} flush ${localBuffer.length} documents ${order}: ${e} => ${f}`
          );
          for (const processor of this.processors) {
            yield* processor(localBuffer).map((d) =>
              StreamProcessor.enrich(d, this.version, this.versionMajor)
            );
          }
          localBuffer = [];
          flushAfter = this.calculateFlushAfter(flushAfter, order);
        }
      }
      if (maxSourceEvents && sourceEventsYielded >= maxSourceEvents) {
        this.options.logger?.info(
          `${this.name} yielded maximum number of documents: ${maxSourceEvents}`
        );
        break;
      }
    }
    if (localBuffer.length > 0) {
      this.options.logger?.info(
        `${this.name} processing remaining buffer: ${localBuffer.length} items left`
      );
      for (const processor of this.processors) {
        yield* processor(localBuffer).map((d) =>
          StreamProcessor.enrich(d, this.version, this.versionMajor)
        );
      }
      this.options.processedCallback?.apply(this, [localBuffer.length]);
    }
    for (const aggregator of this.streamAggregators) {
      yield* aggregator.flush();
    }
  }

  private calculateFlushAfter(eventDate: number | null, order: 'asc' | 'desc') {
    if (order === 'desc') {
      return moment(eventDate).subtract(this.intervalAmount, this.intervalUnit).valueOf();
    } else {
      return moment(eventDate).add(this.intervalAmount, this.intervalUnit).valueOf();
    }
  }

  async *streamAsync(...eventSources: Array<EntityIterable<TFields>>): AsyncIterable<ApmFields> {
    yield* this.stream(...eventSources);
  }

  *streamToDocument<TDocument>(
    map: (d: ApmFields) => TDocument,
    ...eventSources: Array<EntityIterable<TFields>>
  ): Generator<TDocument> {
    for (const apmFields of this.stream(...eventSources)) {
      yield map(apmFields);
    }
  }
  async *streamToDocumentAsync<TDocument>(
    map: (d: ApmFields) => TDocument,
    ...eventSources: Array<EntityIterable<TFields>>
  ): AsyncIterable<TDocument> & AsyncIterator<TDocument> {
    for await (const apmFields of this.stream(...eventSources)) {
      yield map(apmFields);
    }
  }
  streamToArray(...eventSources: Array<EntityIterable<TFields>>) {
    return Array.from<ApmFields>(this.stream(...eventSources));
  }

  private static enrich(document: ApmFields, version: string, versionMajor: number): ApmFields {
    // see https://github.com/elastic/apm-server/issues/7088 can not be provided as flat key/values
    document.observer = {
      type: 'synthtrace',
      version: version ?? '8.2.0',
      version_major: versionMajor,
    };
    document['service.node.name'] =
      document['service.node.name'] || document['container.id'] || document['host.name'];
    document['ecs.version'] = '1.4';

    return document;
  }

  toDocument(document: ApmFields): Record<string, any> {
    const newDoc: Record<string, any> = {};
    if (!document.observer) {
      document = StreamProcessor.enrich(document, this.version, this.versionMajor);
    }
    dedot(document, newDoc);
    if (typeof newDoc['@timestamp'] === 'number') {
      const timestamp = newDoc['@timestamp'];
      newDoc['@timestamp'] = new Date(timestamp).toISOString();
    }
    return newDoc;
  }

  getDataStreamForEvent(d: Record<string, any>, writeTargets: ApmElasticsearchOutputWriteTargets) {
    if (!d.processor?.event) {
      throw Error("'processor.event' is not set on document, can not determine target index");
    }
    const eventType = d.processor.event as keyof ApmElasticsearchOutputWriteTargets;
    let dataStream = writeTargets[eventType];
    if (eventType === 'metric') {
      if (d.metricset?.name === 'agent_config') {
        dataStream = 'metrics-apm.internal-default';
      } else if (!d.service?.name) {
        dataStream = 'metrics-apm.app-default';
      } else {
        if (!d.transaction && !d.span) {
          dataStream = 'metrics-apm.app-default';
        }
      }
    }
    for (const aggregator of this.streamAggregators) {
      const target = aggregator.getWriteTarget(d);
      if (target) {
        dataStream = target;
        break;
      }
    }
    return dataStream;
  }

  static getIndexForEvent(
    d: Record<string, any>,
    writeTargets: ApmElasticsearchOutputWriteTargets
  ) {
    if (!d.processor?.event) {
      throw Error("'processor.event' is not set on document, can not determine target index");
    }

    const eventType = d.processor.event as keyof ApmElasticsearchOutputWriteTargets;
    return writeTargets[eventType];
  }
}

export async function* streamProcessAsync<TFields>(
  processors: Array<(events: TFields[]) => TFields[]>,
  ...eventSources: Array<EntityIterable<TFields>>
) {
  return new StreamProcessor({ processors }).streamAsync(...eventSources);
}

export function streamProcessToArray<TFields>(
  processors: Array<(events: TFields[]) => TFields[]>,
  ...eventSources: Array<EntityIterable<TFields>>
) {
  return new StreamProcessor({ processors }).streamToArray(...eventSources);
}
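A hedged sketch of how this removed StreamProcessor was driven (the events iterable, its construction, and the write-targets lookup are assumed, e.g. from Interval#generator and get_apm_write_targets):

// Sketch only: `events` is an assumed EntityIterable<ApmFields>.
const processor = new StreamProcessor({
  processors: StreamProcessor.apmProcessors,
  maxSourceEvents: 10_000,
  logger: createLogger(LogLevel.info),
});

for (const event of processor.stream(events)) {
  // Raw events plus derivative metric documents, already enriched with observer fields.
  const doc = processor.toDocument(event);
  // ...index `doc` into processor.getDataStreamForEvent(doc, writeTargets)
}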
@@ -1,61 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { Client } from '@elastic/elasticsearch';
import { Logger } from './create_logger';

export async function cleanWriteTargets({
  targets,
  client,
  logger,
}: {
  targets: string[];
  client: Client;
  logger: Logger;
}) {
  logger.info(`Cleaning indices: ${targets.join(', ')}`);

  const response = await client.deleteByQuery({
    index: targets,
    allow_no_indices: true,
    conflicts: 'proceed',
    refresh: true,
    body: {
      query: {
        match_all: {},
      },
    },
    wait_for_completion: false,
  });

  const task = response.task;

  if (task) {
    await new Promise<void>((resolve, reject) => {
      const pollForTaskCompletion = async () => {
        const taskResponse = await client.tasks.get({
          task_id: String(task),
        });

        logger.debug(
          `Polled for task:\n${JSON.stringify(taskResponse, ['completed', 'error'], 2)}`
        );

        if (taskResponse.completed) {
          resolve();
        } else if (taskResponse.error) {
          reject(taskResponse.error);
        } else {
          setTimeout(pollForTaskCompletion, 2500);
        }
      };

      pollForTaskCompletion();
    });
  }
}
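Usage is a single awaited call; a sketch in which the client construction, log level, and target names are all assumptions rather than values from this diff:

import { Client } from '@elastic/elasticsearch';

const client = new Client({ node: 'http://localhost:9200' });

await cleanWriteTargets({
  client,
  logger: createLogger(LogLevel.debug),
  targets: ['traces-apm-default', 'metrics-apm.internal-default'],
});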
@@ -6,9 +6,7 @@
  * Side Public License, v 1.
  */
 
-function isPromise(val: any): val is Promise<any> {
-  return val && typeof val === 'object' && 'then' in val && typeof val.then === 'function';
-}
+import { logPerf } from './log_perf';
 
 export enum LogLevel {
   trace = 0,
@@ -22,28 +20,9 @@ function getTimeString() {
 }
 
 export function createLogger(logLevel: LogLevel) {
-  function logPerf(name: string, start: bigint) {
-    // eslint-disable-next-line no-console
-    console.debug(
-      getTimeString(),
-      `${name}: ${Number(process.hrtime.bigint() - start) / 1000000}ms`
-    );
-  }
-  return {
-    perf: <T extends any>(name: string, cb: () => T): T => {
-      if (logLevel <= LogLevel.trace) {
-        const start = process.hrtime.bigint();
-        const val = cb();
-        if (isPromise(val)) {
-          val.then(() => {
-            logPerf(name, start);
-          });
-        } else {
-          logPerf(name, start);
-        }
-        return val;
-      }
-      return cb();
+  const logger: Logger = {
+    perf: (name, callback) => {
+      return logPerf(logger, logLevel, name, callback);
     },
     debug: (...args: any[]) => {
       if (logLevel <= LogLevel.debug) {
@@ -64,6 +43,13 @@ export function createLogger(logLevel: LogLevel) {
       }
     },
   };
+
+  return logger;
 }
 
-export type Logger = ReturnType<typeof createLogger>;
+export interface Logger {
+  perf: <T>(name: string, cb: () => T) => T;
+  debug: (...args: any[]) => void;
+  info: (...args: any[]) => void;
+  error: (...args: any[]) => void;
+}
@@ -0,0 +1,121 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import { sortBy } from 'lodash';
// @ts-expect-error
import Histogram from 'native-hdr-histogram';

const ONE_HOUR_IN_MICRO_SECONDS = 1000 * 1000 * 60 * 60;

interface SerializedHistogram {
  counts: number[];
  values: number[];
  total: number;
  sum: number;
}

const MAX_VALUES_TO_TRACK_LOSSLESS = 10;

class LosslessHistogram {
  private backingHistogram: any;

  private readonly min: number;
  private readonly max: number;

  private readonly trackedValues: Map<number, number> = new Map();

  constructor(options?: { min?: number; max?: number }) {
    const { min, max } = options ?? {};
    this.min = min ?? 1;
    this.max = max ?? ONE_HOUR_IN_MICRO_SECONDS;
  }

  private getBackingHistogram() {
    if (this.backingHistogram) {
      return this.backingHistogram;
    }

    const histogram = new Histogram(this.min, this.max);

    this.backingHistogram = histogram;

    if (this.trackedValues.size > 0) {
      this.trackedValues.forEach((count, value) => {
        histogram.record(value, count);
      });
    }

    return histogram;
  }

  record(value: number) {
    const countForValue = this.trackedValues.get(value);
    if (
      this.backingHistogram ||
      (countForValue === undefined && this.trackedValues.size >= MAX_VALUES_TO_TRACK_LOSSLESS)
    ) {
      this.getBackingHistogram().record(value);
      return;
    }

    this.trackedValues.set(value, 1 + (countForValue ?? 0));
  }

  serialize(): SerializedHistogram {
    if (this.backingHistogram) {
      const minRecordedValue = this.backingHistogram.min();
      const maxRecordedValue = this.backingHistogram.max();

      const distribution: Array<{ value: number; count: number }> =
        this.backingHistogram.linearcounts(Math.max(1, (maxRecordedValue - minRecordedValue) / 50));

      const values: number[] = [];
      const counts: number[] = [];

      let sum: number = 0;

      for (const { value, count } of distribution) {
        values.push(value);
        counts.push(count);
        sum += value * count;
      }

      return {
        values,
        counts,
        total: this.backingHistogram.totalCount,
        sum,
      };
    }

    const values: number[] = [];
    const counts: number[] = [];
    let total = 0;
    let sum = 0;

    let sortedValues: Array<{ value: number; count: number }> = [];

    this.trackedValues.forEach((count, value) => {
      sortedValues.push({ count, value });
    });

    sortedValues = sortBy(sortedValues, ({ value }) => value);

    sortedValues.forEach(({ value, count }) => {
      values.push(value);
      counts.push(count);
      total += count;
      sum += value * count;
    });

    return { values, counts, total, sum };
  }
}

export function createLosslessHistogram(options?: { min?: number; max?: number }) {
  return new LosslessHistogram(options);
}
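A small usage sketch of the new helper: up to ten distinct values stay in an exact map, and only an eleventh distinct value spills recording into the backing HDR histogram.

const histogram = createLosslessHistogram();

histogram.record(1500);
histogram.record(1500);
histogram.record(3000);

// Still on the lossless path (2 distinct values), so counts are exact:
// { values: [1500, 3000], counts: [2, 1], total: 3, sum: 6000 }
console.log(histogram.serialize());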
@@ -0,0 +1,115 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { appendHash, Fields, parseInterval } from '@kbn/apm-synthtrace-client';
import moment from 'moment';
import { Duplex, PassThrough } from 'stream';

export function createMetricAggregatorFactory<TFields extends Fields>() {
  return function <TMetric extends Record<string, any>, TOutput extends Record<string, any>>(
    {
      filter,
      getAggregateKey,
      init,
      flushInterval,
    }: {
      filter: (event: TFields) => boolean;
      getAggregateKey: (event: TFields) => string;
      init: (event: TFields) => TMetric;
      flushInterval: string;
    },
    reduce: (metric: TMetric, event: TFields) => void,
    serialize: (metric: TMetric) => TOutput
  ) {
    let cb: (() => void) | undefined;

    const metrics: Map<string, TMetric & { '@timestamp'?: number }> = new Map();

    const { intervalAmount, intervalUnit } = parseInterval(flushInterval);

    let nextFlush: number = Number.MIN_VALUE;

    const flushEveryMs = moment.duration(intervalAmount, intervalUnit).asMilliseconds();

    let toFlush: TMetric[] = [];

    function flush(stream: Duplex, includeCurrentMetrics: boolean, callback?: () => void) {
      const allItems = [...toFlush];

      toFlush = [];

      if (includeCurrentMetrics) {
        allItems.push(...metrics.values());
        metrics.clear();
      }

      while (allItems.length) {
        const next = allItems.shift()!;
        const serialized = serialize(next);
        const shouldWriteNext = stream.push(serialized);
        if (!shouldWriteNext) {
          toFlush = allItems;
          cb = callback;
          return;
        }
      }

      const next = cb;
      cb = undefined;
      next?.();
      callback?.();
    }

    function getNextFlush(timestamp: number) {
      return Math.ceil(timestamp / flushEveryMs) * flushEveryMs;
    }

    return new PassThrough({
      objectMode: true,
      read() {
        flush(this, false, cb);
      },
      final(callback) {
        flush(this, true, callback);
      },
      write(event: TFields, encoding, callback) {
        if (!filter(event)) {
          callback();
          return;
        }

        const timestamp = event['@timestamp']!;

        function writeMetric() {
          const truncatedTimestamp = Math.floor(timestamp / flushEveryMs) * flushEveryMs;

          const key = appendHash(getAggregateKey(event), truncatedTimestamp.toString());

          let set = metrics.get(key);

          if (!set) {
            set = init({ ...event });
            set['@timestamp'] = truncatedTimestamp;
            metrics.set(key, set);
          }

          reduce(set, event);

          callback();
        }

        if (timestamp > nextFlush) {
          nextFlush = getNextFlush(timestamp);
          flush(this, true, writeMetric);
        } else {
          writeMetric();
        }
      },
    });
  };
}
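A hedged sketch of wiring the factory: the result is an object-mode PassThrough that buckets events by key and truncated timestamp, then emits serialized metrics whenever an event crosses the flush boundary. Field names and the metric shape below are illustrative, not from this diff:

import { ApmFields } from '@kbn/apm-synthtrace-client';

const createApmMetricAggregator = createMetricAggregatorFactory<ApmFields>();

// Counts transactions per service in 1m buckets.
const txCountAggregator = createApmMetricAggregator(
  {
    filter: (event) => event['processor.event'] === 'transaction',
    getAggregateKey: (event) => event['service.name'] ?? 'unknown',
    init: (event) => ({ 'service.name': event['service.name'], count: 0 }),
    flushInterval: '1m',
  },
  (metric, event) => {
    metric.count += 1;
  },
  (metric) => metric
);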
packages/kbn-apm-synthtrace/src/lib/utils/log_perf.ts
@@ -0,0 +1,38 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { Logger, LogLevel } from './create_logger';

function isPromise(val: any): val is Promise<any> {
  return val && typeof val === 'object' && 'then' in val && typeof val.then === 'function';
}

function logTo(logger: Logger, name: string, start: bigint) {
  logger.debug(`${name}: ${Number(process.hrtime.bigint() - start) / 1000000}ms`);
}

export const logPerf = <T extends any>(
  logger: Logger,
  logLevel: LogLevel,
  name: string,
  cb: () => T
): T => {
  if (logLevel <= LogLevel.trace) {
    const start = process.hrtime.bigint();
    const val = cb();
    if (isPromise(val)) {
      val.finally(() => {
        logTo(logger, name, start);
      });
    } else {
      logTo(logger, name, start);
    }
    return val;
  }
  return cb();
};
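The extracted helper is consumed through Logger#perf; a one-line usage sketch (expensiveComputation is hypothetical):

const logger = createLogger(LogLevel.trace);
// Logs "generate_events: <n>ms" only when the level is trace or lower.
const result = logger.perf('generate_events', () => expensiveComputation());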
@@ -1,41 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { range } from 'lodash';
import { ApmFields } from '../apm/apm_fields';
import { Fields } from '../entity';
import { EntityIterable } from '../entity_iterable';

export function merge<TField extends Fields>(
  iterables: Array<EntityIterable<TField>>
): Iterable<TField> {
  if (iterables.length === 1) return iterables[0];

  const iterators = iterables.map<{ it: Iterator<ApmFields>; weight: number }>((i) => {
    return { it: i[Symbol.iterator](), weight: Math.max(1, i.estimatedRatePerMinute()) };
  });
  let done = false;
  const myIterable: Iterable<TField> = {
    *[Symbol.iterator]() {
      do {
        const items = iterators.flatMap((i) => range(0, i.weight).map(() => i.it.next()));
        done = items.every((item) => item.done);
        if (!done) {
          yield* items.filter((i) => !i.done).map((i) => i.value);
        }
      } while (!done);
      // Done for the first time: close all iterators
      for (const iterator of iterators) {
        if (typeof iterator.it.return === 'function') {
          iterator.it.return();
        }
      }
    },
  };
  return myIterable;
}
packages/kbn-apm-synthtrace/src/lib/utils/stream_utils.ts
@@ -0,0 +1,66 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import { eachSeries } from 'async';
import MultiStream from 'multistream';
import { Duplex, Readable, Transform } from 'stream';

export function sequential(...streams: Readable[]) {
  return new MultiStream(streams, { objectMode: true });
}

export function fork(...streams: Transform[]): Duplex {
  const proxy = new Transform({
    objectMode: true,
    final(callback) {
      eachSeries(
        streams,
        (stream, cb) => {
          stream.end(cb);
        },
        callback
      );
    },
    transform(chunk, encoding, callback) {
      eachSeries(
        streams,
        (stream, cb) => {
          const shouldWriteNext = stream.write(chunk, cb);
          if (!shouldWriteNext) {
            stream.once('drain', cb);
          }
        },
        () => {
          callback();
        }
      );
    },
  });

  streams.forEach((stream) =>
    stream.on('data', (chunk) => {
      proxy.push(chunk);
    })
  );

  return proxy;
}

export function createFilterTransform(filter: (chunk: any) => boolean): Transform {
  const transform = new Transform({
    objectMode: true,
    transform(event, encoding, callback) {
      if (filter(event)) {
        callback(null, event);
      } else {
        callback(null);
      }
    },
  });

  return transform;
}
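A hedged sketch of the two combinators: sequential drains readables one after another, while fork writes every chunk to each transform and merges whatever they emit. The transforms below are invented for illustration:

import { Readable, Transform } from 'stream';

const double = new Transform({
  objectMode: true,
  transform(chunk, _enc, cb) {
    cb(null, chunk * 2);
  },
});
const negate = new Transform({
  objectMode: true,
  transform(chunk, _enc, cb) {
    cb(null, -chunk);
  },
});

// Every source value is written to both transforms; outputs interleave,
// roughly: 2, -1, 4, -2, 6, -3.
Readable.from([1, 2, 3])
  .pipe(fork(double, negate))
  .on('data', (value) => console.log(value));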
@@ -0,0 +1,26 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { promises } from 'stream';

export function awaitStream<T>(
  stream: NodeJS.ReadableStream | NodeJS.WritableStream | NodeJS.ReadWriteStream
): Promise<T[]> {
  return new Promise((resolve, reject) => {
    const data: T[] = [];

    stream.on('data', (chunk) => {
      data.push(chunk);
    });

    promises
      .finished(stream)
      .then(() => resolve(data))
      .catch(reject);
  });
}
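Finally, a sketch that ties the stream helpers together: collect the output of a filter transform into an array. Here eventStream is an assumed object-mode Readable of synthtrace events:

const transactionsOnly = createFilterTransform(
  (event) => event['processor.event'] === 'transaction'
);

const transactions = await awaitStream(eventStream.pipe(transactionsOnly));
console.log(`collected ${transactions.length} transaction documents`);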
Some files were not shown because too many files have changed in this diff.