Telemetry collection support for Logstash agent driven monitoring. (#184480)

## Summary
Adds telemetry collection support for Logstash agent-driven monitoring.
This change also ensures that Metricbeat-collected monitoring metrics are
still gathered when no cluster UUID is found in the event.
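
For context, below is a minimal sketch of how collector selection can be reasoned about. It is not the PR's implementation: the helper name `resolveLogstashMonitoringModes` and the exact string checks are assumptions inferred from the index names exercised by the unit tests in this change (`monitoring-logstash-8-test`, `.ds-metrics-logstash.stack_monitoring-test`, `.ds-metrics-logstash.node-test`); the real dispatch happens inside `getLogstashStats` through the `logstashMonitoringInstances` map (`self` / `metricbeat` / `agent`).

```ts
/**
 * Illustrative sketch only; the PR's actual dispatch lives in
 * `get_logstash_stats.ts` via `logstashMonitoringInstances`.
 * The helper name and classification rules are assumptions derived from
 * the index names used in this PR's unit tests.
 */
import type { ElasticsearchClient } from '@kbn/core/server';

type LogstashMonitoringMode = 'self' | 'metricbeat' | 'agent';

// Assumed mapping, based on the test fixtures:
//   .monitoring-logstash-*                   -> 'self'       (legacy internal collection)
//   .ds-metrics-logstash.stack_monitoring.*  -> 'metricbeat' (Metricbeat stack monitoring)
//   .ds-metrics-logstash.node-* / .plugins-* -> 'agent'      (Elastic Agent integration)
const classifyIndex = (index: string): LogstashMonitoringMode | undefined => {
  if (index.includes('monitoring-logstash-')) return 'self';
  if (index.includes('metrics-logstash.stack_monitoring')) return 'metricbeat';
  if (index.includes('metrics-logstash.')) return 'agent';
  return undefined;
};

export async function resolveLogstashMonitoringModes(
  client: ElasticsearchClient
): Promise<Set<LogstashMonitoringMode>> {
  // INDEX_PATTERN_LOGSTASH is widened to '*-logstash*' in this PR so that a single
  // cat-indices call sees the legacy .monitoring-logstash-* indices as well as the
  // agent/metricbeat data streams (.ds-metrics-logstash.*).
  const records = await client.cat.indices({ index: '*-logstash*', format: 'json' });
  const modes = new Set<LogstashMonitoringMode>();
  for (const record of records) {
    const mode = record.index ? classifyIndex(record.index) : undefined;
    if (mode) {
      modes.add(mode);
    }
  }
  return modes;
}
```

Relatedly, the `get_all_stats` tests below show that when an event carries no production cluster UUID, the stats are attributed to the monitoring cluster's own UUID (fetched via `client.info()` and surfaced as `monitoringClusterUuid`), which is how the guarantee about missing cluster UUIDs in the summary is realized.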

Co-authored-by: kibanamachine <42973632+kibanamachine@users.noreply.github.com>
Mashhur 2024-06-04 16:49:56 -07:00 committed by GitHub
parent 1cae10d307
commit d750aca73f
29 changed files with 4723 additions and 1260 deletions


@ -127,7 +127,14 @@ export const STANDALONE_CLUSTER_CLUSTER_UUID = '__standalone_cluster__';
export const CCS_REMOTE_PATTERN = '*';
export const INDEX_PATTERN = '.monitoring-*';
export const INDEX_PATTERN_KIBANA = '.monitoring-kibana-*';
export const INDEX_PATTERN_LOGSTASH = '.monitoring-logstash-*';
export const INDEX_PATTERN_LOGSTASH = '*-logstash*';
export const INDEX_PATTERN_LOGSTASH_MONITORING = '.monitoring-logstash-*';
export const INDEX_PATTERN_LOGSTASH_STACK_MONITORING_STATE =
'.ds-metrics-logstash.stack_monitoring.node-*';
export const INDEX_PATTERN_LOGSTASH_STACK_MONITORING_STATS =
'.ds-metrics-logstash.stack_monitoring.node_stats-*';
export const INDEX_PATTERN_LOGSTASH_METRICS_NODE = '.ds-metrics-logstash.node-*';
export const INDEX_PATTERN_LOGSTASH_METRICS_PLUGINS = '.ds-metrics-logstash.plugins-*';
export const INDEX_PATTERN_BEATS = '.monitoring-beats-*';
export const INDEX_ALERTS = '.monitoring-alerts-*';
export const INDEX_PATTERN_ELASTICSEARCH = '.monitoring-es-*';


@ -13,11 +13,11 @@ import {
INDEX_PATTERN_BEATS,
INDEX_PATTERN_ELASTICSEARCH,
INDEX_PATTERN_KIBANA,
INDEX_PATTERN_LOGSTASH,
INDEX_PATTERN_LOGSTASH_MONITORING,
} from '../../../../common/constants';
import { MonitoringConfig } from '../../../types';
const INDEX_PATTERNS = `${INDEX_PATTERN_ELASTICSEARCH},${INDEX_PATTERN_KIBANA},${INDEX_PATTERN_LOGSTASH},${INDEX_PATTERN_BEATS}`;
const INDEX_PATTERNS = `${INDEX_PATTERN_ELASTICSEARCH},${INDEX_PATTERN_KIBANA},${INDEX_PATTERN_LOGSTASH_MONITORING},${INDEX_PATTERN_BEATS}`;
export const useDerivedIndexPattern = (
dataViews: DataViewsPublicPluginStart,


@ -12,7 +12,7 @@ import { MonitoringConfig } from '../../../config';
import {
INDEX_PATTERN_ELASTICSEARCH,
INDEX_PATTERN_KIBANA,
INDEX_PATTERN_LOGSTASH,
INDEX_PATTERN_LOGSTASH_MONITORING,
INDEX_PATTERN_BEATS,
} from '../../../../common/constants';
import { fetchStackProductUsage } from './fetch_stack_product_usage';
@ -31,7 +31,7 @@ export const getStackProductsUsage = async (
> => {
const elasticsearchIndex = getCcsIndexPattern(INDEX_PATTERN_ELASTICSEARCH, availableCcs);
const kibanaIndex = getCcsIndexPattern(INDEX_PATTERN_KIBANA, availableCcs);
const logstashIndex = getCcsIndexPattern(INDEX_PATTERN_LOGSTASH, availableCcs);
const logstashIndex = getCcsIndexPattern(INDEX_PATTERN_LOGSTASH_MONITORING, availableCcs);
const beatsIndex = getCcsIndexPattern(INDEX_PATTERN_BEATS, availableCcs);
const [elasticsearch, kibana, logstash, beats, apm] = await Promise.all([
fetchESUsage(callCluster, clusterUuid, elasticsearchIndex),


@ -10,7 +10,7 @@ import {
INDEX_PATTERN_ELASTICSEARCH,
INDEX_PATTERN_ELASTICSEARCH_ECS,
INDEX_PATTERN_KIBANA,
INDEX_PATTERN_LOGSTASH,
INDEX_PATTERN_LOGSTASH_MONITORING,
INDEX_PATTERN_BEATS,
DS_INDEX_PATTERN_LOGS,
DS_INDEX_PATTERN_METRICS,
@ -63,7 +63,7 @@ export function getLegacyIndexPattern({
indexPattern = INDEX_PATTERN_KIBANA;
break;
case 'logstash':
indexPattern = INDEX_PATTERN_LOGSTASH;
indexPattern = INDEX_PATTERN_LOGSTASH_MONITORING;
break;
case 'apm':
case 'beats':


@ -11,7 +11,7 @@ import { prefixIndexPatternWithCcs } from '../../../../../../common/ccs_utils';
import {
INDEX_PATTERN_ELASTICSEARCH,
INDEX_PATTERN_KIBANA,
INDEX_PATTERN_LOGSTASH,
INDEX_PATTERN_LOGSTASH_MONITORING,
} from '../../../../../../common/constants';
import {
postElasticsearchSettingsInternalMonitoringRequestPayloadRT,
@ -95,7 +95,11 @@ export function internalMonitoringCheckRoute(server: MonitoringCore, npRoute: Ro
const { ccs } = request.body;
const esIndexPattern = prefixIndexPatternWithCcs(config, INDEX_PATTERN_ELASTICSEARCH, ccs);
const kbnIndexPattern = prefixIndexPatternWithCcs(config, INDEX_PATTERN_KIBANA, ccs);
const lsIndexPattern = prefixIndexPatternWithCcs(config, INDEX_PATTERN_LOGSTASH, ccs);
const lsIndexPattern = prefixIndexPatternWithCcs(
config,
INDEX_PATTERN_LOGSTASH_MONITORING,
ccs
);
const indexCounts = await Promise.all([
checkLatestMonitoringIsLegacy(context, esIndexPattern),
checkLatestMonitoringIsLegacy(context, kbnIndexPattern),


@ -0,0 +1,361 @@
[
{
"hits": {
"hits": [
{
"_index": ".ds-metrics-logstash.plugins-default-2024.06.03-000001",
"_id": "HhiXFJ2P-SgCWlwiAAABj98c_a0",
"_score": null,
"_source": {
"@timestamp": "2024-06-03T17:19:30.221Z",
"agent": {
"name": "Mashhurs-MacBook-Pro.local",
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"type": "filebeat",
"ephemeral_id": "893247f4-d8da-4d3d-a9bf-5c7e2831a47b",
"version": "8.13.4"
},
"data_stream": {
"dataset": "logstash.plugins",
"namespace": "default",
"type": "metrics"
},
"ecs": {
"version": "8.0.0"
},
"elastic_agent": {
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"version": "8.13.4",
"snapshot": false
},
"event": {
"agent_id_status": "auth_metadata_missing",
"dataset": "logstash.plugins",
"ingested": "2024-06-03T17:19:40Z"
},
"host": {
"architecture": "x86_64",
"hostname": "mashhurs-macbook-pro.local",
"id": "6F56EC02-BC0B-50C7-A3C4-A414CB348C79",
"ip": [
"10.236.165.121",
"fe80::1848:3d0c:b1c8:47aa",
"fe80::957e:345:d271:b978",
"fe80::9b2a:96c0:39a4:d7ca",
"fe80::ac34:f7ff:fe89:8259",
"fe80::b010:4f46:40d5:9d21",
"fe80::ce81:b1c:bd2c:69e",
"fe80::f4d4:88ff:fe6a:a6b2"
],
"mac": [
"36-73-F9-F5-80-80",
"36-73-F9-F5-80-84",
"36-73-F9-F5-80-88",
"AE-34-F7-89-82-59",
"F2-39-B9-E9-DB-17",
"F2-39-B9-E9-DB-18",
"F2-39-B9-E9-DB-19",
"F2-39-B9-E9-DB-F7",
"F2-39-B9-E9-DB-F8",
"F2-39-B9-E9-DB-F9",
"F4-D4-88-6A-A6-B2",
"F6-D4-88-6A-A6-B2"
],
"name": "mashhurs-macbook-pro.local",
"os": {
"build": "23F79",
"family": "darwin",
"kernel": "23.5.0",
"name": "macOS",
"platform": "darwin",
"type": "macos",
"version": "14.5"
}
},
"input": {
"type": "cel"
},
"logstash": {
"pipeline": {
"elasticsearch": {
"cluster": {
"id": "1n1p"
}
},
"host": {
"address": "127.0.0.1:9600",
"name": "Mashhurs-MacBook-Pro.local"
},
"id": "a3bf686e789a4fbb312192cf2f27667831f9c56b7c9394c1f2d8b933fe5c3ddc",
"name": "test",
"plugin": {
"input": {
"events": {
"out": 2284
},
"flow": {
"throughput": {
"current": 0.996,
"last_1_minute": 0.998
}
},
"id": "0fbbcc99c0d3c4f98b4e9b0c18444f5c5e35af87e85333d37c72016fe5e4fd69",
"name": "generator",
"source": {
"column": "9",
"id": "config_string",
"line": 1,
"protocol": "string"
},
"time": {
"queue_push_duration": {
"ms": 2291305
}
}
},
"type": "input"
}
}
}
},
"sort": [
1717435170221
]
},
{
"_index": ".ds-metrics-logstash.plugins-default-2024.06.03-000001",
"_id": "ZcSDHAOEF2fIqTjgAAABj98c_a0",
"_score": null,
"_source": {
"@timestamp": "2024-06-03T17:19:30.221Z",
"agent": {
"name": "Mashhurs-MacBook-Pro.local",
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"ephemeral_id": "893247f4-d8da-4d3d-a9bf-5c7e2831a47b",
"type": "filebeat",
"version": "8.13.4"
},
"data_stream": {
"dataset": "logstash.plugins",
"namespace": "default",
"type": "metrics"
},
"ecs": {
"version": "8.0.0"
},
"elastic_agent": {
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"version": "8.13.4",
"snapshot": false
},
"event": {
"agent_id_status": "auth_metadata_missing",
"dataset": "logstash.plugins",
"ingested": "2024-06-03T17:19:40Z"
},
"host": {
"architecture": "x86_64",
"hostname": "mashhurs-macbook-pro.local",
"id": "6F56EC02-BC0B-50C7-A3C4-A414CB348C79",
"ip": [
"10.236.165.121",
"fe80::1848:3d0c:b1c8:47aa",
"fe80::957e:345:d271:b978",
"fe80::9b2a:96c0:39a4:d7ca",
"fe80::ac34:f7ff:fe89:8259",
"fe80::b010:4f46:40d5:9d21",
"fe80::ce81:b1c:bd2c:69e",
"fe80::f4d4:88ff:fe6a:a6b2"
],
"mac": [
"36-73-F9-F5-80-80",
"36-73-F9-F5-80-84",
"36-73-F9-F5-80-88",
"AE-34-F7-89-82-59",
"F2-39-B9-E9-DB-17",
"F2-39-B9-E9-DB-18",
"F2-39-B9-E9-DB-19",
"F2-39-B9-E9-DB-F7",
"F2-39-B9-E9-DB-F8",
"F2-39-B9-E9-DB-F9",
"F4-D4-88-6A-A6-B2",
"F6-D4-88-6A-A6-B2"
],
"name": "mashhurs-macbook-pro.local",
"os": {
"build": "23F79",
"family": "darwin",
"kernel": "23.5.0",
"name": "macOS",
"platform": "darwin",
"type": "macos",
"version": "14.5"
}
},
"input": {
"type": "cel"
},
"logstash": {
"pipeline": {
"elasticsearch": {
"cluster": {
"id": "1n1p"
}
},
"host": {
"address": "127.0.0.1:9600",
"name": "Mashhurs-MacBook-Pro.local"
},
"id": "a2acd1139178b1bcac1c6b716462d504553d5295fc887930efa031fdc3126c83",
"name": "another_test",
"plugin": {
"input": {
"events": {
"out": 2286
},
"flow": {
"throughput": {
"current": 0.996,
"last_1_minute": 0.998
}
},
"id": "a0ae5c8de68c84e836256c337eb815ce73f3ec03c70b298958319bdbbe4c52b2",
"name": "heartbeat",
"source": {
"column": "5",
"id": "/Users/mashhur/Dev/elastic/logstash/config/simple.conf",
"line": 2,
"protocol": "file"
},
"time": {
"queue_push_duration": {
"ms": 945
}
}
},
"type": "input"
}
}
}
},
"sort": [
1717435170221
]
},
{
"_index": ".ds-metrics-logstash.plugins-default-2024.06.03-000001",
"_id": "Up_iA7xmQOSt7LGdAAABj98c_a0",
"_score": null,
"_source": {
"@timestamp": "2024-06-03T17:19:30.221Z",
"agent": {
"name": "Mashhurs-MacBook-Pro.local",
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"type": "filebeat",
"ephemeral_id": "893247f4-d8da-4d3d-a9bf-5c7e2831a47b",
"version": "8.13.4"
},
"data_stream": {
"dataset": "logstash.plugins",
"namespace": "default",
"type": "metrics"
},
"ecs": {
"version": "8.0.0"
},
"elastic_agent": {
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"version": "8.13.4",
"snapshot": false
},
"event": {
"agent_id_status": "auth_metadata_missing",
"dataset": "logstash.plugins",
"ingested": "2024-06-03T17:19:40Z"
},
"host": {
"architecture": "x86_64",
"hostname": "mashhurs-macbook-pro.local",
"id": "6F56EC02-BC0B-50C7-A3C4-A414CB348C79",
"ip": [
"10.236.165.121",
"fe80::1848:3d0c:b1c8:47aa",
"fe80::957e:345:d271:b978",
"fe80::9b2a:96c0:39a4:d7ca",
"fe80::ac34:f7ff:fe89:8259",
"fe80::b010:4f46:40d5:9d21",
"fe80::ce81:b1c:bd2c:69e",
"fe80::f4d4:88ff:fe6a:a6b2"
],
"mac": [
"36-73-F9-F5-80-80",
"36-73-F9-F5-80-84",
"36-73-F9-F5-80-88",
"AE-34-F7-89-82-59",
"F2-39-B9-E9-DB-17",
"F2-39-B9-E9-DB-18",
"F2-39-B9-E9-DB-19",
"F2-39-B9-E9-DB-F7",
"F2-39-B9-E9-DB-F8",
"F2-39-B9-E9-DB-F9",
"F4-D4-88-6A-A6-B2",
"F6-D4-88-6A-A6-B2"
],
"name": "mashhurs-macbook-pro.local",
"os": {
"build": "23F79",
"family": "darwin",
"kernel": "23.5.0",
"name": "macOS",
"platform": "darwin",
"type": "macos",
"version": "14.5"
}
},
"input": {
"type": "cel"
},
"logstash": {
"pipeline": {
"elasticsearch": {
"cluster": {
"id": "1n1p"
}
},
"host": {
"address": "127.0.0.1:9600",
"name": "Mashhurs-MacBook-Pro.local"
},
"id": "a3bf686e789a4fbb312192cf2f27667831f9c56b7c9394c1f2d8b933fe5c3ddc",
"name": "test",
"plugin": {
"codec": {
"decode": {
"duration": {
"ms": 0
},
"in": 0,
"out": 0
},
"encode": {
"duration": {
"ms": 394
},
"in": 2281
},
"id": "dots_035e9b95-4fd5-449d-9be1-2ea92eeca1d4",
"name": "dots"
},
"type": "codec"
}
}
}
},
"sort": [
1717435170221
]
}
]
}
}
]


@ -0,0 +1,233 @@
[
{
"hits" : {
"hits" : [
{
"_index": ".ds-metrics-logstash.plugins-default-2024.06.03-000001",
"_id": "6EMlJsVNKzn4Y3TAAAABj98c_a0",
"_score": null,
"_source": {
"@timestamp": "2024-06-03T17:19:30.221Z",
"agent": {
"name": "Mashhurs-MacBook-Pro.local",
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"ephemeral_id": "893247f4-d8da-4d3d-a9bf-5c7e2831a47b",
"type": "filebeat",
"version": "8.13.4"
},
"data_stream": {
"dataset": "logstash.plugins",
"namespace": "default",
"type": "metrics"
},
"ecs": {
"version": "8.0.0"
},
"elastic_agent": {
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"version": "8.13.4",
"snapshot": false
},
"event": {
"agent_id_status": "auth_metadata_missing",
"dataset": "logstash.plugins",
"ingested": "2024-06-03T17:19:40Z"
},
"host": {
"architecture": "x86_64",
"hostname": "mashhurs-macbook-pro.local",
"id": "6F56EC02-BC0B-50C7-A3C4-A414CB348C79",
"ip": [
"10.236.165.121",
"fe80::1848:3d0c:b1c8:47aa",
"fe80::957e:345:d271:b978",
"fe80::9b2a:96c0:39a4:d7ca",
"fe80::ac34:f7ff:fe89:8259",
"fe80::b010:4f46:40d5:9d21",
"fe80::ce81:b1c:bd2c:69e",
"fe80::f4d4:88ff:fe6a:a6b2"
],
"mac": [
"36-73-F9-F5-80-80",
"36-73-F9-F5-80-84",
"36-73-F9-F5-80-88",
"AE-34-F7-89-82-59",
"F2-39-B9-E9-DB-17",
"F2-39-B9-E9-DB-18",
"F2-39-B9-E9-DB-19",
"F2-39-B9-E9-DB-F7",
"F2-39-B9-E9-DB-F8",
"F2-39-B9-E9-DB-F9",
"F4-D4-88-6A-A6-B2",
"F6-D4-88-6A-A6-B2"
],
"name": "mashhurs-macbook-pro.local",
"os": {
"build": "23F79",
"family": "darwin",
"kernel": "23.5.0",
"name": "macOS",
"platform": "darwin",
"type": "macos",
"version": "14.5"
}
},
"input": {
"type": "cel"
},
"logstash": {
"pipeline": {
"elasticsearch": {
"cluster": {
"id": "1nmp"
}
},
"host": {
"address": "127.0.0.1:9600",
"name": "Mashhurs-MacBook-Pro.local"
},
"id": "a3bf686e789a4fbb312192cf2f27667831f9c56b7c9394c1f2d8b933fe5c3ddc",
"name": "test",
"plugin": {
"codec": {
"decode": {
"duration": {
"ms": 2292616
},
"in": 2284,
"out": 2284
},
"encode": {
"duration": {
"ms": 0
},
"in": 0
},
"id": "plain_5fea2751-110a-4919-b777-94791da43bb9",
"name": "plain"
},
"type": "codec"
}
}
}
},
"sort": [
1717435170221
]
},
{
"_index": ".ds-metrics-logstash.plugins-default-2024.06.03-000001",
"_id": "Owe4Rq9c7dsJjf_XAAABj98c_a0",
"_score": null,
"_source": {
"@timestamp": "2024-06-03T17:19:30.221Z",
"agent": {
"name": "Mashhurs-MacBook-Pro.local",
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"ephemeral_id": "893247f4-d8da-4d3d-a9bf-5c7e2831a47b",
"type": "filebeat",
"version": "8.13.4"
},
"data_stream": {
"dataset": "logstash.plugins",
"namespace": "default",
"type": "metrics"
},
"ecs": {
"version": "8.0.0"
},
"elastic_agent": {
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"version": "8.13.4",
"snapshot": false
},
"event": {
"agent_id_status": "auth_metadata_missing",
"dataset": "logstash.plugins",
"ingested": "2024-06-03T17:19:40Z"
},
"host": {
"architecture": "x86_64",
"hostname": "mashhurs-macbook-pro.local",
"id": "6F56EC02-BC0B-50C7-A3C4-A414CB348C79",
"ip": [
"10.236.165.121",
"fe80::1848:3d0c:b1c8:47aa",
"fe80::957e:345:d271:b978",
"fe80::9b2a:96c0:39a4:d7ca",
"fe80::ac34:f7ff:fe89:8259",
"fe80::b010:4f46:40d5:9d21",
"fe80::ce81:b1c:bd2c:69e",
"fe80::f4d4:88ff:fe6a:a6b2"
],
"mac": [
"36-73-F9-F5-80-80",
"36-73-F9-F5-80-84",
"36-73-F9-F5-80-88",
"AE-34-F7-89-82-59",
"F2-39-B9-E9-DB-17",
"F2-39-B9-E9-DB-18",
"F2-39-B9-E9-DB-19",
"F2-39-B9-E9-DB-F7",
"F2-39-B9-E9-DB-F8",
"F2-39-B9-E9-DB-F9",
"F4-D4-88-6A-A6-B2",
"F6-D4-88-6A-A6-B2"
],
"name": "mashhurs-macbook-pro.local",
"os": {
"build": "23F79",
"family": "darwin",
"kernel": "23.5.0",
"name": "macOS",
"platform": "darwin",
"type": "macos",
"version": "14.5"
}
},
"input": {
"type": "cel"
},
"logstash": {
"pipeline": {
"elasticsearch": {
"cluster": {
"id": "1nmp"
}
},
"host": {
"address": "127.0.0.1:9600",
"name": "Mashhurs-MacBook-Pro.local"
},
"id": "a2acd1139178b1bcac1c6b716462d504553d5295fc887930efa031fdc3126c83",
"name": "another_test",
"plugin": {
"codec": {
"decode": {
"duration": {
"ms": 0
},
"in": 0,
"out": 0
},
"encode": {
"duration": {
"ms": 0
},
"in": 0
},
"id": "plain_a9b27bb6-f9dd-4c54-bb93-c61f1b9001b3",
"name": "plain"
},
"type": "codec"
}
}
}
},
"sort": [
1717435170221
]
}
]
}
}]


@ -0,0 +1,359 @@
[
{
"hits": {
"hits": [
{
"_index": ".ds-metrics-logstash.plugins-default-2024.06.03-000001",
"_id": "J8oAnuIIe2zBYQGpAAABj98c_a0",
"_score": null,
"_source": {
"@timestamp": "2024-06-03T17:19:30.221Z",
"agent": {
"name": "Mashhurs-MacBook-Pro.local",
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"type": "filebeat",
"ephemeral_id": "893247f4-d8da-4d3d-a9bf-5c7e2831a47b",
"version": "8.13.4"
},
"data_stream": {
"dataset": "logstash.plugins",
"namespace": "default",
"type": "metrics"
},
"ecs": {
"version": "8.0.0"
},
"elastic_agent": {
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"version": "8.13.4",
"snapshot": false
},
"event": {
"agent_id_status": "auth_metadata_missing",
"dataset": "logstash.plugins",
"ingested": "2024-06-03T17:19:40Z"
},
"host": {
"architecture": "x86_64",
"hostname": "mashhurs-macbook-pro.local",
"id": "6F56EC02-BC0B-50C7-A3C4-A414CB348C79",
"ip": [
"10.236.165.121",
"fe80::1848:3d0c:b1c8:47aa",
"fe80::957e:345:d271:b978",
"fe80::9b2a:96c0:39a4:d7ca",
"fe80::ac34:f7ff:fe89:8259",
"fe80::b010:4f46:40d5:9d21",
"fe80::ce81:b1c:bd2c:69e",
"fe80::f4d4:88ff:fe6a:a6b2"
],
"mac": [
"36-73-F9-F5-80-80",
"36-73-F9-F5-80-84",
"36-73-F9-F5-80-88",
"AE-34-F7-89-82-59",
"F2-39-B9-E9-DB-17",
"F2-39-B9-E9-DB-18",
"F2-39-B9-E9-DB-19",
"F2-39-B9-E9-DB-F7",
"F2-39-B9-E9-DB-F8",
"F2-39-B9-E9-DB-F9",
"F4-D4-88-6A-A6-B2",
"F6-D4-88-6A-A6-B2"
],
"name": "mashhurs-macbook-pro.local",
"os": {
"build": "23F79",
"family": "darwin",
"kernel": "23.5.0",
"name": "macOS",
"platform": "darwin",
"type": "macos",
"version": "14.5"
}
},
"input": {
"type": "cel"
},
"logstash": {
"pipeline": {
"elasticsearch": {
"cluster": {
"id": "mnmp"
}
},
"host": {
"address": "127.0.0.1:9600",
"name": "Mashhurs-MacBook-Pro.local"
},
"id": "a2acd1139178b1bcac1c6b716462d504553d5295fc887930efa031fdc3126c83",
"name": "another_test",
"plugin": {
"codec": {
"decode": {
"duration": {
"ms": 0
},
"in": 0,
"out": 0
},
"encode": {
"duration": {
"ms": 0
},
"in": 0
},
"id": "plain_2c8a8f0b-f3a2-48a2-b835-21ea67302a3b",
"name": "plain"
},
"type": "codec"
}
}
}
},
"sort": [
1717435170221
]
},
{
"_index": ".ds-metrics-logstash.plugins-default-2024.06.03-000001",
"_id": "14mHJFH_SEc7Mq-fAAABj98c_a0",
"_score": null,
"_source": {
"@timestamp": "2024-06-03T17:19:30.221Z",
"agent": {
"name": "Mashhurs-MacBook-Pro.local",
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"ephemeral_id": "893247f4-d8da-4d3d-a9bf-5c7e2831a47b",
"type": "filebeat",
"version": "8.13.4"
},
"data_stream": {
"dataset": "logstash.plugins",
"namespace": "default",
"type": "metrics"
},
"ecs": {
"version": "8.0.0"
},
"elastic_agent": {
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"version": "8.13.4",
"snapshot": false
},
"event": {
"agent_id_status": "auth_metadata_missing",
"dataset": "logstash.plugins",
"ingested": "2024-06-03T17:19:40Z"
},
"host": {
"architecture": "x86_64",
"hostname": "mashhurs-macbook-pro.local",
"id": "6F56EC02-BC0B-50C7-A3C4-A414CB348C79",
"ip": [
"10.236.165.121",
"fe80::1848:3d0c:b1c8:47aa",
"fe80::957e:345:d271:b978",
"fe80::9b2a:96c0:39a4:d7ca",
"fe80::ac34:f7ff:fe89:8259",
"fe80::b010:4f46:40d5:9d21",
"fe80::ce81:b1c:bd2c:69e",
"fe80::f4d4:88ff:fe6a:a6b2"
],
"mac": [
"36-73-F9-F5-80-80",
"36-73-F9-F5-80-84",
"36-73-F9-F5-80-88",
"AE-34-F7-89-82-59",
"F2-39-B9-E9-DB-17",
"F2-39-B9-E9-DB-18",
"F2-39-B9-E9-DB-19",
"F2-39-B9-E9-DB-F7",
"F2-39-B9-E9-DB-F8",
"F2-39-B9-E9-DB-F9",
"F4-D4-88-6A-A6-B2",
"F6-D4-88-6A-A6-B2"
],
"name": "mashhurs-macbook-pro.local",
"os": {
"build": "23F79",
"family": "darwin",
"kernel": "23.5.0",
"name": "macOS",
"platform": "darwin",
"type": "macos",
"version": "14.5"
}
},
"input": {
"type": "cel"
},
"logstash": {
"pipeline": {
"elasticsearch": {
"cluster": {
"id": "mnmp"
}
},
"host": {
"address": "127.0.0.1:9600",
"name": "Mashhurs-MacBook-Pro.local"
},
"id": "a2acd1139178b1bcac1c6b716462d504553d5295fc887930efa031fdc3126c83",
"name": "another_test",
"plugin": {
"codec": {
"decode": {
"duration": {
"ms": 0
},
"in": 0,
"out": 0
},
"encode": {
"duration": {
"ms": 5500
},
"in": 2286
},
"id": "rubydebug_ccce8eeb-10ce-4565-8642-7d8986d41995",
"name": "rubydebug"
},
"type": "codec"
}
}
}
},
"sort": [
1717435170221
]
},
{
"_index": ".ds-metrics-logstash.plugins-default-2024.06.03-000001",
"_id": "Csohjtytgt1uKi_vAAABj98c_a0",
"_score": null,
"_source": {
"@timestamp": "2024-06-03T17:19:30.221Z",
"agent": {
"name": "Mashhurs-MacBook-Pro.local",
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"ephemeral_id": "893247f4-d8da-4d3d-a9bf-5c7e2831a47b",
"type": "filebeat",
"version": "8.13.4"
},
"data_stream": {
"dataset": "logstash.plugins",
"namespace": "default",
"type": "metrics"
},
"ecs": {
"version": "8.0.0"
},
"elastic_agent": {
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"version": "8.13.4",
"snapshot": false
},
"event": {
"agent_id_status": "auth_metadata_missing",
"dataset": "logstash.plugins",
"ingested": "2024-06-03T17:19:40Z"
},
"host": {
"architecture": "x86_64",
"hostname": "mashhurs-macbook-pro.local",
"id": "6F56EC02-BC0B-50C7-A3C4-A414CB348C79",
"ip": [
"10.236.165.121",
"fe80::1848:3d0c:b1c8:47aa",
"fe80::957e:345:d271:b978",
"fe80::9b2a:96c0:39a4:d7ca",
"fe80::ac34:f7ff:fe89:8259",
"fe80::b010:4f46:40d5:9d21",
"fe80::ce81:b1c:bd2c:69e",
"fe80::f4d4:88ff:fe6a:a6b2"
],
"mac": [
"36-73-F9-F5-80-80",
"36-73-F9-F5-80-84",
"36-73-F9-F5-80-88",
"AE-34-F7-89-82-59",
"F2-39-B9-E9-DB-17",
"F2-39-B9-E9-DB-18",
"F2-39-B9-E9-DB-19",
"F2-39-B9-E9-DB-F7",
"F2-39-B9-E9-DB-F8",
"F2-39-B9-E9-DB-F9",
"F4-D4-88-6A-A6-B2",
"F6-D4-88-6A-A6-B2"
],
"name": "mashhurs-macbook-pro.local",
"os": {
"build": "23F79",
"family": "darwin",
"kernel": "23.5.0",
"name": "macOS",
"platform": "darwin",
"type": "macos",
"version": "14.5"
}
},
"input": {
"type": "cel"
},
"logstash": {
"pipeline": {
"elasticsearch": {
"cluster": {
"id": "mnmp"
}
},
"host": {
"address": "127.0.0.1:9600",
"name": "Mashhurs-MacBook-Pro.local"
},
"id": "a3bf686e789a4fbb312192cf2f27667831f9c56b7c9394c1f2d8b933fe5c3ddc",
"name": "test",
"plugin": {
"output": {
"events": {
"in": 2281,
"out": 2281
},
"flow": {
"worker_millis_per_event": {
"current": 0.636,
"last_1_minute": 0.41
},
"worker_utilization": {
"current": 0.063,
"last_1_minute": 0.041
}
},
"id": "0cd257dc85333d8d8e627ca21d9065b34d279e95f8c17f317addb3f3f731d3e0",
"name": "stdout",
"source": {
"column": "64",
"id": "config_string",
"line": 1,
"protocol": "string"
},
"time": {
"duration": {
"ms": 1510
}
}
},
"type": "output"
}
}
}
},
"sort": [
1717435170221
]
}
]
}
}
]


@ -40,6 +40,11 @@
"workers" : 12
}
}
},
"elasticsearch": {
"cluster": {
"id": "1nmp"
}
}
},
"metricset": {
@ -95,6 +100,11 @@
"workers" : 1
}
}
},
"elasticsearch": {
"cluster": {
"id": "1nmp"
}
}
},
"metricset": {
@ -150,6 +160,11 @@
"workers" : 44
}
}
},
"elasticsearch": {
"cluster": {
"id": "1nmp"
}
}
},
"metricset": {
@ -205,6 +220,11 @@
"workers" : 12
}
}
},
"elasticsearch": {
"cluster": {
"id": "1nmp"
}
}
},
"metricset": {


@ -49,6 +49,11 @@
"workers" : 1
}
}
},
"elasticsearch": {
"cluster": {
"id": "mnmp"
}
}
},
"metricset": {
@ -113,6 +118,11 @@
}
}
}
},
"elasticsearch": {
"cluster": {
"id": "mnmp"
}
}
},
"metricset": {
@ -137,6 +147,11 @@
"_score" : 0.0,
"_source" : {
"logstash": {
"elasticsearch": {
"cluster": {
"id": "mnmp"
}
},
"node": {
"state": {
"pipeline" : {
@ -201,6 +216,11 @@
"_score" : 0.0,
"_source" : {
"logstash": {
"elasticsearch": {
"cluster": {
"id": "mnmp"
}
},
"node": {
"state": {
"pipeline" : {
@ -265,6 +285,11 @@
"_score" : 0.0,
"_source" : {
"logstash": {
"elasticsearch": {
"cluster": {
"id": "mnmp"
}
},
"node": {
"state": {
"pipeline" : {
@ -329,6 +354,11 @@
"_score" : 0.0,
"_source" : {
"logstash": {
"elasticsearch": {
"cluster": {
"id": "mnmp"
}
},
"node": {
"state": {
"pipeline" : {
@ -393,6 +423,11 @@
"_score" : 0.0,
"_source" : {
"logstash": {
"elasticsearch": {
"cluster": {
"id": "mnmp"
}
},
"node": {
"state": {
"pipeline" : {
@ -457,6 +492,11 @@
"_score" : 0.0,
"_source" : {
"logstash": {
"elasticsearch": {
"cluster": {
"id": "mnmp"
}
},
"node": {
"state": {
"pipeline" : {
@ -521,6 +561,11 @@
"_score" : 0.0,
"_source" : {
"logstash": {
"elasticsearch": {
"cluster": {
"id": "mnmp"
}
},
"node": {
"state": {
"pipeline" : {
@ -585,6 +630,11 @@
"_score" : 0.0,
"_source" : {
"logstash": {
"elasticsearch": {
"cluster": {
"id": "mnmp"
}
},
"node": {
"state": {
"pipeline" : {
@ -649,6 +699,11 @@
"_score" : 0.0,
"_source" : {
"logstash": {
"elasticsearch": {
"cluster": {
"id": "mnmp"
}
},
"node": {
"state": {
"pipeline" : {
@ -713,6 +768,11 @@
"_score" : 0.0,
"_source" : {
"logstash": {
"elasticsearch": {
"cluster": {
"id": "mnmp"
}
},
"node": {
"state": {
"pipeline" : {
@ -777,6 +837,11 @@
"_score" : 0.0,
"_source" : {
"logstash": {
"elasticsearch": {
"cluster": {
"id": "mnmp"
}
},
"node": {
"state": {
"pipeline" : {
@ -841,6 +906,11 @@
"_score" : 0.0,
"_source" : {
"logstash": {
"elasticsearch": {
"cluster": {
"id": "mnmp"
}
},
"node": {
"state": {
"pipeline" : {
@ -905,6 +975,11 @@
"_score" : 0.0,
"_source" : {
"logstash": {
"elasticsearch": {
"cluster": {
"id": "mnmp"
}
},
"node": {
"state": {
"pipeline" : {
@ -969,6 +1044,11 @@
"_score" : 0.0,
"_source" : {
"logstash": {
"elasticsearch": {
"cluster": {
"id": "mnmp"
}
},
"node": {
"state": {
"pipeline" : {
@ -1033,6 +1113,11 @@
"_score" : 0.0,
"_source" : {
"logstash": {
"elasticsearch": {
"cluster": {
"id": "mnmp"
}
},
"node": {
"state": {
"pipeline" : {


@ -0,0 +1,393 @@
[
{
"hits" : {
"hits": [
{
"_index": ".ds-metrics-logstash.node-default-2024.06.03-000001",
"_id": "0NAH348BglF7bSquK97A",
"_source": {
"logstash": {
"node": {
"stats": {
"logstash": {
"pipeline": {
"batch_delay": 50,
"batch_size": 125,
"workers": 10
},
"pipelines": [
"another_test",
"test"
],
"http_address": "127.0.0.1:9600",
"host": "Mashhurs-MacBook-Pro.local",
"name": "Mashhurs-MacBook-Pro.local",
"ephemeral_id": "224e3687-15b2-4e91-84bb-dbb785742c73",
"uuid": "a755552f-9ef8-4f01-abae-394a59352f2d",
"version": "8.15.0",
"status": "green"
}
}
},
"elasticsearch": {
"cluster": {
"id": [
"1n1p"
]
}
}
},
"input": {
"type": "cel"
},
"agent": {
"name": "Mashhurs.local",
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"ephemeral_id": "893247f4-d8da-4d3d-a9bf-5c7e2831a47b",
"type": "filebeat",
"version": "8.13.4"
},
"@timestamp": "2024-06-03T16:55:30.213Z",
"ecs": {
"version": "8.0.0"
},
"data_stream": {
"namespace": "default",
"type": "metrics",
"dataset": "logstash.node"
},
"elastic_agent": {
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"version": "8.13.4",
"snapshot": false
},
"host": {
"id": "cf37c6fa-2f1a-41e2-9a89-36b420a8b9a5"
}
}
},
{
"_index": ".ds-metrics-logstash.node-default-2024.06.03-000001",
"_id": "1NAH348BglF7bSquK97A",
"_source": {
"logstash": {
"node": {
"stats": {
"logstash": {
"pipeline": {
"batch_delay": 50,
"batch_size": 125,
"workers": 10
},
"pipelines": [
"another_test",
"test"
],
"http_address": "127.0.0.1:9600",
"host": "Mashhurs-MacBook-Pro.local",
"name": "Mashhurs-MacBook-Pro.local",
"ephemeral_id": "224e3687-15b2-4e91-84bb-dbb785742c73",
"uuid": "a755552f-9ef8-4f01-abae-394a59352f2d",
"version": "8.15.0",
"status": "green"
}
}
},
"elasticsearch": {
"cluster": {
"id": [
"1nmp"
]
}
}
},
"input": {
"type": "cel"
},
"agent": {
"name": "Mashhurs.local",
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"ephemeral_id": "893247f4-d8da-4d3d-a9bf-5c7e2831a47b",
"type": "filebeat",
"version": "8.13.4"
},
"@timestamp": "2024-06-03T16:55:30.213Z",
"ecs": {
"version": "8.0.0"
},
"data_stream": {
"namespace": "default",
"type": "metrics",
"dataset": "logstash.node"
},
"elastic_agent": {
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"version": "8.13.4",
"snapshot": false
},
"host": {
"id": "47a70feb-3cb5-4618-8670-2c0bada61acd"
}
}
},
{
"_index": ".ds-metrics-logstash.node-default-2024.06.03-000001",
"_id": "2NAH348BglF7bSquK97A",
"_source": {
"logstash": {
"node": {
"stats": {
"logstash": {
"pipeline": {
"batch_delay": 50,
"batch_size": 125,
"workers": 10
},
"pipelines": [
"another_test",
"test"
],
"http_address": "127.0.0.1:9600",
"host": "Mashhurs-MacBook-Pro.local",
"name": "Mashhurs-MacBook-Pro.local",
"ephemeral_id": "224e3687-15b2-4e91-84bb-dbb785742c73",
"uuid": "a755552f-9ef8-4f01-abae-394a59352f2d",
"version": "8.15.0",
"status": "green"
}
}
},
"elasticsearch": {
"cluster": {
"id": [
"1nmp"
]
}
}
},
"input": {
"type": "cel"
},
"agent": {
"name": "Mashhurs.local",
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"ephemeral_id": "893247f4-d8da-4d3d-a9bf-5c7e2831a47b",
"type": "filebeat",
"version": "8.13.4"
},
"@timestamp": "2024-06-03T16:55:30.213Z",
"ecs": {
"version": "8.0.0"
},
"data_stream": {
"namespace": "default",
"type": "metrics",
"dataset": "logstash.node"
},
"elastic_agent": {
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"version": "8.13.4",
"snapshot": false
},
"host": {
"id": "5a65d966-0330-4bd7-82f2-ee81040c13cf"
}
}
},
{
"_index": ".ds-metrics-logstash.node-default-2024.06.03-000001",
"_id": "3NAH348BglF7bSquK97A",
"_source": {
"logstash": {
"node": {
"stats": {
"logstash": {
"pipeline": {
"batch_delay": 50,
"batch_size": 125,
"workers": 10
},
"pipelines": [
"another_test",
"test"
],
"http_address": "127.0.0.1:9600",
"host": "Mashhurs-MacBook-Pro.local",
"name": "Mashhurs-MacBook-Pro.local",
"ephemeral_id": "224e3687-15b2-4e91-84bb-dbb785742c73",
"uuid": "a755552f-9ef8-4f01-abae-394a59352f2d",
"version": "8.15.0",
"status": "green"
}
}
},
"elasticsearch": {
"cluster": {
"id": [
"mnmp"
]
}
}
},
"input": {
"type": "cel"
},
"agent": {
"name": "Mashhurs.local",
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"ephemeral_id": "893247f4-d8da-4d3d-a9bf-5c7e2831a47b",
"type": "filebeat",
"version": "8.13.4"
},
"@timestamp": "2024-06-03T16:55:30.213Z",
"ecs": {
"version": "8.0.0"
},
"data_stream": {
"namespace": "default",
"type": "metrics",
"dataset": "logstash.node"
},
"elastic_agent": {
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"version": "8.13.4",
"snapshot": false
},
"host": {
"id": "2fcd4161-e08f-4eea-818b-703ea3ec6389"
}
}
},
{
"_index": ".ds-metrics-logstash.node-default-2024.06.03-000001",
"_id": "4NAH348BglF7bSquK97A",
"_source": {
"logstash": {
"node": {
"stats": {
"logstash": {
"pipeline": {
"batch_delay": 50,
"batch_size": 125,
"workers": 10
},
"pipelines": [
"another_test",
"test"
],
"http_address": "127.0.0.1:9600",
"host": "Mashhurs-MacBook-Pro.local",
"name": "Mashhurs-MacBook-Pro.local",
"ephemeral_id": "224e3687-15b2-4e91-84bb-dbb785742c73",
"uuid": "a755552f-9ef8-4f01-abae-394a59352f2d",
"version": "8.15.0",
"status": "green"
}
}
},
"elasticsearch": {
"cluster": {
"id": [
"mnmp"
]
}
}
},
"input": {
"type": "cel"
},
"agent": {
"name": "Mashhurs.local",
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"ephemeral_id": "893247f4-d8da-4d3d-a9bf-5c7e2831a47b",
"type": "filebeat",
"version": "8.13.4"
},
"@timestamp": "2024-06-03T16:55:30.213Z",
"ecs": {
"version": "8.0.0"
},
"data_stream": {
"namespace": "default",
"type": "metrics",
"dataset": "logstash.node"
},
"elastic_agent": {
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"version": "8.13.4",
"snapshot": false
},
"host": {
"id": "c6785d63-6e5f-42c2-839d-5edf139b7c19"
}
}
},
{
"_index": ".ds-metrics-logstash.node-default-2024.06.03-000001",
"_id": "5NAH348BglF7bSquK97A",
"_source": {
"logstash": {
"node": {
"stats": {
"logstash": {
"pipeline": {
"batch_delay": 50,
"batch_size": 125,
"workers": 10
},
"pipelines": [
"another_test",
"test"
],
"http_address": "127.0.0.1:9600",
"host": "Mashhurs-MacBook-Pro.local",
"name": "Mashhurs-MacBook-Pro.local",
"ephemeral_id": "224e3687-15b2-4e91-84bb-dbb785742c73",
"uuid": "a755552f-9ef8-4f01-abae-394a59352f2d",
"version": "8.15.0",
"status": "green"
}
}
},
"elasticsearch": {
"cluster": {
"id": [
"mnmp"
]
}
}
},
"input": {
"type": "cel"
},
"agent": {
"name": "Mashhurs.local",
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"ephemeral_id": "893247f4-d8da-4d3d-a9bf-5c7e2831a47b",
"type": "filebeat",
"version": "8.13.4"
},
"@timestamp": "2024-06-03T16:55:30.213Z",
"ecs": {
"version": "8.0.0"
},
"data_stream": {
"namespace": "default",
"type": "metrics",
"dataset": "logstash.node"
},
"elastic_agent": {
"id": "ef37141b-605e-4b6e-a69f-ec525f8dcdd4",
"version": "8.13.4",
"snapshot": false
},
"host": {
"id": "bc6ef6f2-ecce-4328-96a2-002de41a144d"
}
}
}
]
}
}
]


@ -43,9 +43,6 @@
"_id" : "rC2jSncB1VO1nvgvsKx6",
"_score" : null,
"_source" : {
"agent" : {
"type" : "metricbeat"
},
"logstash_stats" : {
"logstash" : {
"version" : "7.8.0",


@ -10,12 +10,26 @@ import sinon from 'sinon';
import { getStackStats, getAllStats, handleAllStats } from './get_all_stats';
import { ESClusterStats } from './get_es_stats';
import { KibanaStats } from './get_kibana_stats';
import { LogstashStatsByClusterUuid } from './get_logstash_stats';
import { LogstashStatsByClusterUuid } from './logstash_monitoring';
import { CatIndicesResponse } from '@elastic/elasticsearch/lib/api/types';
describe('get_all_stats', () => {
const timestamp = Date.now();
const searchMock = sinon.stub();
const callCluster = { search: searchMock } as unknown as ElasticsearchClient;
const infoMock = sinon.stub().returns({ cluster_uuid: 'cluster-uuid-1' });
const catIndicesMock = { indices: sinon.stub() };
const records: CatIndicesResponse = [
{
index: 'monitoring-logstash-8-test',
},
];
catIndicesMock.indices.returns(Promise.resolve(records));
const callCluster = {
search: searchMock,
info: infoMock,
cat: catIndicesMock,
} as unknown as ElasticsearchClient;
afterEach(() => {
searchMock.reset();
});
@ -160,6 +174,7 @@ describe('get_all_stats', () => {
collection_types: {
internal_collection: 1,
},
monitoringClusterUuid: 'cluster-uuid-1',
pipelines: {},
plugins: [],
},
@ -174,16 +189,13 @@ describe('get_all_stats', () => {
.onCall(1)
.returns(Promise.resolve(kibanaStatsResponse))
.onCall(2)
.returns(Promise.resolve(logstashStatsResponse)) // to define if it is internal monitoring or not
.returns(Promise.resolve(logstashStatsResponse))
.returns(Promise.resolve({}))
.onCall(3)
.returns(Promise.resolve({})) // Beats stats
.returns(Promise.resolve({}))
.onCall(4)
.returns(Promise.resolve({})) // Beats state
.returns(Promise.resolve(logstashStatsResponse))
.onCall(5)
.returns(Promise.resolve(logstashStatsResponse)) // Logstash stats
.onCall(6)
.returns(Promise.resolve({})); // Logstash state
.returns(Promise.resolve(logstashStatsResponse));
expect(await getAllStats(['a'], callCluster, timestamp, 1)).toStrictEqual(allClusters);
});


@ -19,7 +19,8 @@ import {
import { getElasticsearchStats, ESClusterStats } from './get_es_stats';
import { getKibanaStats, KibanaStats } from './get_kibana_stats';
import { getBeatsStats, BeatsStatsByClusterUuid } from './get_beats_stats';
import { getLogstashStats, LogstashStatsByClusterUuid } from './get_logstash_stats';
import { getLogstashStats } from './get_logstash_stats';
import { LogstashStatsByClusterUuid } from './logstash_monitoring';
/**
* Get statistics for all products joined by Elasticsearch cluster.
@ -68,7 +69,7 @@ export function handleAllStats(
beats: BeatsStatsByClusterUuid;
}
) {
return clusters.map((cluster) => {
const mappedClusters = clusters.map((cluster) => {
const stats = {
...cluster,
stack_stats: {
@ -84,6 +85,29 @@ export function handleAllStats(
return stats;
});
const mappedClusterUuids = mappedClusters.map(
(cluster) => cluster.cluster_uuid || cluster?.elasticsearch?.cluster?.id
);
// Logstash agent driven monitoring isn't based on cluster UUID
// or standalone LS clusters will be reported with monitoring cluster UUIDs
const logstashOrphanClusterStats = logstash
? Object.entries(logstash)
.filter(([clusterUuid]) => !mappedClusterUuids.includes(clusterUuid))
.map(([clusterUuid, logstashBaseStats]) => ({
cluster_name: LOGSTASH_SYSTEM_ID,
timestamp: `${moment.utc().format()}`,
version:
logstashBaseStats.versions.length > 0 ? logstashBaseStats.versions[0].version : '',
cluster_uuid: clusterUuid,
stack_stats: {
[LOGSTASH_SYSTEM_ID]: logstashBaseStats,
},
cluster_stats: {},
}))
: [];
return mappedClusters.concat(logstashOrphanClusterStats);
}
export function getStackStats<T extends { [clusterUuid: string]: K }, K>(


@ -12,7 +12,7 @@ import { createQuery } from './create_query';
import {
INDEX_PATTERN_KIBANA,
INDEX_PATTERN_BEATS,
INDEX_PATTERN_LOGSTASH,
INDEX_PATTERN_LOGSTASH_MONITORING,
KIBANA_SYSTEM_ID,
BEATS_SYSTEM_ID,
APM_SYSTEM_ID,
@ -230,7 +230,7 @@ function getIndexPatternForStackProduct(product: string) {
case APM_SYSTEM_ID:
return INDEX_PATTERN_BEATS;
case LOGSTASH_SYSTEM_ID:
return INDEX_PATTERN_LOGSTASH;
return INDEX_PATTERN_LOGSTASH_MONITORING;
}
return null;
}
@ -238,7 +238,6 @@ function getIndexPatternForStackProduct(product: string) {
/**
* Get statistics about selected Elasticsearch clusters, for the selected {@code product}.
*
* @param {Object} server The server instance
* @param {function} callCluster The callWithRequest or callWithInternalUser handler
* @param {Array} clusterUuids The string Cluster UUIDs to fetch details for
* @param {Date} start Start time to limit the stats
@ -246,6 +245,7 @@ function getIndexPatternForStackProduct(product: string) {
* @param {String} product The product to limit too ('kibana', 'logstash', 'beats')
*
* Returns an object keyed by the cluster UUIDs to make grouping easier.
* @param maxBucketSize size of the return bucket
*/
export async function getHighLevelStats(
callCluster: ElasticsearchClient,


@ -5,729 +5,77 @@
* 2.0.
*/
import {
fetchLogstashStats,
fetchLogstashState,
processStatsResults,
processLogstashStateResults,
} from './get_logstash_stats';
import sinon from 'sinon';
import { ElasticsearchClient } from '@kbn/core/server';
import { getLogstashStats, logstashMonitoringInstances } from './get_logstash_stats';
import { ElasticsearchClient } from '@kbn/core-elasticsearch-server';
import { CatIndicesResponse } from '@elastic/elasticsearch/lib/api/types';
// eslint-disable-next-line @typescript-eslint/no-var-requires
const logstashStatsResultSetOfSelfMonitoring = require('./__mocks__/fixtures/logstash_stats_self_monitoring_results.json');
// eslint-disable-next-line @typescript-eslint/no-var-requires
const logstashStatsResultSetOfMetricbeatMonitoring = require('./__mocks__/fixtures/logstash_stats_metricbeat_monitoring_results.json');
const logstashStateResultsMapOfSelfMonitoring = new Map();
const logstashStateResultsMapOfMetricbeatMonitoring = new Map();
// Load data for state results.
['1n1p', '1nmp', 'mnmp'].forEach((data) => {
logstashStateResultsMapOfSelfMonitoring.set(
data,
// eslint-disable-next-line @typescript-eslint/no-var-requires
require(`./__mocks__/fixtures/logstash_state_self_monitoring_results_${data}`)
);
logstashStateResultsMapOfMetricbeatMonitoring.set(
data,
// eslint-disable-next-line @typescript-eslint/no-var-requires
require(`./__mocks__/fixtures/logstash_state_metricbeat_monitoring_results_${data}`)
);
});
const getBaseOptions = () => ({
clusters: {},
allEphemeralIds: {},
versions: {},
plugins: {},
});
describe('Get Logstash Stats', () => {
const clusterUuids = ['aCluster', 'bCluster', 'cCluster'];
describe('Get Logstash stats', function () {
const searchMock = sinon.stub();
const callCluster = { search: searchMock } as unknown as ElasticsearchClient;
const start = '2022-03-09T00:00:00.000Z';
const end = '2022-03-09T00:20:00.000Z';
const infoMock = sinon.stub().returns({ cluster_uuid: 'cluster-uuid-1' });
const catIndicesMock = { indices: sinon.stub() };
beforeEach(() => {
searchMock.returns(Promise.resolve({}));
});
afterEach(() => {
searchMock.reset();
test('validates self monitoring instance execution', async () => {
const records: CatIndicesResponse = [
{
index: 'monitoring-logstash-8-test',
},
];
catIndicesMock.indices.returns(Promise.resolve(records));
const callCluster = {
search: searchMock,
info: infoMock,
cat: catIndicesMock,
} as unknown as ElasticsearchClient;
const collectMetricsSpy = sinon.spy(logstashMonitoringInstances.self, 'collectMetrics');
await getLogstashStats(callCluster, ['cluster1'], 'start', 'end');
expect(collectMetricsSpy.calledOnce).toBe(true);
});
describe('fetchLogstashState', () => {
const clusterUuid = 'a';
const ephemeralIds = ['a', 'b', 'c'];
it('should create the logstash state query correctly for legacy monitoring', async () => {
const expected = {
bool: {
filter: [
{
terms: {
'logstash_state.pipeline.ephemeral_id': ['a', 'b', 'c'],
},
},
{
bool: {
should: [
{ term: { type: 'logstash_state' } },
{ term: { 'metricset.name': 'node' } },
],
},
},
],
},
};
test('validates Metricbeat instance execution', async () => {
const records: CatIndicesResponse = [
{
index: '.ds-metrics-logstash.stack_monitoring-test',
},
];
await fetchLogstashState(callCluster, clusterUuid, ephemeralIds, start, end, {} as any, true);
const { args } = searchMock.firstCall;
const [{ body }] = args;
expect(body.query).toEqual(expected);
});
catIndicesMock.indices.returns(Promise.resolve(records));
it('should create the logstash state query correctly for metricbeat monitoring', async () => {
const expected = {
bool: {
filter: [
{
terms: {
'logstash.node.state.pipeline.ephemeral_id': ['a', 'b', 'c'],
},
},
{
bool: {
should: [
{ term: { type: 'logstash_state' } },
{ term: { 'metricset.name': 'node' } },
],
},
},
],
},
};
const callCluster = {
search: searchMock,
info: infoMock,
cat: catIndicesMock,
} as unknown as ElasticsearchClient;
await fetchLogstashState(
callCluster,
clusterUuid,
ephemeralIds,
start,
end,
{} as any,
false
);
const { args } = searchMock.firstCall;
const [{ body }] = args;
expect(body.query).toEqual(expected);
});
it('should set `size: 10` in the query', async () => {
await fetchLogstashState(callCluster, clusterUuid, ephemeralIds, start, end, {} as any, true);
const { args } = searchMock.firstCall;
const [{ body }] = args;
expect(body.size).toEqual(ephemeralIds.length);
});
const collectMetricsSpy = sinon.spy(logstashMonitoringInstances.metricbeat, 'collectMetrics');
await getLogstashStats(callCluster, ['cluster1'], 'start', 'end');
expect(collectMetricsSpy.calledOnce).toBe(true);
});
describe('fetchLogstashStats', () => {
it('should create proper query for legacy monitoring', async () => {
const expectedQuery = {
bool: {
filter: [
{
range: {
timestamp: {
format: 'epoch_millis',
gte: 1646784000000,
lte: 1646785200000,
},
},
},
{
term: {
cluster_uuid: clusterUuids[0], // cluster_uuid is an alias works for both mertricbeat and legacy structure
},
},
{
bool: {
should: [
{ term: { type: 'logstash_stats' } },
{ term: { 'metricset.name': 'node_stats' } },
],
},
},
],
},
};
test('validates agent monitoring instance execution', async () => {
const records: CatIndicesResponse = [
{
index: '.ds-metrics-logstash.node-test',
},
];
await fetchLogstashStats(callCluster, clusterUuids[0], start, end, {} as any, true);
const { args } = searchMock.firstCall;
const [{ body }] = args;
catIndicesMock.indices.returns(Promise.resolve(records));
expect(body.from).toEqual(0);
expect(body.size).toEqual(10000);
expect(body.query).toEqual(expectedQuery);
});
const callCluster = {
search: searchMock,
info: infoMock,
cat: catIndicesMock,
} as unknown as ElasticsearchClient;
const collectMetricsSpy = sinon.spy(logstashMonitoringInstances.agent, 'collectMetrics');
await getLogstashStats(callCluster, ['cluster1'], 'start', 'end');
expect(collectMetricsSpy.calledOnce).toBe(true);
});
describe.each([
[true, 'processLogstashStatsResults with legacy monitoring'],
[false, 'processLogstashStatsResults with metricbeat monitoring'],
])(
'processLogstashStatsResults with self monitoring: %s',
(isLogstashSelfMonitoring, conditionDescription) => {
it('should summarize empty results', () => {
const resultsEmpty = undefined;
const options = getBaseOptions();
processStatsResults(resultsEmpty as any, options, isLogstashSelfMonitoring);
expect(options.clusters).toStrictEqual({});
});
it('should summarize single result with some missing fields', () => {
const source = isLogstashSelfMonitoring
? {
type: 'logstash_stats',
cluster_uuid: 'FlV4ckTxQ0a78hmBkzzc9A',
logstash_stats: {
logstash: {
uuid: '61de393a-f2b6-4b6c-8cea-22661f9c4134',
},
pipelines: [
{
id: 'main',
ephemeral_id: 'cf37c6fa-2f1a-41e2-9a89-36b420a8b9a5',
queue: {
type: 'memory',
},
},
],
},
}
: {
metricset: {
period: 10000,
name: 'node_stats',
},
logstash: {
cluster: {
id: 'FlV4ckTxQ0a78hmBkzzc9A',
},
elasticsearch: {
cluster: {
id: 'FlV4ckTxQ0a78hmBkzzc9A',
},
},
node: {
stats: {
logstash: {
uuid: '61de393a-f2b6-4b6c-8cea-22661f9c4134',
},
pipelines: [
{
id: 'main',
ephemeral_id: 'cf37c6fa-2f1a-41e2-9a89-36b420a8b9a5',
queue: {
type: 'memory',
},
},
],
},
},
},
};
const results = {
hits: {
hits: [
{
_source: source,
},
],
},
};
const options = getBaseOptions();
processStatsResults(results as any, options, isLogstashSelfMonitoring);
expect(options.clusters).toStrictEqual({
FlV4ckTxQ0a78hmBkzzc9A: {
count: 1,
cluster_stats: {
plugins: [],
collection_types: {
internal_collection: 1,
},
pipelines: {},
queues: {
memory: 1,
},
},
versions: [],
},
});
});
it('should retrieve all ephemeral ids from all hits for the same cluster', () => {
const source1 = isLogstashSelfMonitoring
? {
type: 'logstash_stats',
cluster_uuid: 'FlV4ckTxQ0a78hmBkzzc9A',
logstash_stats: {
logstash: {
uuid: '0000000-0000-0000-0000-000000000000',
},
pipelines: [
{
id: 'main',
ephemeral_id: 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
queue: {
type: 'memory',
},
},
],
},
}
: {
metricset: {
period: 10000,
name: 'node_stats',
},
logstash: {
cluster: {
id: 'FlV4ckTxQ0a78hmBkzzc9A',
},
elasticsearch: {
cluster: {
id: 'FlV4ckTxQ0a78hmBkzzc9A',
},
},
node: {
stats: {
logstash: {
uuid: '0000000-0000-0000-0000-000000000000',
},
pipelines: [
{
id: 'main',
ephemeral_id: 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
queue: {
type: 'memory',
},
},
],
},
},
},
};
const source2 = isLogstashSelfMonitoring
? {
type: 'logstash_stats',
cluster_uuid: 'FlV4ckTxQ0a78hmBkzzc9A',
logstash_stats: {
logstash: {
uuid: '11111111-1111-1111-1111-111111111111',
},
pipelines: [
{
id: 'main',
ephemeral_id: 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
queue: {
type: 'memory',
},
},
],
},
}
: {
metricset: {
period: 10000,
name: 'node_stats',
},
logstash: {
cluster: {
id: 'FlV4ckTxQ0a78hmBkzzc9A',
},
elasticsearch: {
cluster: {
id: 'FlV4ckTxQ0a78hmBkzzc9A',
},
},
node: {
stats: {
logstash: {
uuid: '11111111-1111-1111-1111-111111111111',
},
pipelines: [
{
id: 'main',
ephemeral_id: 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
queue: {
type: 'memory',
},
},
],
},
},
},
};
const source3 = isLogstashSelfMonitoring
? {
type: 'logstash_stats',
cluster_uuid: '3',
logstash_stats: {
logstash: {
uuid: '22222222-2222-2222-2222-222222222222',
},
pipelines: [
{
id: 'main',
ephemeral_id: 'cccccccc-cccc-cccc-cccc-cccccccccccc',
queue: {
type: 'memory',
},
},
],
},
}
: {
metricset: {
period: 10000,
name: 'node_stats',
},
logstash: {
cluster: {
id: '3',
},
elasticsearch: {
cluster: {
id: '3',
},
},
node: {
stats: {
logstash: {
uuid: '22222222-2222-2222-2222-222222222222',
},
pipelines: [
{
id: 'main',
ephemeral_id: 'cccccccc-cccc-cccc-cccc-cccccccccccc',
queue: {
type: 'memory',
},
},
],
},
},
},
};
const results = {
hits: {
hits: [
{
_source: source1,
},
{
_source: source2,
},
{
_source: source3,
},
],
},
};
const options = getBaseOptions();
processStatsResults(results as any, options, isLogstashSelfMonitoring);
expect(options.allEphemeralIds).toStrictEqual({
FlV4ckTxQ0a78hmBkzzc9A: [
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
],
'3': ['cccccccc-cccc-cccc-cccc-cccccccccccc'],
});
expect(options.clusters).toStrictEqual({
FlV4ckTxQ0a78hmBkzzc9A: {
count: 2,
cluster_stats: {
plugins: [],
collection_types: {
internal_collection: 2,
},
pipelines: {},
queues: {
memory: 2,
},
},
versions: [],
},
'3': {
count: 1,
cluster_stats: {
plugins: [],
collection_types: {
internal_collection: 1,
},
pipelines: {},
queues: {
memory: 1,
},
},
versions: [],
},
});
});
it('should summarize stats from hits across multiple result objects', () => {
const options = getBaseOptions();
const logstashStatsResultSet = isLogstashSelfMonitoring
? logstashStatsResultSetOfSelfMonitoring
: logstashStatsResultSetOfMetricbeatMonitoring;
const logstashStateResultsMap = isLogstashSelfMonitoring
? logstashStateResultsMapOfSelfMonitoring
: logstashStateResultsMapOfMetricbeatMonitoring;
// logstashStatsResultSet is an array of many small query results
logstashStatsResultSet.forEach((results: any) => {
processStatsResults(results, options, isLogstashSelfMonitoring);
});
logstashStateResultsMap.forEach((value: string[], clusterUuid: string) => {
value.forEach((results: any) => {
processLogstashStateResults(results, clusterUuid, options, isLogstashSelfMonitoring);
});
});
expect(options.allEphemeralIds).toStrictEqual({
'1n1p': ['cf37c6fa-2f1a-41e2-9a89-36b420a8b9a5'],
'1nmp': [
'47a70feb-3cb5-4618-8670-2c0bada61acd',
'5a65d966-0330-4bd7-82f2-ee81040c13cf',
'8d33fe25-a2c0-4c54-9ecf-d218cb8dbfe4',
'f4167a94-20a8-43e7-828e-4cf38d906187',
],
mnmp: [
'2fcd4161-e08f-4eea-818b-703ea3ec6389',
'c6785d63-6e5f-42c2-839d-5edf139b7c19',
'bc6ef6f2-ecce-4328-96a2-002de41a144d',
'72058ad1-68a1-45f6-a8e8-10621ffc7288',
'18593052-c021-4158-860d-d8122981a0ac',
'4207025c-9b00-4bea-a36c-6fbf2d3c215e',
'0ec4702d-b5e5-4c60-91e9-6fa6a836f0d1',
'41258219-b129-4fad-a629-f244826281f8',
'e73bc63d-561a-4acd-a0c4-d5f70c4603df',
'ddf882b7-be26-4a93-8144-0aeb35122651',
'602936f5-98a3-4f8c-9471-cf389a519f4b',
'8b300988-62cc-4bc6-9ee0-9194f3f78e27',
'6ab60531-fb6f-478c-9063-82f2b0af2bed',
'802a5994-a03c-44b8-a650-47c0f71c2e48',
'6070b400-5c10-4c5e-b5c5-a5bd9be6d321',
'3193df5f-2a34-4fe3-816e-6b05999aa5ce',
'994e68cd-d607-40e6-a54c-02a51caa17e0',
],
});
expect(options.clusters).toStrictEqual({
'1n1p': {
count: 1,
versions: [
{
count: 1,
version: '7.10.0',
},
],
cluster_stats: {
collection_types: {
...(isLogstashSelfMonitoring ? { internal_collection: 1 } : { metricbeat: 1 }),
},
pipelines: {
batch_size_avg: 125,
batch_size_max: 125,
batch_size_min: 125,
batch_size_total: 125,
count: 1,
sources: {
file: true,
},
workers_avg: 1,
workers_max: 1,
workers_min: 1,
workers_total: 1,
},
plugins: [
{
count: 1,
name: 'logstash-input-stdin',
},
{
count: 1,
name: 'logstash-input-elasticsearch',
},
{
count: 3,
name: 'logstash-filter-mutate',
},
{
count: 3,
name: 'logstash-filter-ruby',
},
{
count: 1,
name: 'logstash-filter-split',
},
{
count: 1,
name: 'logstash-filter-elasticsearch',
},
{
count: 1,
name: 'logstash-filter-aggregate',
},
{
count: 1,
name: 'logstash-filter-drop',
},
{
count: 1,
name: 'logstash-output-elasticsearch',
},
{
count: 1,
name: 'logstash-output-stdout',
},
],
queues: {
memory: 1,
},
},
},
'1nmp': {
count: 1,
versions: [
{
count: 1,
version: '7.8.0',
},
],
cluster_stats: {
collection_types: {
...(isLogstashSelfMonitoring ? { internal_collection: 1 } : { metricbeat: 1 }),
},
pipelines: {
batch_size_avg: 406.5,
batch_size_max: 1251,
batch_size_min: 125,
batch_size_total: 1626,
count: 4,
sources: {
xpack: true,
},
workers_avg: 17.25,
workers_max: 44,
workers_min: 1,
workers_total: 69,
},
plugins: [
{
count: 4,
name: 'logstash-input-stdin',
},
{
count: 4,
name: 'logstash-output-stdout',
},
],
queues: {
memory: 3,
persisted: 1,
},
},
},
mnmp: {
count: 3,
versions: [
{
count: 1,
version: '7.9.2',
},
{
count: 1,
version: '7.9.1',
},
{
count: 1,
version: '7.10.0',
},
],
cluster_stats: {
collection_types: {
...(isLogstashSelfMonitoring ? { internal_collection: 3 } : { metricbeat: 3 }),
},
pipelines: {
batch_size_avg: 33.294117647058826,
batch_size_max: 125,
batch_size_min: 1,
batch_size_total: 566,
count: 17,
sources: {
file: true,
string: true,
},
workers_avg: 7.411764705882353,
workers_max: 16,
workers_min: 1,
workers_total: 126,
},
plugins: [
{
count: 1,
name: 'logstash-input-stdin',
},
{
count: 1,
name: 'logstash-filter-clone',
},
{
count: 3,
name: 'logstash-output-pipeline',
},
{
count: 2,
name: 'logstash-input-pipeline',
},
{
count: 16,
name: 'logstash-filter-sleep',
},
{
count: 14,
name: 'logstash-output-stdout',
},
{
count: 14,
name: 'logstash-input-generator',
},
],
queues: {
memory: 3,
persisted: 14,
},
},
},
});
});
}
);
});


@ -6,450 +6,29 @@
*/
import { ElasticsearchClient } from '@kbn/core/server';
import type * as estypes from '@elastic/elasticsearch/lib/api/typesWithBodyKey';
import { createQuery } from './create_query';
import { mapToList } from './get_high_level_stats';
import { incrementByKey } from './get_high_level_stats';
import {
INDEX_PATTERN_LOGSTASH,
METRICBEAT_INDEX_NAME_UNIQUE_TOKEN,
TELEMETRY_QUERY_SOURCE,
} from '../../common/constants';
import { LogstashSelfMonitoring } from './logstash_self_monitoring';
import { LogstashMetricbeatMonitoring } from './logstash_metricbeat_monitoring';
import { LogstashAgentMonitoring } from './logstash_agent_monitoring';
import {
LogstashMonitoring,
LogstashProcessOptions,
LogstashStatsByClusterUuid,
} from './logstash_monitoring';
import { INDEX_PATTERN_LOGSTASH, TELEMETRY_QUERY_SOURCE } from '../../common/constants';
const SELF_MONITORING: string = 'self';
const METRICBEAT_MONITORING: string = 'metricbeat';
const AGENT_MONITORING: string = 'agent';
type Counter = Map<string, number>;
const HITS_SIZE = 10000; // maximum hits to receive from ES with each search
export interface LogstashBaseStats {
// stats
versions: Array<{ version: string; count: number }>;
count: number;
cluster_stats?: {
collection_types?: { [collection_type_type: string]: number };
queues?: { [queue_type: string]: number };
plugins?: Array<{ name: string; count: number }>;
pipelines?: {
count?: number;
batch_size_max?: number;
batch_size_avg?: number;
batch_size_min?: number;
batch_size_total?: number;
workers_max?: number;
workers_avg?: number;
workers_min?: number;
workers_total?: number;
sources?: { [source_type: string]: boolean };
};
};
}
const getLogstashBaseStats = () => ({
versions: [],
count: 0,
cluster_stats: {
pipelines: {},
plugins: [],
},
});
export interface LogstashStats {
cluster_uuid: string;
source_node: string;
type: string;
agent?: {
type: string;
};
// legacy monitoring shape
logstash_stats?: {
pipelines?: [
{
id?: string;
ephemeral_id: string;
queue?: {
type: string;
};
}
];
logstash?: {
version?: string;
uuid?: string;
snapshot?: string;
};
};
// metricbeat monitoring shape
logstash?: {
node?: {
stats?: {
pipelines?: [
{
id?: string;
ephemeral_id: string;
queue?: {
type: string;
};
}
];
logstash?: {
version?: string;
uuid?: string;
snapshot?: string;
};
};
};
elasticsearch?: {
cluster?: {
id?: string;
};
};
};
}
export interface LogstashState {
// legacy monitoring shape
logstash_state?: {
pipeline?: {
batch_size?: number;
workers?: number;
representation?: {
graph?: {
vertices?: [
{
config_name?: string;
plugin_type?: string;
meta?: {
source?: {
protocol?: string;
};
};
}
];
};
};
};
};
// metricbeat monitoring shape
logstash?: {
node?: {
state?: {
pipeline?: {
batch_size?: number;
workers?: number;
representation?: {
graph?: {
vertices?: [
{
config_name?: string;
plugin_type?: string;
meta?: {
source?: {
protocol?: string;
};
};
}
];
};
};
};
};
};
};
}
export interface LogstashProcessOptions {
clusters: { [clusterUuid: string]: LogstashBaseStats };
allEphemeralIds: { [clusterUuid: string]: string[] };
versions: { [clusterUuid: string]: Counter };
plugins: { [clusterUuid: string]: Counter };
}
/*
* Update a clusters object with processed Logstash stats
* @param {Array} results - array of LogstashStats docs from ES
* @param {Object} clusters - LogstashBaseStats in an object keyed by the cluster UUIDs
* @param {Object} allEphemeralIds - EphemeralIds in an object keyed by cluster UUIDs to track the pipelines for the cluster
* @param {Object} versions - Versions in an object keyed by cluster UUIDs to track the logstash versions for the cluster
* @param {Object} plugins - plugin information keyed by cluster UUIDs to count the unique plugins
*/
export function processStatsResults(
results: estypes.SearchResponse<LogstashStats>,
{ clusters, allEphemeralIds, versions, plugins }: LogstashProcessOptions,
isSelfMonitoring: boolean
) {
const currHits = results?.hits?.hits || [];
currHits.forEach((hit) => {
const clusterUuid = isSelfMonitoring
? hit._source!.cluster_uuid
: hit._source!.logstash?.elasticsearch?.cluster?.id;
if (clusterUuid !== undefined && clusters[clusterUuid] === undefined) {
clusters[clusterUuid] = getLogstashBaseStats();
versions[clusterUuid] = new Map();
plugins[clusterUuid] = new Map();
}
const logstashStats = isSelfMonitoring
? hit._source?.logstash_stats
: hit._source?.logstash?.node?.stats;
if (clusterUuid !== undefined && logstashStats !== undefined) {
const clusterStats = clusters[clusterUuid].cluster_stats || {};
clusters[clusterUuid].count = (clusters[clusterUuid].count || 0) + 1;
const thisVersion = logstashStats.logstash?.version;
const a: Counter = versions[clusterUuid];
incrementByKey(a, thisVersion);
clusters[clusterUuid].versions = mapToList(a, 'version');
// Internal Collection has no agent field, so default to 'internal_collection'
let thisCollectionType = isSelfMonitoring ? 'internal_collection' : hit._source?.agent?.type;
if (thisCollectionType === undefined) {
thisCollectionType = 'internal_collection';
}
if (!clusterStats.hasOwnProperty('collection_types')) {
clusterStats.collection_types = {};
}
clusterStats.collection_types![thisCollectionType] =
(clusterStats.collection_types![thisCollectionType] || 0) + 1;
const pipelines = logstashStats.pipelines || [];
pipelines.forEach((pipeline) => {
const thisQueueType = pipeline.queue?.type;
if (thisQueueType !== undefined) {
if (!clusterStats.hasOwnProperty('queues')) {
clusterStats.queues = {};
}
clusterStats.queues![thisQueueType] = (clusterStats.queues![thisQueueType] || 0) + 1;
}
const ephemeralId = pipeline.ephemeral_id;
if (ephemeralId !== undefined) {
allEphemeralIds[clusterUuid] = allEphemeralIds[clusterUuid] || [];
allEphemeralIds[clusterUuid].push(ephemeralId);
}
});
}
});
}
/*
* Update a clusters object with logstash state details
* @param {Array} results - array of LogstashState docs from ES
* @param {Object} clusters - LogstashBaseStats in an object keyed by the cluster UUIDs
* @param {Object} plugins - plugin information keyed by cluster UUIDs to count the unique plugins
*/
export function processLogstashStateResults(
results: estypes.SearchResponse<LogstashState>,
clusterUuid: string,
{ clusters, versions, plugins }: LogstashProcessOptions,
isSelfMonitoring: boolean
) {
const currHits = results?.hits?.hits || [];
const clusterStats = clusters[clusterUuid].cluster_stats;
const pipelineStats = clusters[clusterUuid].cluster_stats?.pipelines;
currHits.forEach((hit) => {
const thisLogstashStatePipeline = isSelfMonitoring
? hit._source?.logstash_state?.pipeline
: hit._source?.logstash?.node?.state?.pipeline;
if (pipelineStats !== undefined && thisLogstashStatePipeline !== undefined) {
pipelineStats.count = (pipelineStats.count || 0) + 1;
const thisPipelineBatchSize = thisLogstashStatePipeline.batch_size;
if (thisPipelineBatchSize !== undefined) {
pipelineStats.batch_size_total =
(pipelineStats.batch_size_total || 0) + thisPipelineBatchSize;
pipelineStats.batch_size_max = pipelineStats.batch_size_max || 0;
pipelineStats.batch_size_min = pipelineStats.batch_size_min || 0;
pipelineStats.batch_size_avg = pipelineStats.batch_size_total / pipelineStats.count;
if (thisPipelineBatchSize > pipelineStats.batch_size_max) {
pipelineStats.batch_size_max = thisPipelineBatchSize;
}
if (
pipelineStats.batch_size_min === 0 ||
thisPipelineBatchSize < pipelineStats.batch_size_min
) {
pipelineStats.batch_size_min = thisPipelineBatchSize;
}
}
const thisPipelineWorkers = thisLogstashStatePipeline.workers;
if (thisPipelineWorkers !== undefined) {
pipelineStats.workers_total = (pipelineStats.workers_total || 0) + thisPipelineWorkers;
pipelineStats.workers_max = pipelineStats.workers_max || 0;
pipelineStats.workers_min = pipelineStats.workers_min || 0;
pipelineStats.workers_avg = pipelineStats.workers_total / pipelineStats.count;
if (thisPipelineWorkers > pipelineStats.workers_max) {
pipelineStats.workers_max = thisPipelineWorkers;
}
if (pipelineStats.workers_min === 0 || thisPipelineWorkers < pipelineStats.workers_min) {
pipelineStats.workers_min = thisPipelineWorkers;
}
}
      // Extract the vertices object from the pipeline representation. From this, we can
      // retrieve the source of the pipeline configuration (from file, string, or
      // x-pack-config-management), and the input, filter, and output plugins used by that pipeline.
const vertices = thisLogstashStatePipeline.representation?.graph?.vertices;
if (vertices !== undefined) {
vertices.forEach((vertex) => {
const configName = vertex.config_name;
const pluginType = vertex.plugin_type;
let pipelineConfig = vertex.meta?.source?.protocol;
if (pipelineConfig !== undefined) {
if (pipelineConfig === 'string' || pipelineConfig === 'str') {
pipelineConfig = 'string';
} else if (pipelineConfig === 'x-pack-config-management') {
pipelineConfig = 'xpack';
} else {
pipelineConfig = 'file';
}
if (!pipelineStats.hasOwnProperty('sources')) {
pipelineStats.sources = {};
}
pipelineStats.sources![pipelineConfig] = true;
}
if (configName !== undefined && pluginType !== undefined) {
incrementByKey(plugins[clusterUuid], `logstash-${pluginType}-${configName}`);
}
});
}
}
});
if (clusterStats !== undefined) {
clusterStats.plugins = mapToList(plugins[clusterUuid], 'name');
}
}
export async function fetchLogstashStats(
callCluster: ElasticsearchClient,
clusterUuid: string,
start: string,
end: string,
{ page = 0, ...options }: { page?: number } & LogstashProcessOptions,
isSelfMonitoring: boolean
): Promise<void> {
const statsField = isSelfMonitoring ? 'logstash_stats' : 'logstash.node.stats';
const filterPath: string[] = [
'hits.hits._source.cluster_uuid',
'hits.hits._source.type',
'hits.hits._source.source_node',
'hits.hits._source.agent.type',
'hits.hits._source.logstash.elasticsearch.cluster.id', // alias for cluster_uuid
`hits.hits._source.${statsField}.pipelines.id`,
`hits.hits._source.${statsField}.pipelines.ephemeral_id`,
`hits.hits._source.${statsField}.pipelines.queue.type`,
`hits.hits._source.${statsField}.logstash.version`,
`hits.hits._source.${statsField}.logstash.uuid`,
];
const params: estypes.SearchRequest = {
index: INDEX_PATTERN_LOGSTASH,
ignore_unavailable: true,
filter_path: filterPath,
body: {
query: createQuery({
start,
end,
filters: [
{ term: { cluster_uuid: clusterUuid } },
{
bool: {
should: [
{ term: { type: 'logstash_stats' } },
{ term: { 'metricset.name': 'node_stats' } },
],
},
},
],
}) as estypes.QueryDslQueryContainer,
from: page * HITS_SIZE,
collapse: {
field: `${statsField}.logstash.uuid`,
},
sort: [{ ['timestamp']: { order: 'desc', unmapped_type: 'long' } }],
size: HITS_SIZE,
},
};
const results = await callCluster.search<LogstashStats>(params, {
headers: {
'X-QUERY-SOURCE': TELEMETRY_QUERY_SOURCE,
},
});
const hitsLength = results?.hits?.hits.length || 0;
if (hitsLength > 0) {
// further augment the clusters object with more stats
processStatsResults(results, options, isSelfMonitoring);
}
return Promise.resolve();
}
export async function fetchLogstashState(
callCluster: ElasticsearchClient,
clusterUuid: string,
ephemeralIds: string[],
start: string,
end: string,
{ page = 0, ...options }: { page?: number } & LogstashProcessOptions,
isSelfMonitoring: boolean
): Promise<void> {
const stateField = isSelfMonitoring ? 'logstash_state' : 'logstash.node.state';
const filterPath: string[] = [
`hits.hits._source.${stateField}.pipeline.batch_size`,
`hits.hits._source.${stateField}.pipeline.workers`,
`hits.hits._source.${stateField}.pipeline.representation.graph.vertices`,
`hits.hits._source.type`,
];
const params: estypes.SearchRequest = {
index: INDEX_PATTERN_LOGSTASH,
ignore_unavailable: true,
filter_path: filterPath,
body: {
query: createQuery({
        // intentionally not restricting by start and end periods, as we need node state info to fill in plugin usage
        // especially with metricbeat monitoring
filters: [
{ terms: { [`${stateField}.pipeline.ephemeral_id`]: ephemeralIds } },
{
bool: {
should: [
{ term: { type: 'logstash_state' } },
{ term: { 'metricset.name': 'node' } },
],
},
},
],
}) as estypes.QueryDslQueryContainer,
collapse: {
field: `${stateField}.pipeline.ephemeral_id`,
},
sort: [{ ['timestamp']: { order: 'desc', unmapped_type: 'long' } }],
size: ephemeralIds.length,
},
};
const results = await callCluster.search<LogstashState>(params, {
headers: {
'X-QUERY-SOURCE': TELEMETRY_QUERY_SOURCE,
},
});
const hitsLength = results?.hits?.hits.length || 0;
if (hitsLength > 0) {
// further augment the clusters object with more stats
processLogstashStateResults(results, clusterUuid, options, isSelfMonitoring);
}
return Promise.resolve();
}
export interface LogstashStatsByClusterUuid {
[clusterUuid: string]: LogstashBaseStats;
}
export const logstashMonitoringInstances: { [key: string]: LogstashMonitoring } = {
self: new LogstashSelfMonitoring(),
metricbeat: new LogstashMetricbeatMonitoring(),
agent: new LogstashAgentMonitoring(),
};
/*
* Call the function for fetching and summarizing Logstash stats
@@ -464,65 +43,72 @@ export async function getLogstashStats(
const options: LogstashProcessOptions = {
clusters: {}, // the result object to be built up
allEphemeralIds: {},
allHostIds: {},
versions: {},
plugins: {},
};
const monitoringClusterInfo = await callCluster.info();
const monitoringClusterUuid: string = monitoringClusterInfo.cluster_uuid;
  // figure out the monitoring methods the cluster is using based on the Logstash metrics indices
  // mostly a single method will be resolved
  // multiple monitoring methods may show up during a migration (ex: from self to metricbeat)
  const monitoringMethods: string[] = await getLogstashMonitoringMethods(callCluster);
  // collect metrics for each detected method (:self, :metricbeat, :agent) in the given period
  for (const monitoringMethod of monitoringMethods) {
    const monitoringInstance = logstashMonitoringInstances[monitoringMethod];
    if (monitoringInstance) {
      await monitoringInstance.collectMetrics(
        callCluster,
        clusterUuids,
        monitoringClusterUuid,
        start,
        end,
        options
      );
    }
  }
  return options.clusters;
}
export async function getLogstashMonitoringMethods(
  callCluster: ElasticsearchClient
): Promise<string[]> {
  const response = await callCluster.cat.indices(
    { index: INDEX_PATTERN_LOGSTASH, format: 'json' },
    {
      headers: {
        'X-QUERY-SOURCE': TELEMETRY_QUERY_SOURCE,
      },
    }
  );
  const monitoringMethods: string[] = [];
  for (const record of response) {
    if (record.index!.indexOf('monitoring-logstash-') !== -1) {
      if (record.index!.indexOf(METRICBEAT_INDEX_NAME_UNIQUE_TOKEN) !== -1) {
        // legacy metricbeat driven monitoring
        if (!monitoringMethods.includes(METRICBEAT_MONITORING)) {
          monitoringMethods.push(METRICBEAT_MONITORING);
          logstashMonitoringInstances.metricbeat.setIndexPattern('legacy');
        }
      } else {
        if (!monitoringMethods.includes(SELF_MONITORING)) {
          monitoringMethods.push(SELF_MONITORING);
        }
      }
    } else if (record.index!.indexOf('metrics-logstash.node') !== -1) {
      if (!monitoringMethods.includes(AGENT_MONITORING)) {
        monitoringMethods.push(AGENT_MONITORING);
      }
    } else if (record.index!.indexOf('metrics-logstash.stack_monitoring') !== -1) {
      if (!monitoringMethods.includes(METRICBEAT_MONITORING)) {
        monitoringMethods.push(METRICBEAT_MONITORING);
        logstashMonitoringInstances.metricbeat.setIndexPattern('stack');
      }
    }
  }
  return monitoringMethods;
}
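For quick reference, here is a minimal standalone TypeScript sketch of the index-name branching used by getLogstashMonitoringMethods. The method keys ('self', 'metricbeat', 'agent') and the '-mb' token are assumptions, since SELF_MONITORING, METRICBEAT_MONITORING, AGENT_MONITORING, and METRICBEAT_INDEX_NAME_UNIQUE_TOKEN are imported from constants not shown in this hunk.

// Hypothetical, self-contained mirror of the branching above; values below are assumptions.
const SELF_MONITORING_KEY = 'self';
const METRICBEAT_MONITORING_KEY = 'metricbeat';
const AGENT_MONITORING_KEY = 'agent';
const METRICBEAT_TOKEN = '-mb'; // assumed value of METRICBEAT_INDEX_NAME_UNIQUE_TOKEN

export function classifyLogstashMonitoringMethods(indexNames: string[]): string[] {
  const methods: string[] = [];
  const add = (method: string) => {
    if (!methods.includes(method)) {
      methods.push(method);
    }
  };
  for (const name of indexNames) {
    if (name.includes('monitoring-logstash-')) {
      // legacy metricbeat indices carry the '-mb' token; otherwise internal (self) collection
      add(name.includes(METRICBEAT_TOKEN) ? METRICBEAT_MONITORING_KEY : SELF_MONITORING_KEY);
    } else if (name.includes('metrics-logstash.node')) {
      add(AGENT_MONITORING_KEY); // agent driven (Logstash integration) data streams
    } else if (name.includes('metrics-logstash.stack_monitoring')) {
      add(METRICBEAT_MONITORING_KEY); // metricbeat writing to stack monitoring data streams
    }
  }
  return methods;
}

// classifyLogstashMonitoringMethods([
//   '.monitoring-logstash-8-mb-2024.06.04',
//   '.ds-metrics-logstash.node-default-2024.06.04-000001',
// ]) -> ['metricbeat', 'agent']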

View file

@@ -0,0 +1,582 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import sinon from 'sinon';
import { ElasticsearchClient } from '@kbn/core/server';
import { LogstashAgentMonitoring } from './logstash_agent_monitoring';
// eslint-disable-next-line @typescript-eslint/no-var-requires
const logstashStatsResultSetOfAgentMonitoring = require('./__mocks__/fixtures/logstash_stats_agent_monitoring_results.json');
const logstashStateResultsMapOfAgentMonitoring = new Map();
// Load data for state results.
['1n1p', '1nmp', 'mnmp'].forEach((data) => {
logstashStateResultsMapOfAgentMonitoring.set(
data,
// eslint-disable-next-line @typescript-eslint/no-var-requires
require(`./__mocks__/fixtures/logstash_state_agent_monitoring_results_${data}`)
);
});
const getBaseOptions = () => ({
clusters: {},
allHostIds: {},
allEphemeralIds: {},
versions: {},
plugins: {},
});
describe('LogstashAgentMonitoring', () => {
let agentMonitoring: LogstashAgentMonitoring;
const monitoringClusterUuid: string = 'monitoringClusterUuid';
const searchMock = sinon.stub();
const callCluster = { search: searchMock } as unknown as ElasticsearchClient;
const start = '2024-05-31T00:00:00.000Z';
const end = '2024-05-31T00:20:00.000Z';
beforeEach(() => {
agentMonitoring = new LogstashAgentMonitoring();
searchMock.returns(Promise.resolve({}));
});
afterEach(() => {
searchMock.reset();
});
describe('Logstash agent monitoring query test', () => {
const clusterUuid = 'a';
const hostIds = ['aHost', 'bHost', 'cHost'];
it('creates proper query for stats', async () => {
const expectedQuery = {
bool: {
filter: [
{
bool: {
should: [{ term: { 'data_stream.dataset': 'logstash.node' } }],
},
},
{
range: {
'@timestamp': {
format: 'epoch_millis',
gte: 1717113600000,
lte: 1717114800000,
},
},
},
],
},
};
await (agentMonitoring as any).fetchLogstashStats(
callCluster,
monitoringClusterUuid,
start,
end,
{} as any
);
const { args } = searchMock.firstCall;
const [{ body }] = args;
expect(body.from).toEqual(0);
expect(body.size).toEqual(10000);
expect(body.query).toEqual(expectedQuery);
});
it('creates the logstash state query correctly for state', async () => {
const expected = {
bool: {
filter: [
{
bool: {
should: [{ term: { 'data_stream.dataset': 'logstash.plugins' } }],
},
},
{
terms: {
'host.id': ['aHost', 'bHost', 'cHost'],
},
},
{
range: {
'@timestamp': {
format: 'epoch_millis',
gte: 1717113600000,
lte: 1717114800000,
},
},
},
],
},
};
await (agentMonitoring as any).fetchLogstashState(
callCluster,
hostIds,
clusterUuid,
start,
end,
{} as any
);
const { args } = searchMock.firstCall;
const [{ body }] = args;
expect(body.query).toEqual(expected);
});
});
describe('Process query results', () => {
it('summarizes empty results', () => {
const resultsEmpty = undefined;
const options = getBaseOptions();
(agentMonitoring as any).processStatsResults(
resultsEmpty as any,
options,
monitoringClusterUuid
);
expect(options.clusters).toStrictEqual({});
});
it('summarizes a result with monitoring cluster UUID', () => {
const source = {
logstash: {
node: {
stats: {
logstash: {
pipeline: {
batch_delay: 50,
batch_size: 125,
workers: 10,
},
pipelines: ['another_test', 'test'],
ephemeral_id: '224e3687-15b2-4e91-84bb-dbb785742c73',
uuid: 'a755552f-9ef8-4f01-abae-394a59352f2d',
version: '8.15.0',
status: 'green',
},
},
},
},
input: {
type: 'cel',
},
agent: {
name: 'Mashhurs.local',
id: 'ef37141b-605e-4b6e-a69f-ec525f8dcdd4',
type: 'filebeat',
},
'@timestamp': '2024-06-03T16:55:30.213Z',
data_stream: {
namespace: 'default',
type: 'metrics',
dataset: 'logstash.node',
},
host: {
id: '6F56EC02-BC0B-50C7-A3C4-A414CB348C79',
},
};
const results = {
hits: {
hits: [
{
_source: source,
},
],
},
};
const options = getBaseOptions();
(agentMonitoring as any).processStatsResults(
results as any,
options,
'FlV4ckTxQ0a78hmBkzzc9A'
);
expect(options.clusters).toStrictEqual({
FlV4ckTxQ0a78hmBkzzc9A: {
count: 1,
cluster_stats: {
collection_types: {
filebeat: 1,
},
monitoringClusterUuid: 'FlV4ckTxQ0a78hmBkzzc9A',
pipelines: {
count: 2,
},
plugins: [],
},
versions: [
{
count: 1,
version: '8.15.0',
},
],
},
});
});
it('summarizes a result with reported cluster UUID', () => {
const source = {
logstash: {
node: {
stats: {
logstash: {
pipeline: {
batch_delay: 50,
batch_size: 125,
workers: 10,
},
pipelines: ['another_test', 'test'],
ephemeral_id: '224e3687-15b2-4e91-84bb-dbb785742c73',
uuid: 'a755552f-9ef8-4f01-abae-394a59352f2d',
version: '8.15.0',
status: 'green',
},
},
},
elasticsearch: {
cluster: {
id: ['testClusterUuid'],
},
},
},
input: {
type: 'cel',
},
agent: {
name: 'Mashhurs.local',
id: 'ef37141b-605e-4b6e-a69f-ec525f8dcdd4',
type: 'filebeat',
},
'@timestamp': '2024-06-03T16:55:30.213Z',
data_stream: {
namespace: 'default',
type: 'metrics',
dataset: 'logstash.node',
},
host: {
id: '6F56EC02-BC0B-50C7-A3C4-A414CB348C79',
},
};
const results = {
hits: {
hits: [
{
_source: source,
},
],
},
};
const options = getBaseOptions();
(agentMonitoring as any).processStatsResults(
results as any,
options,
'FlV4ckTxQ0a78hmBkzzc9A'
);
expect(options.clusters).toStrictEqual({
testClusterUuid: {
count: 1,
cluster_stats: {
collection_types: {
filebeat: 1,
},
monitoringClusterUuid: 'FlV4ckTxQ0a78hmBkzzc9A',
pipelines: {
count: 2,
},
plugins: [],
},
versions: [
{
count: 1,
version: '8.15.0',
},
],
},
});
});
it('retrieves all host ids from the hits for the same cluster', () => {
const source1 = {
logstash: {
node: {
stats: {
logstash: {
pipeline: {
batch_delay: 50,
batch_size: 125,
workers: 10,
},
pipelines: ['another_test', 'test'],
ephemeral_id: '224e3687-15b2-4e91-84bb-dbb785742c73',
uuid: 'a755552f-9ef8-4f01-abae-394a59352f2d',
version: '8.15.0',
status: 'green',
},
},
},
elasticsearch: {
cluster: {
id: ['testClusterUuid'],
},
},
},
host: {
id: 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
},
};
const source2 = {
logstash: {
node: {
stats: {
logstash: {
pipeline: {
batch_delay: 50,
batch_size: 125,
workers: 10,
},
pipelines: ['another_test', 'test'],
ephemeral_id: '224e3687-15b2-4e91-84bb-dbb785742c73',
uuid: 'a755552f-9ef8-4f01-abae-394a59352f2d',
version: '8.15.0',
status: 'green',
},
},
},
elasticsearch: {
cluster: {
id: ['testClusterUuid'],
},
},
},
host: {
id: 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
},
};
const source3 = {
logstash: {
node: {
stats: {
logstash: {
pipeline: {
batch_delay: 50,
batch_size: 125,
workers: 10,
},
pipelines: ['another_test', 'test'],
ephemeral_id: '224e3687-15b2-4e91-84bb-dbb785742c73',
uuid: 'a755552f-9ef8-4f01-abae-394a59352f2d',
version: '8.15.0',
status: 'green',
},
},
},
elasticsearch: {
cluster: {
id: ['3'],
},
},
},
host: {
id: 'cccccccc-cccc-cccc-cccc-cccccccccccc',
},
};
const results = {
hits: {
hits: [
{
_source: source1,
},
{
_source: source2,
},
{
_source: source3,
},
],
},
};
const options = getBaseOptions();
(agentMonitoring as any).processStatsResults(results as any, options, monitoringClusterUuid);
expect(options.allHostIds).toStrictEqual({
testClusterUuid: [
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
],
'3': ['cccccccc-cccc-cccc-cccc-cccccccccccc'],
});
expect(options.clusters).toStrictEqual({
testClusterUuid: {
count: 2,
cluster_stats: {
plugins: [],
collection_types: {
agent: 2,
},
monitoringClusterUuid: 'monitoringClusterUuid',
pipelines: {
count: 2,
},
},
versions: [
{
count: 2,
version: '8.15.0',
},
],
},
'3': {
count: 1,
cluster_stats: {
plugins: [],
collection_types: {
agent: 1,
},
monitoringClusterUuid: 'monitoringClusterUuid',
pipelines: {
count: 2,
},
},
versions: [
{
count: 1,
version: '8.15.0',
},
],
},
});
});
it('summarizes stats from hits across multiple result objects', () => {
const options = getBaseOptions();
// logstashStatsResultSet is an array of many small query results
logstashStatsResultSetOfAgentMonitoring.forEach((results: any) => {
(agentMonitoring as any).processStatsResults(results, options, monitoringClusterUuid);
});
logstashStateResultsMapOfAgentMonitoring.forEach((value: string[], clusterUuid: string) => {
value.forEach((results: any) => {
(agentMonitoring as any).processStateResults(results, options, monitoringClusterUuid);
});
});
expect(options.allHostIds).toStrictEqual({
'1n1p': ['cf37c6fa-2f1a-41e2-9a89-36b420a8b9a5'],
'1nmp': ['47a70feb-3cb5-4618-8670-2c0bada61acd', '5a65d966-0330-4bd7-82f2-ee81040c13cf'],
mnmp: [
'2fcd4161-e08f-4eea-818b-703ea3ec6389',
'c6785d63-6e5f-42c2-839d-5edf139b7c19',
'bc6ef6f2-ecce-4328-96a2-002de41a144d',
],
});
expect(options.clusters).toStrictEqual({
'1n1p': {
cluster_stats: {
collection_types: {
filebeat: 1,
},
monitoringClusterUuid: 'monitoringClusterUuid',
pipelines: {
count: 2,
},
plugins: [
{
count: 1,
name: 'logstash-input-generator',
},
{
count: 1,
name: 'logstash-input-heartbeat',
},
{
count: 1,
name: 'logstash-codec-dots',
},
],
},
count: 1,
versions: [
{
count: 1,
version: '8.15.0',
},
],
},
'1nmp': {
cluster_stats: {
collection_types: {
filebeat: 2,
},
monitoringClusterUuid: 'monitoringClusterUuid',
pipelines: {
count: 2,
},
plugins: [
{
count: 2,
name: 'logstash-codec-plain',
},
],
},
count: 2,
versions: [
{
count: 2,
version: '8.15.0',
},
],
},
mnmp: {
cluster_stats: {
collection_types: {
filebeat: 3,
},
monitoringClusterUuid: 'monitoringClusterUuid',
pipelines: {
count: 2,
},
plugins: [
{
count: 1,
name: 'logstash-codec-plain',
},
{
count: 1,
name: 'logstash-codec-rubydebug',
},
{
count: 1,
name: 'logstash-output-stdout',
},
],
},
count: 3,
versions: [
{
count: 3,
version: '8.15.0',
},
],
},
});
});
});
});

View file

@@ -0,0 +1,310 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { ElasticsearchClient } from '@kbn/core/server';
import type * as estypes from '@elastic/elasticsearch/lib/api/typesWithBodyKey';
import moment from 'moment';
import { createQuery } from './create_query';
import { mapToList } from './get_high_level_stats';
import { incrementByKey } from './get_high_level_stats';
import {
TELEMETRY_QUERY_SOURCE,
INDEX_PATTERN_LOGSTASH_METRICS_PLUGINS,
INDEX_PATTERN_LOGSTASH_METRICS_NODE,
} from '../../common/constants';
import {
HITS_SIZE,
LOGSTASH_PLUGIN_TYPES,
getLogstashBaseStats,
Counter,
LogstashMonitoring,
LogstashProcessOptions,
LogstashState,
LogstashStats,
LogstashStatsByClusterUuid,
} from './logstash_monitoring';
export class LogstashAgentMonitoring implements LogstashMonitoring {
/*
* Call the function for fetching and summarizing Logstash metrics for agent (LS integration) monitoring
* @param {Object} callCluster - ES client
* @param {Array} clusterUuids - List cluster UUIDs to retrieve metrics
* @param {string} monitoringClusterUuid - monitoring cluster UUID
* @param {string} start - start timestamp
* @param {string} end - end timestamp
* @param {Object} options - additional processing required options
* @return {Object} - Logstash stats in an object keyed by the cluster UUIDs
   * Note that we try to fetch all metrics for the given period regardless of cluster UUID.
   * If a metric document has no cluster UUID, it is reported under the monitoring cluster UUID.
*/
async collectMetrics(
callCluster: ElasticsearchClient,
clusterUuids: string[],
monitoringClusterUuid: string,
start: string,
end: string,
options: LogstashProcessOptions
): Promise<LogstashStatsByClusterUuid> {
await this.fetchLogstashStats(callCluster, monitoringClusterUuid, start, end, options);
const allHostIds = Object.values(options.allHostIds).flat();
if (allHostIds.length > 0) {
await this.fetchLogstashState(
callCluster,
allHostIds,
monitoringClusterUuid,
start,
end,
options
);
}
return options.clusters;
}
  setIndexPattern(monitoringType: string) {
    // no-op: agent driven monitoring always targets the Logstash integration data streams
  }
/*
* Update a clusters object with processed Logstash stats for agent monitoring
* @param {Array} results - array of LogstashStats docs from ES
* @param {Object} clusters - LogstashBaseStats in an object keyed by the cluster UUIDs
* @param {Object} allEphemeralIds - EphemeralIds in an object keyed by cluster UUIDs to track the pipelines for the cluster
* @param {Object} versions - Versions in an object keyed by cluster UUIDs to track the logstash versions for the cluster
* @param {Object} plugins - plugin information keyed by cluster UUIDs to count the unique plugins
* @param {string} monitoringClusterUuid - monitoring cluster UUID
*/
private processStatsResults(
results: estypes.SearchResponse<LogstashStats>,
{ clusters, allEphemeralIds, allHostIds, versions, plugins }: LogstashProcessOptions,
monitoringClusterUuid: string
) {
const currHits = results?.hits?.hits || [];
currHits.forEach((hit) => {
      // if an orphan (no UUID) cluster is found, report it under the monitoring cluster UUID
const clusterId = hit._source!.logstash?.elasticsearch?.cluster?.id || [];
const clusterUuid = clusterId[0] || monitoringClusterUuid;
if (clusterUuid !== undefined && clusters[clusterUuid] === undefined) {
clusters[clusterUuid] = getLogstashBaseStats();
versions[clusterUuid] = new Map();
plugins[clusterUuid] = new Map();
}
const logstashStats = hit._source?.logstash?.node?.stats;
if (clusterUuid !== undefined && logstashStats !== undefined) {
const clusterStats = clusters[clusterUuid].cluster_stats || {};
clusterStats.monitoringClusterUuid = monitoringClusterUuid;
clusters[clusterUuid].count = (clusters[clusterUuid].count || 0) + 1;
const thisVersion = logstashStats.logstash?.version;
const a: Counter = versions[clusterUuid];
incrementByKey(a, thisVersion);
clusters[clusterUuid].versions = mapToList(a, 'version');
const ephemeralId = logstashStats.logstash?.ephemeral_id;
if (ephemeralId !== undefined) {
allEphemeralIds[clusterUuid] = allEphemeralIds[clusterUuid] || [];
if (!allEphemeralIds[clusterUuid].includes(ephemeralId)) {
allEphemeralIds[clusterUuid].push(ephemeralId);
}
}
const hostId = hit._source?.host?.id;
if (hostId !== undefined) {
allHostIds[clusterUuid] = allHostIds[clusterUuid] || [];
if (!allHostIds[clusterUuid].includes(hostId)) {
allHostIds[clusterUuid].push(hostId);
}
}
const thisCollectionType = hit._source?.agent?.type || 'agent';
if (!clusterStats.hasOwnProperty('collection_types')) {
clusterStats.collection_types = {};
}
clusterStats.collection_types![thisCollectionType] =
(clusterStats.collection_types![thisCollectionType] || 0) + 1;
const pipelines = logstashStats?.logstash?.pipelines || [];
if (!clusterStats.hasOwnProperty('pipelines')) {
clusterStats.pipelines = {};
}
clusterStats.pipelines!.count = pipelines.length;
// TODO: add queue types of the pipelines with next iterations
}
});
}
/*
* Update a clusters object with logstash state details for agent monitoring
* @param {Array} results - array of LogstashState docs from ES
* @param {Object} clusters - LogstashBaseStats in an object keyed by the cluster UUIDs
* @param {Object} allEphemeralIds - EphemeralIds in an object keyed by cluster UUIDs to track the pipelines for the cluster
* @param {Object} plugins - plugin information keyed by cluster UUIDs to count the unique plugins
* @param {string} monitoringClusterUuid - monitoring cluster UUID
*/
private processStateResults(
results: estypes.SearchResponse<LogstashState>,
{ clusters, allEphemeralIds, plugins }: LogstashProcessOptions,
monitoringClusterUuid: string
) {
const currHits = results?.hits?.hits || [];
currHits.forEach((hit) => {
const clusterUuid =
hit._source?.logstash?.pipeline?.elasticsearch?.cluster?.id || monitoringClusterUuid;
const pipelineStats = clusters[clusterUuid]?.cluster_stats?.pipelines;
      // `pipeline` is defined in the mapping but in reality contains plugin info
const logstashStatePlugin = hit._source?.logstash?.pipeline;
if (pipelineStats !== undefined && logstashStatePlugin !== undefined) {
const pluginType = logstashStatePlugin?.plugin?.type;
const pluginName = pluginType
? logstashStatePlugin?.plugin?.[`${pluginType}`]?.name
: undefined;
if (pluginName !== undefined && pluginType !== undefined) {
incrementByKey(plugins[clusterUuid], `logstash-${pluginType}-${pluginName}`);
}
const clusterStats = clusters[clusterUuid]?.cluster_stats;
if (clusterStats !== undefined) {
clusterStats.plugins = mapToList(plugins[clusterUuid], 'name');
}
}
});
}
/*
* Creates a query and executes against ES to fetch agent monitoring, Logstash stats metrics
* @param {Object} callCluster - ES client
* @param {string} monitoringClusterUuid - monitoring cluster UUID
* @param {string} start - start timestamp
* @param {string} end - end timestamp
* @param {Object} options - additional processing required options
*/
private async fetchLogstashStats(
callCluster: ElasticsearchClient,
monitoringClusterUuid: string,
start: string,
end: string,
{ page = 0, ...options }: { page?: number } & LogstashProcessOptions
): Promise<void> {
const filterPath: string[] = [
'hits.hits._source.cluster_uuid',
'hits.hits._source.agent.type',
'hits.hits._source.host.id',
'hits.hits._source.logstash.elasticsearch.cluster.id', // alias for cluster_uuid
'hits.hits._source.logstash.node.stats.logstash',
];
const params: estypes.SearchRequest = {
index: INDEX_PATTERN_LOGSTASH_METRICS_NODE,
ignore_unavailable: true,
filter_path: filterPath,
body: {
query: createQuery({
filters: [
{
bool: {
should: [{ term: { 'data_stream.dataset': 'logstash.node' } }],
},
},
{
range: {
'@timestamp': {
format: 'epoch_millis',
gte: moment.utc(start).valueOf(),
lte: moment.utc(end).valueOf(),
},
},
},
],
}) as estypes.QueryDslQueryContainer,
collapse: {
field: 'host.id',
},
sort: [{ '@timestamp': { order: 'desc', unmapped_type: 'long' } }],
from: page * HITS_SIZE,
size: HITS_SIZE,
},
};
const results = await callCluster.search<LogstashStats>(params, {
headers: {
'X-QUERY-SOURCE': TELEMETRY_QUERY_SOURCE,
},
});
const hitsLength = results?.hits?.hits.length || 0;
if (hitsLength > 0) {
// further augment the clusters object with more stats
this.processStatsResults(results, options, monitoringClusterUuid);
}
return Promise.resolve();
}
/*
* Creates a query and executes against ES to fetch agent monitoring, Logstash state metrics
* @param {Object} callCluster - ES client
* @param {string} monitoringClusterUuid - monitoring cluster UUID
* @param {Array} hostIds - Logstash host IDs
* @param {string} start - start timestamp
* @param {string} end - end timestamp
* @param {Object} options - additional processing required options
*/
private async fetchLogstashState(
callCluster: ElasticsearchClient,
hostIds: string[],
monitoringClusterUuid: string,
start: string,
end: string,
{ page = 0, ...options }: { page?: number } & LogstashProcessOptions
): Promise<void> {
const filters = [
{
bool: {
should: [{ term: { 'data_stream.dataset': 'logstash.plugins' } }],
},
},
{ terms: { 'host.id': hostIds } },
{
range: {
'@timestamp': {
format: 'epoch_millis',
gte: moment.utc(start).valueOf(),
lte: moment.utc(end).valueOf(),
},
},
},
];
    // collapse by `logstash.pipeline.plugin.{type}.id` to gather the unique plugins each pipeline is using
for (const pluginType of LOGSTASH_PLUGIN_TYPES) {
const params: estypes.SearchRequest = {
index: INDEX_PATTERN_LOGSTASH_METRICS_PLUGINS,
ignore_unavailable: true,
filter_path: ['hits.hits._source.logstash.pipeline'],
body: {
query: createQuery({
filters,
}) as estypes.QueryDslQueryContainer,
collapse: { field: `logstash.pipeline.plugin.${pluginType}.id` },
sort: [{ '@timestamp': { order: 'desc', unmapped_type: 'long' } }],
size: HITS_SIZE,
},
};
const results = await callCluster.search<LogstashState>(params, {
headers: {
'X-QUERY-SOURCE': TELEMETRY_QUERY_SOURCE,
},
});
const hitsLength = results?.hits?.hits.length || 0;
if (hitsLength > 0) {
this.processStateResults(results, options, monitoringClusterUuid);
}
}
return Promise.resolve();
}
}
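A minimal usage sketch of the class above follows; the helper name and the timestamps are hypothetical, and the empty clusterUuids array reflects that the agent implementation does not filter by cluster UUIDs even though the interface accepts them.

// Illustrative wiring only; the helper name and timestamps below are hypothetical.
import { ElasticsearchClient } from '@kbn/core/server';
import { LogstashAgentMonitoring } from './logstash_agent_monitoring';
import { LogstashProcessOptions } from './logstash_monitoring';

export async function collectAgentDrivenLogstashTelemetry(
  esClient: ElasticsearchClient,
  monitoringClusterUuid: string
) {
  const options: LogstashProcessOptions = {
    clusters: {},
    allEphemeralIds: {},
    allHostIds: {},
    versions: {},
    plugins: {},
  };
  const monitoring = new LogstashAgentMonitoring();
  // fetches node stats first, then plugin state for the host IDs discovered in those stats
  return monitoring.collectMetrics(
    esClient,
    [], // accepted by the interface; the agent implementation does not filter by cluster UUIDs
    monitoringClusterUuid,
    '2024-06-04T00:00:00.000Z', // example start timestamp
    '2024-06-04T00:20:00.000Z', // example end timestamp
    options
  );
}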

View file

@@ -0,0 +1,628 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import sinon from 'sinon';
import { ElasticsearchClient } from '@kbn/core/server';
import { LogstashMetricbeatMonitoring } from './logstash_metricbeat_monitoring';
import {
INDEX_PATTERN_LOGSTASH_MONITORING,
INDEX_PATTERN_LOGSTASH_STACK_MONITORING_STATS,
INDEX_PATTERN_LOGSTASH_STACK_MONITORING_STATE,
} from '../../common/constants';
// eslint-disable-next-line @typescript-eslint/no-var-requires
const logstashStatsResultSetOfMetricbeatMonitoring = require('./__mocks__/fixtures/logstash_stats_metricbeat_monitoring_results.json');
const logstashStateResultsMapOfMetricbeatMonitoring = new Map();
// Load data for state results.
['1n1p', '1nmp', 'mnmp'].forEach((data) => {
logstashStateResultsMapOfMetricbeatMonitoring.set(
data,
// eslint-disable-next-line @typescript-eslint/no-var-requires
require(`./__mocks__/fixtures/logstash_state_metricbeat_monitoring_results_${data}`)
);
});
const getBaseOptions = () => ({
clusters: {},
allEphemeralIds: {},
versions: {},
plugins: {},
});
describe('LogstashMetricbeatMonitoring', () => {
let metricbeatMonitoring: LogstashMetricbeatMonitoring;
const monitoringClusterUuid: string = 'monitoringClusterUuid';
const searchMock = sinon.stub();
const callCluster = { search: searchMock } as unknown as ElasticsearchClient;
const start = '2024-05-31T00:00:00.000Z';
const end = '2024-05-31T00:20:00.000Z';
beforeEach(() => {
metricbeatMonitoring = new LogstashMetricbeatMonitoring();
searchMock.returns(Promise.resolve({}));
});
afterEach(() => {
searchMock.reset();
});
test('should set and get indexPattern correctly', () => {
metricbeatMonitoring.setIndexPattern('legacy');
const indexPatternForLegacy = metricbeatMonitoring.getIndexPattern();
expect(indexPatternForLegacy.stats).toBe(INDEX_PATTERN_LOGSTASH_MONITORING);
expect(indexPatternForLegacy.state).toBe(INDEX_PATTERN_LOGSTASH_MONITORING);
metricbeatMonitoring.setIndexPattern('stack');
const indexPatternForStack = metricbeatMonitoring.getIndexPattern();
expect(indexPatternForStack.stats).toBe(INDEX_PATTERN_LOGSTASH_STACK_MONITORING_STATS);
expect(indexPatternForStack.state).toBe(INDEX_PATTERN_LOGSTASH_STACK_MONITORING_STATE);
});
describe('Logstash metricbeat monitoring query test', () => {
const clusterUuid = 'a';
const ephemeralIds = ['a', 'b', 'c'];
it('creates proper query for stats', async () => {
const expectedQuery = {
bool: {
filter: [
{
range: {
timestamp: {
format: 'epoch_millis',
gte: 1717113600000,
lte: 1717114800000,
},
},
},
{
bool: {
should: [
{ term: { 'metricset.name': 'node_stats' } },
{ term: { 'data_stream.dataset': 'logstash.stack_monitoring.node_stats' } },
],
},
},
],
},
};
await (metricbeatMonitoring as any).fetchLogstashStats(
callCluster,
monitoringClusterUuid,
start,
end,
{} as any
);
const { args } = searchMock.firstCall;
const [{ body }] = args;
expect(body.from).toEqual(0);
expect(body.size).toEqual(10000);
expect(body.query).toEqual(expectedQuery);
});
it('creates the logstash state query correctly for state', async () => {
const expected = {
bool: {
filter: [
{
terms: {
'logstash.node.state.pipeline.ephemeral_id': ['a', 'b', 'c'],
},
},
{
bool: {
should: [
{ term: { 'metricset.name': 'node' } },
{ term: { 'data_stream.dataset': 'logstash.stack_monitoring.node' } },
],
},
},
],
},
};
await (metricbeatMonitoring as any).fetchLogstashState(
callCluster,
clusterUuid,
ephemeralIds,
start,
end,
{} as any
);
const { args } = searchMock.firstCall;
const [{ body }] = args;
expect(body.query).toEqual(expected);
});
});
describe('Process query results', () => {
it('summarizes empty results', () => {
const resultsEmpty = undefined;
const options = getBaseOptions();
(metricbeatMonitoring as any).processStatsResults(
resultsEmpty as any,
options,
monitoringClusterUuid
);
expect(options.clusters).toStrictEqual({});
});
it('summarizes single result with some missing fields', () => {
const source = {
metricset: {
period: 10000,
name: 'node_stats',
},
logstash: {
cluster: {
id: 'FlV4ckTxQ0a78hmBkzzc9A',
},
elasticsearch: {
cluster: {
id: 'FlV4ckTxQ0a78hmBkzzc9A',
},
},
node: {
stats: {
logstash: {
uuid: '61de393a-f2b6-4b6c-8cea-22661f9c4134',
},
pipelines: [
{
id: 'main',
ephemeral_id: 'cf37c6fa-2f1a-41e2-9a89-36b420a8b9a5',
queue: {
type: 'memory',
},
},
],
},
},
},
};
const results = {
hits: {
hits: [
{
_source: source,
},
],
},
};
const options = getBaseOptions();
(metricbeatMonitoring as any).processStatsResults(
results as any,
options,
'FlV4ckTxQ0a78hmBkzzc9A'
);
expect(options.clusters).toStrictEqual({
FlV4ckTxQ0a78hmBkzzc9A: {
count: 1,
cluster_stats: {
plugins: [],
collection_types: {
metricbeat: 1,
},
monitoringClusterUuid: 'FlV4ckTxQ0a78hmBkzzc9A',
pipelines: {},
queues: {
memory: 1,
},
},
versions: [],
},
});
});
it('retrieves all ephemeral ids from all hits for the same cluster', () => {
const source1 = {
metricset: {
period: 10000,
name: 'node_stats',
},
logstash: {
cluster: {
id: 'FlV4ckTxQ0a78hmBkzzc9A',
},
elasticsearch: {
cluster: {
id: 'FlV4ckTxQ0a78hmBkzzc9A',
},
},
node: {
stats: {
logstash: {
uuid: '0000000-0000-0000-0000-000000000000',
},
pipelines: [
{
id: 'main',
ephemeral_id: 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
queue: {
type: 'memory',
},
},
],
},
},
},
};
const source2 = {
metricset: {
period: 10000,
name: 'node_stats',
},
logstash: {
cluster: {
id: 'FlV4ckTxQ0a78hmBkzzc9A',
},
elasticsearch: {
cluster: {
id: 'FlV4ckTxQ0a78hmBkzzc9A',
},
},
node: {
stats: {
logstash: {
uuid: '11111111-1111-1111-1111-111111111111',
},
pipelines: [
{
id: 'main',
ephemeral_id: 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
queue: {
type: 'memory',
},
},
],
},
},
},
};
const source3 = {
metricset: {
period: 10000,
name: 'node_stats',
},
logstash: {
cluster: {
id: '3',
},
elasticsearch: {
cluster: {
id: '3',
},
},
node: {
stats: {
logstash: {
uuid: '22222222-2222-2222-2222-222222222222',
},
pipelines: [
{
id: 'main',
ephemeral_id: 'cccccccc-cccc-cccc-cccc-cccccccccccc',
queue: {
type: 'memory',
},
},
],
},
},
},
};
const results = {
hits: {
hits: [
{
_source: source1,
},
{
_source: source2,
},
{
_source: source3,
},
],
},
};
const options = getBaseOptions();
(metricbeatMonitoring as any).processStatsResults(
results as any,
options,
monitoringClusterUuid
);
expect(options.allEphemeralIds).toStrictEqual({
FlV4ckTxQ0a78hmBkzzc9A: [
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
],
'3': ['cccccccc-cccc-cccc-cccc-cccccccccccc'],
});
expect(options.clusters).toStrictEqual({
FlV4ckTxQ0a78hmBkzzc9A: {
count: 2,
cluster_stats: {
plugins: [],
collection_types: {
metricbeat: 2,
},
monitoringClusterUuid: 'monitoringClusterUuid',
pipelines: {},
queues: {
memory: 2,
},
},
versions: [],
},
'3': {
count: 1,
cluster_stats: {
plugins: [],
collection_types: {
metricbeat: 1,
},
monitoringClusterUuid: 'monitoringClusterUuid',
pipelines: {},
queues: {
memory: 1,
},
},
versions: [],
},
});
});
it('summarizes stats from hits across multiple result objects', () => {
const options = getBaseOptions();
// logstashStatsResultSet is an array of many small query results
logstashStatsResultSetOfMetricbeatMonitoring.forEach((results: any) => {
(metricbeatMonitoring as any).processStatsResults(results, options, monitoringClusterUuid);
});
logstashStateResultsMapOfMetricbeatMonitoring.forEach(
(value: string[], clusterUuid: string) => {
value.forEach((results: any) => {
(metricbeatMonitoring as any).processStateResults(
results,
options,
monitoringClusterUuid
);
});
}
);
expect(options.allEphemeralIds).toStrictEqual({
'1n1p': ['cf37c6fa-2f1a-41e2-9a89-36b420a8b9a5'],
'1nmp': [
'47a70feb-3cb5-4618-8670-2c0bada61acd',
'5a65d966-0330-4bd7-82f2-ee81040c13cf',
'8d33fe25-a2c0-4c54-9ecf-d218cb8dbfe4',
'f4167a94-20a8-43e7-828e-4cf38d906187',
],
mnmp: [
'2fcd4161-e08f-4eea-818b-703ea3ec6389',
'c6785d63-6e5f-42c2-839d-5edf139b7c19',
'bc6ef6f2-ecce-4328-96a2-002de41a144d',
'72058ad1-68a1-45f6-a8e8-10621ffc7288',
'18593052-c021-4158-860d-d8122981a0ac',
'4207025c-9b00-4bea-a36c-6fbf2d3c215e',
'0ec4702d-b5e5-4c60-91e9-6fa6a836f0d1',
'41258219-b129-4fad-a629-f244826281f8',
'e73bc63d-561a-4acd-a0c4-d5f70c4603df',
'ddf882b7-be26-4a93-8144-0aeb35122651',
'602936f5-98a3-4f8c-9471-cf389a519f4b',
'8b300988-62cc-4bc6-9ee0-9194f3f78e27',
'6ab60531-fb6f-478c-9063-82f2b0af2bed',
'802a5994-a03c-44b8-a650-47c0f71c2e48',
'6070b400-5c10-4c5e-b5c5-a5bd9be6d321',
'3193df5f-2a34-4fe3-816e-6b05999aa5ce',
'994e68cd-d607-40e6-a54c-02a51caa17e0',
],
});
expect(options.clusters).toStrictEqual({
'1n1p': {
count: 1,
versions: [
{
count: 1,
version: '7.10.0',
},
],
cluster_stats: {
collection_types: { metricbeat: 1 },
monitoringClusterUuid: 'monitoringClusterUuid',
pipelines: {
batch_size_avg: 125,
batch_size_max: 125,
batch_size_min: 125,
batch_size_total: 125,
count: 1,
sources: {
file: true,
},
workers_avg: 1,
workers_max: 1,
workers_min: 1,
workers_total: 1,
},
plugins: [
{
count: 1,
name: 'logstash-input-stdin',
},
{
count: 1,
name: 'logstash-input-elasticsearch',
},
{
count: 3,
name: 'logstash-filter-mutate',
},
{
count: 3,
name: 'logstash-filter-ruby',
},
{
count: 1,
name: 'logstash-filter-split',
},
{
count: 1,
name: 'logstash-filter-elasticsearch',
},
{
count: 1,
name: 'logstash-filter-aggregate',
},
{
count: 1,
name: 'logstash-filter-drop',
},
{
count: 1,
name: 'logstash-output-elasticsearch',
},
{
count: 1,
name: 'logstash-output-stdout',
},
],
queues: {
memory: 1,
},
},
},
'1nmp': {
count: 1,
versions: [
{
count: 1,
version: '7.8.0',
},
],
cluster_stats: {
collection_types: { metricbeat: 1 },
monitoringClusterUuid: 'monitoringClusterUuid',
pipelines: {
batch_size_avg: 406.5,
batch_size_max: 1251,
batch_size_min: 125,
batch_size_total: 1626,
count: 4,
sources: {
xpack: true,
},
workers_avg: 17.25,
workers_max: 44,
workers_min: 1,
workers_total: 69,
},
plugins: [
{
count: 4,
name: 'logstash-input-stdin',
},
{
count: 4,
name: 'logstash-output-stdout',
},
],
queues: {
memory: 3,
persisted: 1,
},
},
},
mnmp: {
count: 3,
versions: [
{
count: 1,
version: '7.9.2',
},
{
count: 1,
version: '7.9.1',
},
{
count: 1,
version: '7.10.0',
},
],
cluster_stats: {
collection_types: { metricbeat: 3 },
monitoringClusterUuid: 'monitoringClusterUuid',
pipelines: {
batch_size_avg: 33.294117647058826,
batch_size_max: 125,
batch_size_min: 1,
batch_size_total: 566,
count: 17,
sources: {
file: true,
string: true,
},
workers_avg: 7.411764705882353,
workers_max: 16,
workers_min: 1,
workers_total: 126,
},
plugins: [
{
count: 1,
name: 'logstash-input-stdin',
},
{
count: 1,
name: 'logstash-filter-clone',
},
{
count: 3,
name: 'logstash-output-pipeline',
},
{
count: 2,
name: 'logstash-input-pipeline',
},
{
count: 16,
name: 'logstash-filter-sleep',
},
{
count: 14,
name: 'logstash-output-stdout',
},
{
count: 14,
name: 'logstash-input-generator',
},
],
queues: {
memory: 3,
persisted: 14,
},
},
},
});
});
});
});

View file

@@ -0,0 +1,375 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { ElasticsearchClient } from '@kbn/core/server';
import type * as estypes from '@elastic/elasticsearch/lib/api/typesWithBodyKey';
import { createQuery } from './create_query';
import { mapToList } from './get_high_level_stats';
import { incrementByKey } from './get_high_level_stats';
import {
TELEMETRY_QUERY_SOURCE,
INDEX_PATTERN_LOGSTASH_MONITORING,
INDEX_PATTERN_LOGSTASH_STACK_MONITORING_STATE,
INDEX_PATTERN_LOGSTASH_STACK_MONITORING_STATS,
} from '../../common/constants';
import {
HITS_SIZE,
getLogstashBaseStats,
Counter,
LogstashMonitoring,
LogstashProcessOptions,
LogstashState,
LogstashStats,
LogstashStatsByClusterUuid,
} from './logstash_monitoring';
export class LogstashMetricbeatMonitoring implements LogstashMonitoring {
private indexPattern: { [key: string]: string } = {
state: INDEX_PATTERN_LOGSTASH_STACK_MONITORING_STATE,
stats: INDEX_PATTERN_LOGSTASH_STACK_MONITORING_STATS,
};
/*
* Call the function for fetching and summarizing Logstash metrics for Metricbeat monitoring
* @param {Object} callCluster - ES client
* @param {Array} clusterUuids - List cluster UUIDs to retrieve metrics
* @param {string} monitoringClusterUuid - monitoring cluster UUID
* @param {string} start - start timestamp
* @param {string} end - end timestamp
* @param {Object} options - additional processing required options
* @return {Object} - Logstash stats in an object keyed by the cluster UUIDs
   * Note that we try to fetch all metrics for the given period regardless of cluster UUID.
   * If a metric document has no cluster UUID, it is reported under the monitoring cluster UUID.
*/
async collectMetrics(
callCluster: ElasticsearchClient,
clusterUuids: string[],
monitoringClusterUuid: string,
start: string,
end: string,
options: LogstashProcessOptions
): Promise<LogstashStatsByClusterUuid> {
await this.fetchLogstashStats(callCluster, monitoringClusterUuid, start, end, options);
const allEphemeralIds = Object.values(options.allEphemeralIds).flat();
if (allEphemeralIds.length > 0) {
await this.fetchLogstashState(callCluster, monitoringClusterUuid, allEphemeralIds, options);
}
return options.clusters;
}
/*
* Sets the index patterns based on the metricbeat monitoring types: [legacy, stack]
* @param monitoringType - the monitoring type where metricbeat monitoring is intended.
*/
setIndexPattern(monitoringType: string) {
if (monitoringType === 'stack') {
this.indexPattern.state = INDEX_PATTERN_LOGSTASH_STACK_MONITORING_STATE;
this.indexPattern.stats = INDEX_PATTERN_LOGSTASH_STACK_MONITORING_STATS;
} else {
this.indexPattern.state = INDEX_PATTERN_LOGSTASH_MONITORING;
this.indexPattern.stats = INDEX_PATTERN_LOGSTASH_MONITORING;
}
}
getIndexPattern(): { [key: string]: string } {
return this.indexPattern;
}
/*
* Update a clusters object with processed Logstash stats for metricbeat monitoring
* @param {Array} results - array of LogstashStats docs from ES
* @param {Object} clusters - LogstashBaseStats in an object keyed by the cluster UUIDs
* @param {Object} allEphemeralIds - EphemeralIds in an object keyed by cluster UUIDs to track the pipelines for the cluster
* @param {Object} versions - Versions in an object keyed by cluster UUIDs to track the logstash versions for the cluster
* @param {Object} plugins - plugin information keyed by cluster UUIDs to count the unique plugins
* @param {string} monitoringClusterUuid - monitoring cluster UUID
*/
private processStatsResults(
results: estypes.SearchResponse<LogstashStats>,
{ clusters, allEphemeralIds, versions, plugins }: LogstashProcessOptions,
monitoringClusterUuid: string
) {
const currHits = results?.hits?.hits || [];
currHits.forEach((hit) => {
      // consider the orphan case as well
      // orphan case: the pipeline doesn't set a cluster UUID, or no elasticsearch output plugin is configured in the pipeline
const clusterUuid =
hit._source!.logstash?.elasticsearch?.cluster?.id || monitoringClusterUuid;
if (clusterUuid !== undefined && clusters[clusterUuid] === undefined) {
clusters[clusterUuid] = getLogstashBaseStats();
versions[clusterUuid] = new Map();
plugins[clusterUuid] = new Map();
}
const logstashStats = hit._source?.logstash?.node?.stats;
if (clusterUuid !== undefined && logstashStats !== undefined) {
const clusterStats = clusters[clusterUuid].cluster_stats || {};
clusterStats.monitoringClusterUuid = monitoringClusterUuid;
clusters[clusterUuid].count = (clusters[clusterUuid].count || 0) + 1;
const thisVersion = logstashStats.logstash?.version;
const a: Counter = versions[clusterUuid];
incrementByKey(a, thisVersion);
clusters[clusterUuid].versions = mapToList(a, 'version');
const thisCollectionType = hit._source?.agent?.type || 'metricbeat';
if (!clusterStats.hasOwnProperty('collection_types')) {
clusterStats.collection_types = {};
}
clusterStats.collection_types![thisCollectionType] =
(clusterStats.collection_types![thisCollectionType] || 0) + 1;
const pipelines = logstashStats.pipelines || [];
pipelines.forEach((pipeline) => {
const thisQueueType = pipeline.queue?.type;
if (thisQueueType !== undefined) {
if (!clusterStats.hasOwnProperty('queues')) {
clusterStats.queues = {};
}
clusterStats.queues![thisQueueType] = (clusterStats.queues![thisQueueType] || 0) + 1;
}
const ephemeralId = pipeline.ephemeral_id;
if (ephemeralId !== undefined) {
allEphemeralIds[clusterUuid] = allEphemeralIds[clusterUuid] || [];
allEphemeralIds[clusterUuid].push(ephemeralId);
}
});
}
});
}
/*
* Update a clusters object with logstash state details
* @param {Array} results - array of LogstashState docs from ES
* @param {Object} clusters - LogstashBaseStats in an object keyed by the cluster UUIDs
* @param {Object} plugins - plugin information keyed by cluster UUIDs to count the unique plugins
* @param {string} monitoringClusterUuid - monitoring cluster UUID
*/
private processStateResults(
results: estypes.SearchResponse<LogstashState>,
{ clusters, plugins }: LogstashProcessOptions,
monitoringClusterUuid: string
) {
const currHits = results?.hits?.hits || [];
currHits.forEach((hit) => {
const clusterUuid =
hit._source?.logstash?.elasticsearch?.cluster?.id || monitoringClusterUuid;
const pipelineStats = clusters[clusterUuid]?.cluster_stats?.pipelines;
const thisLogstashStatePipeline = hit._source?.logstash?.node?.state?.pipeline;
if (pipelineStats !== undefined && thisLogstashStatePipeline !== undefined) {
pipelineStats.count = (pipelineStats.count || 0) + 1;
const thisPipelineBatchSize = thisLogstashStatePipeline.batch_size;
if (thisPipelineBatchSize !== undefined) {
pipelineStats.batch_size_total =
(pipelineStats.batch_size_total || 0) + thisPipelineBatchSize;
pipelineStats.batch_size_max = pipelineStats.batch_size_max || 0;
pipelineStats.batch_size_min = pipelineStats.batch_size_min || 0;
pipelineStats.batch_size_avg = pipelineStats.batch_size_total / pipelineStats.count;
if (thisPipelineBatchSize > pipelineStats.batch_size_max) {
pipelineStats.batch_size_max = thisPipelineBatchSize;
}
if (
pipelineStats.batch_size_min === 0 ||
thisPipelineBatchSize < pipelineStats.batch_size_min
) {
pipelineStats.batch_size_min = thisPipelineBatchSize;
}
}
const thisPipelineWorkers = thisLogstashStatePipeline.workers;
if (thisPipelineWorkers !== undefined) {
pipelineStats.workers_total = (pipelineStats.workers_total || 0) + thisPipelineWorkers;
pipelineStats.workers_max = pipelineStats.workers_max || 0;
pipelineStats.workers_min = pipelineStats.workers_min || 0;
pipelineStats.workers_avg = pipelineStats.workers_total / pipelineStats.count;
if (thisPipelineWorkers > pipelineStats.workers_max) {
pipelineStats.workers_max = thisPipelineWorkers;
}
if (pipelineStats.workers_min === 0 || thisPipelineWorkers < pipelineStats.workers_min) {
pipelineStats.workers_min = thisPipelineWorkers;
}
}
        // Extract the vertices object from the pipeline representation. From this, we can
        // retrieve the source of the pipeline configuration (from file, string, or
        // x-pack-config-management), and the input, filter, and output plugins used by that pipeline.
const vertices = thisLogstashStatePipeline.representation?.graph?.vertices;
if (vertices !== undefined) {
vertices.forEach((vertex) => {
const configName = vertex.config_name;
const pluginType = vertex.plugin_type;
let pipelineConfig = vertex.meta?.source?.protocol;
if (pipelineConfig !== undefined) {
if (pipelineConfig === 'string' || pipelineConfig === 'str') {
pipelineConfig = 'string';
} else if (pipelineConfig === 'x-pack-config-management') {
pipelineConfig = 'xpack';
} else {
pipelineConfig = 'file';
}
if (!pipelineStats.hasOwnProperty('sources')) {
pipelineStats.sources = {};
}
pipelineStats.sources![pipelineConfig] = true;
}
if (configName !== undefined && pluginType !== undefined) {
incrementByKey(plugins[clusterUuid], `logstash-${pluginType}-${configName}`);
}
});
}
const clusterStats = clusters[clusterUuid]?.cluster_stats;
if (clusterStats !== undefined) {
clusterStats.plugins = mapToList(plugins[clusterUuid], 'name');
}
}
});
}
/*
* Creates a query and executes against ES to fetch metricbeat monitoring, Logstash stats metrics
* @param {Object} callCluster - ES client
* @param {string} monitoringClusterUuid - monitoring cluster UUID
* @param {string} start - start timestamp
* @param {string} end - end timestamp
* @param {Object} options - additional processing required options
*/
private async fetchLogstashStats(
callCluster: ElasticsearchClient,
monitoringClusterUuid: string,
start: string,
end: string,
{ page = 0, ...options }: { page?: number } & LogstashProcessOptions
): Promise<void> {
const filterPath: string[] = [
'hits.hits._source.cluster_uuid',
'hits.hits._source.type',
'hits.hits._source.source_node',
'hits.hits._source.agent.type',
'hits.hits._source.logstash.elasticsearch.cluster.id', // alias for cluster_uuid
'hits.hits._source.logstash.node.stats.pipelines.id',
'hits.hits._source.logstash.node.stats.pipelines.ephemeral_id',
'hits.hits._source.logstash.node.stats.pipelines.queue.type',
'hits.hits._source.logstash.node.stats.logstash.version',
'hits.hits._source.logstash.node.stats.logstash.uuid',
];
const params: estypes.SearchRequest = {
index: this.indexPattern.stats,
ignore_unavailable: true,
filter_path: filterPath,
body: {
query: createQuery({
start,
end,
filters: [
{
bool: {
should: [
{ term: { 'metricset.name': 'node_stats' } },
{ term: { 'data_stream.dataset': 'logstash.stack_monitoring.node_stats' } },
],
},
},
],
}) as estypes.QueryDslQueryContainer,
collapse: {
field: 'logstash.node.stats.logstash.uuid',
},
sort: [{ ['timestamp']: { order: 'desc', unmapped_type: 'long' } }],
from: page * HITS_SIZE,
size: HITS_SIZE,
},
};
const results = await callCluster.search<LogstashStats>(params, {
headers: {
'X-QUERY-SOURCE': TELEMETRY_QUERY_SOURCE,
},
});
const hitsLength = results?.hits?.hits.length || 0;
if (hitsLength > 0) {
// further augment the clusters object with more stats
this.processStatsResults(results, options, monitoringClusterUuid);
}
return Promise.resolve();
}
/*
* Creates a query and executes against ES to fetch metricbeat monitoring, Logstash state metrics
* @param {Object} callCluster - ES client
* @param {string} monitoringClusterUuid - monitoring cluster UUID
* @param {Array} ephemeralIds - Logstash pipeline ephemeral IDs
* @param {Object} options - additional processing required options
*/
private async fetchLogstashState(
callCluster: ElasticsearchClient,
monitoringClusterUuid: string,
ephemeralIds: string[],
{ page = 0, ...options }: { page?: number } & LogstashProcessOptions
): Promise<void> {
const filterPath: string[] = [
'hits.hits._source.logstash.node.state.pipeline.batch_size',
'hits.hits._source.logstash.node.state.pipeline.workers',
'hits.hits._source.logstash.node.state.pipeline.representation.graph.vertices',
'hits.hits._source.type',
];
const params: estypes.SearchRequest = {
index: this.indexPattern.state,
ignore_unavailable: true,
filter_path: filterPath,
body: {
query: createQuery({
          // metricbeat sends state metrics only occasionally,
          // so we intentionally don't restrict by start and end periods; node state info is needed to fill in plugin usage
filters: [
{ terms: { 'logstash.node.state.pipeline.ephemeral_id': ephemeralIds } },
{
bool: {
should: [
{ term: { 'metricset.name': 'node' } },
{ term: { 'data_stream.dataset': 'logstash.stack_monitoring.node' } },
],
},
},
],
}) as estypes.QueryDslQueryContainer,
collapse: {
field: 'logstash.node.state.pipeline.ephemeral_id',
},
sort: [{ ['timestamp']: { order: 'desc', unmapped_type: 'long' } }],
size: ephemeralIds.length,
},
};
const results = await callCluster.search<LogstashState>(params, {
headers: {
'X-QUERY-SOURCE': TELEMETRY_QUERY_SOURCE,
},
});
const hitsLength = results?.hits?.hits.length || 0;
if (hitsLength > 0) {
// further augment the clusters object with more stats
this.processStateResults(results, options, monitoringClusterUuid);
}
return Promise.resolve();
}
}
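The batch_size_* and workers_* fields in processStateResults above are maintained as a running aggregate per cluster. The standalone helper below sketches that update rule for illustration only; note that the real module shares a single pipeline count between batch sizes and worker counts, while this sketch folds the count into each call for simplicity.

// Standalone sketch of the running min/max/total/avg update; not part of the module.
interface RunningStats {
  count: number;
  total: number;
  min: number;
  max: number;
  avg: number;
}

function updateRunningStats(stats: RunningStats, value: number): RunningStats {
  const count = stats.count + 1;
  const total = stats.total + value;
  return {
    count,
    total,
    // 0 is treated as "unset", matching the batch_size_min / workers_min handling above
    min: stats.min === 0 || value < stats.min ? value : stats.min,
    max: value > stats.max ? value : stats.max,
    avg: total / count,
  };
}

// [125, 250, 1].reduce(updateRunningStats, { count: 0, total: 0, min: 0, max: 0, avg: 0 })
// -> { count: 3, total: 376, min: 1, max: 250, avg: 125.33... }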

View file

@@ -0,0 +1,202 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { ElasticsearchClient } from '@kbn/core-elasticsearch-server';
export type Counter = Map<string, number>;
export const HITS_SIZE = 10000; // maximum hits to receive from ES with each search
export const LOGSTASH_PLUGIN_TYPES = ['input', 'codec', 'filter', 'output'];
export interface LogstashMonitoring {
collectMetrics(
callCluster: ElasticsearchClient,
clusterUuids: string[],
monitoringClusterUuid: string,
start: string,
end: string,
options: LogstashProcessOptions
): Promise<LogstashStatsByClusterUuid>;
setIndexPattern(pattern: string): void;
}
export interface LogstashBaseStats {
// stats
versions: Array<{ version: string; count: number }>;
count: number;
cluster_stats?: {
collection_types?: { [collection_type_type: string]: number };
queues?: { [queue_type: string]: number };
plugins?: Array<{ name: string; count: number }>;
monitoringClusterUuid?: string;
pipelines?: {
count?: number;
batch_size_max?: number;
batch_size_avg?: number;
batch_size_min?: number;
batch_size_total?: number;
workers_max?: number;
workers_avg?: number;
workers_min?: number;
workers_total?: number;
sources?: { [source_type: string]: boolean };
};
};
}
export const getLogstashBaseStats = () => ({
versions: [],
count: 0,
cluster_stats: {
pipelines: {},
plugins: [],
},
});
export interface LogstashStats {
cluster_uuid: string;
source_node: string;
type: string;
agent?: {
type: string;
};
host?: {
id?: string;
};
// legacy monitoring shape
logstash_stats?: {
pipelines?: [
{
id?: string;
ephemeral_id: string;
queue?: {
type: string;
};
}
];
logstash?: {
version?: string;
uuid?: string;
snapshot?: string;
};
};
// metricbeat and agent driven monitoring shape
logstash?: {
node?: {
stats?: {
pipelines?: [
{
id?: string;
ephemeral_id: string;
queue?: {
type: string;
};
}
];
logstash?: {
version?: string;
uuid?: string;
snapshot?: string;
ephemeral_id: string;
pipelines?: [];
};
};
};
elasticsearch?: {
cluster?: {
id?: string;
};
};
};
}
export interface LogstashState {
// legacy monitoring shape
cluster_uuid: string;
logstash_state?: {
pipeline?: {
batch_size?: number;
workers?: number;
representation?: {
graph?: {
vertices?: [
{
config_name?: string;
plugin_type?: string;
meta?: {
source?: {
protocol?: string;
};
};
}
];
};
};
};
};
logstash?: {
// metricbeat monitoring shape
node?: {
state?: {
pipeline?: {
batch_size?: number;
workers?: number;
representation?: {
graph?: {
vertices?: [
{
config_name?: string;
plugin_type?: string;
meta?: {
source?: {
protocol?: string;
};
};
}
];
};
};
};
};
};
elasticsearch?: {
cluster?: {
id?: string;
};
};
// agent monitoring shape
pipeline?: {
elasticsearch?: {
cluster?: {
id?: string;
};
};
id: string;
plugin?: {
        // keyed by plugin type (input, codec, filter, or output); the value holds the plugin name
[key: string]: PluginName;
};
};
};
}
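/*
 * Illustrative helper, not part of this module: the same pipeline state can arrive under the
 * legacy `logstash_state.pipeline` path or the metricbeat/agent `logstash.node.state.pipeline`
 * path, so a consumer could normalize the two shapes before reading batch_size, workers, or the
 * representation graph vertices.
 *
 *   type PipelineState = NonNullable<NonNullable<LogstashState['logstash_state']>['pipeline']>;
 *
 *   function getPipelineState(doc: LogstashState): PipelineState | undefined {
 *     return doc.logstash_state?.pipeline ?? doc.logstash?.node?.state?.pipeline;
 *   }
 */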
export interface LogstashProcessOptions {
clusters: { [clusterUuid: string]: LogstashBaseStats };
allEphemeralIds: { [clusterUuid: string]: string[] }; // pipeline ephemeral IDs
allHostIds: { [clusterUuid: string]: string[] };
versions: { [clusterUuid: string]: Counter };
plugins: { [clusterUuid: string]: Counter };
}
export interface PluginName {
name: string;
}
export interface LogstashStatsByClusterUuid {
[clusterUuid: string]: LogstashBaseStats;
}

View file

@ -0,0 +1,551 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import sinon from 'sinon';
import { ElasticsearchClient } from '@kbn/core/server';
import { LogstashSelfMonitoring } from './logstash_self_monitoring';
// eslint-disable-next-line @typescript-eslint/no-var-requires
const logstashStatsResultSetOfSelfMonitoring = require('./__mocks__/fixtures/logstash_stats_self_monitoring_results.json');
const logstashStateResultsMapOfSelfMonitoring = new Map();
// Load data for state results.
['1n1p', '1nmp', 'mnmp'].forEach((data) => {
logstashStateResultsMapOfSelfMonitoring.set(
data,
// eslint-disable-next-line @typescript-eslint/no-var-requires
require(`./__mocks__/fixtures/logstash_state_self_monitoring_results_${data}`)
);
});
const getBaseOptions = () => ({
clusters: {},
allEphemeralIds: {},
versions: {},
plugins: {},
});
describe('LogstashSelfMonitoring', () => {
let selfMonitoring: LogstashSelfMonitoring;
const clusterUuids = ['aCluster', 'bCluster', 'cCluster'];
const monitoringClusterUuid: string = 'monitoringClusterUuid';
const searchMock = sinon.stub();
const callCluster = { search: searchMock } as unknown as ElasticsearchClient;
const start = '2024-05-31T00:00:00.000Z';
const end = '2024-05-31T00:20:00.000Z';
beforeEach(() => {
selfMonitoring = new LogstashSelfMonitoring();
searchMock.returns(Promise.resolve({}));
});
afterEach(() => {
searchMock.reset();
});
describe('Logstash self monitoring query test', () => {
const clusterUuid = 'a';
const ephemeralIds = ['a', 'b', 'c'];
it('creates proper query for stats', async () => {
const expectedQuery = {
bool: {
filter: [
{
range: {
timestamp: {
format: 'epoch_millis',
gte: 1717113600000,
lte: 1717114800000,
},
},
},
{
term: {
cluster_uuid: clusterUuids[0],
},
},
{
bool: {
should: [{ term: { type: 'logstash_stats' } }],
},
},
],
},
};
await (selfMonitoring as any).fetchLogstashStats(
callCluster,
clusterUuids[0],
monitoringClusterUuid,
start,
end,
{} as any
);
const { args } = searchMock.firstCall;
const [{ body }] = args;
expect(body.from).toEqual(0);
expect(body.size).toEqual(10000);
expect(body.query).toEqual(expectedQuery);
});
    it('creates proper query for state', async () => {
const expected = {
bool: {
filter: [
{
range: {
timestamp: {
format: 'epoch_millis',
gte: 1717113600000,
lte: 1717114800000,
},
},
},
{
terms: {
'logstash_state.pipeline.ephemeral_id': ['a', 'b', 'c'],
},
},
{
bool: {
should: [{ term: { type: 'logstash_state' } }],
},
},
],
},
};
await (selfMonitoring as any).fetchLogstashState(
callCluster,
clusterUuid,
ephemeralIds,
start,
end,
{} as any
);
const { args } = searchMock.firstCall;
const [{ body }] = args;
expect(body.query).toEqual(expected);
});
});
describe('Process query results', () => {
it('summarizes empty results', () => {
const resultsEmpty = undefined;
const options = getBaseOptions();
(selfMonitoring as any).processStatsResults(
resultsEmpty as any,
options,
monitoringClusterUuid
);
expect(options.clusters).toStrictEqual({});
});
it('summarizes single result with some missing fields', () => {
const source = {
type: 'logstash_stats',
cluster_uuid: 'FlV4ckTxQ0a78hmBkzzc9A',
logstash_stats: {
logstash: {
uuid: '61de393a-f2b6-4b6c-8cea-22661f9c4134',
},
pipelines: [
{
id: 'main',
ephemeral_id: 'cf37c6fa-2f1a-41e2-9a89-36b420a8b9a5',
queue: {
type: 'memory',
},
},
],
},
};
const results = {
hits: {
hits: [
{
_source: source,
},
],
},
};
const options = getBaseOptions();
(selfMonitoring as any).processStatsResults(results as any, options, monitoringClusterUuid);
expect(options.clusters).toStrictEqual({
FlV4ckTxQ0a78hmBkzzc9A: {
count: 1,
cluster_stats: {
plugins: [],
collection_types: {
internal_collection: 1,
},
monitoringClusterUuid: 'monitoringClusterUuid',
pipelines: {},
queues: {
memory: 1,
},
},
versions: [],
},
});
});
it('retrieves all ephemeral ids from all hits for the same cluster', () => {
const source1 = {
type: 'logstash_stats',
cluster_uuid: 'FlV4ckTxQ0a78hmBkzzc9A',
logstash_stats: {
logstash: {
uuid: '0000000-0000-0000-0000-000000000000',
},
pipelines: [
{
id: 'main',
ephemeral_id: 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
queue: {
type: 'memory',
},
},
],
},
};
const source2 = {
type: 'logstash_stats',
cluster_uuid: 'FlV4ckTxQ0a78hmBkzzc9A',
logstash_stats: {
logstash: {
uuid: '11111111-1111-1111-1111-111111111111',
},
pipelines: [
{
id: 'main',
ephemeral_id: 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
queue: {
type: 'memory',
},
},
],
},
};
const source3 = {
type: 'logstash_stats',
cluster_uuid: '3',
logstash_stats: {
logstash: {
uuid: '22222222-2222-2222-2222-222222222222',
},
pipelines: [
{
id: 'main',
ephemeral_id: 'cccccccc-cccc-cccc-cccc-cccccccccccc',
queue: {
type: 'memory',
},
},
],
},
};
const results = {
hits: {
hits: [
{
_source: source1,
},
{
_source: source2,
},
{
_source: source3,
},
],
},
};
const options = getBaseOptions();
(selfMonitoring as any).processStatsResults(results as any, options, monitoringClusterUuid);
expect(options.allEphemeralIds).toStrictEqual({
FlV4ckTxQ0a78hmBkzzc9A: [
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
],
'3': ['cccccccc-cccc-cccc-cccc-cccccccccccc'],
});
expect(options.clusters).toStrictEqual({
FlV4ckTxQ0a78hmBkzzc9A: {
count: 2,
cluster_stats: {
plugins: [],
collection_types: {
internal_collection: 2,
},
monitoringClusterUuid: 'monitoringClusterUuid',
pipelines: {},
queues: {
memory: 2,
},
},
versions: [],
},
'3': {
count: 1,
cluster_stats: {
plugins: [],
collection_types: {
internal_collection: 1,
},
monitoringClusterUuid: 'monitoringClusterUuid',
pipelines: {},
queues: {
memory: 1,
},
},
versions: [],
},
});
});
it('summarizes stats from hits across multiple result objects', () => {
const options = getBaseOptions();
      // logstashStatsResultSetOfSelfMonitoring is an array of many small query results
logstashStatsResultSetOfSelfMonitoring.forEach((results: any) => {
(selfMonitoring as any).processStatsResults(results, options, monitoringClusterUuid);
});
logstashStateResultsMapOfSelfMonitoring.forEach((value: string[], clusterUuid: string) => {
value.forEach((results: any) => {
(selfMonitoring as any).processStateResults(results, clusterUuid, options);
});
});
expect(options.allEphemeralIds).toStrictEqual({
'1n1p': ['cf37c6fa-2f1a-41e2-9a89-36b420a8b9a5'],
'1nmp': [
'47a70feb-3cb5-4618-8670-2c0bada61acd',
'5a65d966-0330-4bd7-82f2-ee81040c13cf',
'8d33fe25-a2c0-4c54-9ecf-d218cb8dbfe4',
'f4167a94-20a8-43e7-828e-4cf38d906187',
],
mnmp: [
'2fcd4161-e08f-4eea-818b-703ea3ec6389',
'c6785d63-6e5f-42c2-839d-5edf139b7c19',
'bc6ef6f2-ecce-4328-96a2-002de41a144d',
'72058ad1-68a1-45f6-a8e8-10621ffc7288',
'18593052-c021-4158-860d-d8122981a0ac',
'4207025c-9b00-4bea-a36c-6fbf2d3c215e',
'0ec4702d-b5e5-4c60-91e9-6fa6a836f0d1',
'41258219-b129-4fad-a629-f244826281f8',
'e73bc63d-561a-4acd-a0c4-d5f70c4603df',
'ddf882b7-be26-4a93-8144-0aeb35122651',
'602936f5-98a3-4f8c-9471-cf389a519f4b',
'8b300988-62cc-4bc6-9ee0-9194f3f78e27',
'6ab60531-fb6f-478c-9063-82f2b0af2bed',
'802a5994-a03c-44b8-a650-47c0f71c2e48',
'6070b400-5c10-4c5e-b5c5-a5bd9be6d321',
'3193df5f-2a34-4fe3-816e-6b05999aa5ce',
'994e68cd-d607-40e6-a54c-02a51caa17e0',
],
});
expect(options.clusters).toStrictEqual({
'1n1p': {
count: 1,
versions: [
{
count: 1,
version: '7.10.0',
},
],
cluster_stats: {
collection_types: { internal_collection: 1 },
monitoringClusterUuid: 'monitoringClusterUuid',
pipelines: {
batch_size_avg: 125,
batch_size_max: 125,
batch_size_min: 125,
batch_size_total: 125,
count: 1,
sources: {
file: true,
},
workers_avg: 1,
workers_max: 1,
workers_min: 1,
workers_total: 1,
},
plugins: [
{
count: 1,
name: 'logstash-input-stdin',
},
{
count: 1,
name: 'logstash-input-elasticsearch',
},
{
count: 3,
name: 'logstash-filter-mutate',
},
{
count: 3,
name: 'logstash-filter-ruby',
},
{
count: 1,
name: 'logstash-filter-split',
},
{
count: 1,
name: 'logstash-filter-elasticsearch',
},
{
count: 1,
name: 'logstash-filter-aggregate',
},
{
count: 1,
name: 'logstash-filter-drop',
},
{
count: 1,
name: 'logstash-output-elasticsearch',
},
{
count: 1,
name: 'logstash-output-stdout',
},
],
queues: {
memory: 1,
},
},
},
'1nmp': {
count: 1,
versions: [
{
count: 1,
version: '7.8.0',
},
],
cluster_stats: {
collection_types: { internal_collection: 1 },
monitoringClusterUuid: 'monitoringClusterUuid',
pipelines: {
batch_size_avg: 406.5,
batch_size_max: 1251,
batch_size_min: 125,
batch_size_total: 1626,
count: 4,
sources: {
xpack: true,
},
workers_avg: 17.25,
workers_max: 44,
workers_min: 1,
workers_total: 69,
},
plugins: [
{
count: 4,
name: 'logstash-input-stdin',
},
{
count: 4,
name: 'logstash-output-stdout',
},
],
queues: {
memory: 3,
persisted: 1,
},
},
},
mnmp: {
count: 3,
versions: [
{
count: 1,
version: '7.9.2',
},
{
count: 1,
version: '7.9.1',
},
{
count: 1,
version: '7.10.0',
},
],
cluster_stats: {
collection_types: { internal_collection: 3 },
monitoringClusterUuid: 'monitoringClusterUuid',
pipelines: {
batch_size_avg: 33.294117647058826,
batch_size_max: 125,
batch_size_min: 1,
batch_size_total: 566,
count: 17,
sources: {
file: true,
string: true,
},
workers_avg: 7.411764705882353,
workers_max: 16,
workers_min: 1,
workers_total: 126,
},
plugins: [
{
count: 1,
name: 'logstash-input-stdin',
},
{
count: 1,
name: 'logstash-filter-clone',
},
{
count: 3,
name: 'logstash-output-pipeline',
},
{
count: 2,
name: 'logstash-input-pipeline',
},
{
count: 16,
name: 'logstash-filter-sleep',
},
{
count: 14,
name: 'logstash-output-stdout',
},
{
count: 14,
name: 'logstash-input-generator',
},
],
queues: {
memory: 3,
persisted: 14,
},
},
},
});
});
});
});

View file

@ -0,0 +1,358 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { ElasticsearchClient } from '@kbn/core/server';
import type * as estypes from '@elastic/elasticsearch/lib/api/typesWithBodyKey';
import { createQuery } from './create_query';
import { mapToList } from './get_high_level_stats';
import { incrementByKey } from './get_high_level_stats';
import { INDEX_PATTERN_LOGSTASH_MONITORING, TELEMETRY_QUERY_SOURCE } from '../../common/constants';
import {
HITS_SIZE,
getLogstashBaseStats,
Counter,
LogstashMonitoring,
LogstashProcessOptions,
LogstashState,
LogstashStats,
LogstashStatsByClusterUuid,
} from './logstash_monitoring';
export class LogstashSelfMonitoring implements LogstashMonitoring {
  /*
   * Fetches and summarizes Logstash metrics for self/legacy monitoring
   * @param {Object} callCluster - ES client
   * @param {Array} clusterUuids - list of cluster UUIDs to retrieve metrics for
   * @param {string} monitoringClusterUuid - monitoring cluster UUID
   * @param {string} start - start timestamp
   * @param {string} end - end timestamp
   * @param {Object} options - additional options required for processing
   * @return {Object} - Logstash stats in an object keyed by the cluster UUIDs
   * Note that we _only_ fetch metrics for the given time range and cluster UUIDs
   */
async collectMetrics(
callCluster: ElasticsearchClient,
clusterUuids: string[],
monitoringClusterUuid: string,
start: string,
end: string,
options: LogstashProcessOptions
): Promise<LogstashStatsByClusterUuid> {
for (const clusterUuid of clusterUuids) {
await this.fetchLogstashStats(
callCluster,
clusterUuid,
monitoringClusterUuid,
start,
end,
options
);
if (options.clusters[clusterUuid] !== undefined) {
await this.fetchLogstashState(
callCluster,
clusterUuid,
options.allEphemeralIds[clusterUuid],
start,
end,
options
);
}
}
return options.clusters;
}
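  /*
   * Illustrative usage only (how the surrounding telemetry task wires this up may differ):
   * the caller seeds an empty LogstashProcessOptions and lets collectMetrics mutate it.
   *
   *   const options: LogstashProcessOptions = {
   *     clusters: {},
   *     allEphemeralIds: {},
   *     allHostIds: {},
   *     versions: {},
   *     plugins: {},
   *   };
   *   const statsByClusterUuid = await new LogstashSelfMonitoring().collectMetrics(
   *     callCluster,
   *     clusterUuids,
   *     monitoringClusterUuid,
   *     start,
   *     end,
   *     options
   *   );
   */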
  // no-op: self monitoring always queries the fixed legacy monitoring index pattern
  setIndexPattern(monitoringType: string) {}
/*
* Update a clusters object with processed Logstash stats for self monitoring
   * @param {Object} results - LogstashStats search response from ES
* @param {Object} clusters - LogstashBaseStats in an object keyed by the cluster UUIDs
* @param {Object} allEphemeralIds - EphemeralIds in an object keyed by cluster UUIDs to track the pipelines for the cluster
* @param {Object} versions - Versions in an object keyed by cluster UUIDs to track the logstash versions for the cluster
* @param {Object} plugins - plugin information keyed by cluster UUIDs to count the unique plugins
* @param {string} monitoringClusterUuid - monitoring cluster UUID
*/
private processStatsResults(
results: estypes.SearchResponse<LogstashStats>,
{ clusters, allEphemeralIds, versions, plugins }: LogstashProcessOptions,
monitoringClusterUuid: string
) {
const currHits = results?.hits?.hits || [];
currHits.forEach((hit) => {
const clusterUuid = hit._source!.cluster_uuid;
if (clusterUuid !== undefined && clusters[clusterUuid] === undefined) {
clusters[clusterUuid] = getLogstashBaseStats();
versions[clusterUuid] = new Map();
plugins[clusterUuid] = new Map();
}
const logstashStats = hit._source?.logstash_stats;
if (clusterUuid !== undefined && logstashStats !== undefined) {
const clusterStats = clusters[clusterUuid].cluster_stats || {};
clusterStats.monitoringClusterUuid = monitoringClusterUuid;
clusters[clusterUuid].count = (clusters[clusterUuid].count || 0) + 1;
const thisVersion = logstashStats.logstash?.version;
const a: Counter = versions[clusterUuid];
incrementByKey(a, thisVersion);
clusters[clusterUuid].versions = mapToList(a, 'version');
// Internal Collection has no agent field, so default to 'internal_collection'
const thisCollectionType = hit._source?.agent?.type || 'internal_collection';
if (!clusterStats.hasOwnProperty('collection_types')) {
clusterStats.collection_types = {};
}
clusterStats.collection_types![thisCollectionType] =
(clusterStats.collection_types![thisCollectionType] || 0) + 1;
const pipelines = logstashStats.pipelines || [];
pipelines.forEach((pipeline) => {
const thisQueueType = pipeline.queue?.type;
if (thisQueueType !== undefined) {
if (!clusterStats.hasOwnProperty('queues')) {
clusterStats.queues = {};
}
clusterStats.queues![thisQueueType] = (clusterStats.queues![thisQueueType] || 0) + 1;
}
const ephemeralId = pipeline.ephemeral_id;
if (ephemeralId !== undefined) {
allEphemeralIds[clusterUuid] = allEphemeralIds[clusterUuid] || [];
allEphemeralIds[clusterUuid].push(ephemeralId);
}
});
}
});
}
/*
* Update a clusters object with logstash state details for self monitoring
   * @param {Object} results - LogstashState search response from ES
* @param {string} clusterUuid - A cluster UUID
* @param {Object} clusters - LogstashBaseStats in an object keyed by the cluster UUIDs
* @param {Object} plugins - plugin information keyed by cluster UUIDs to count the unique plugins
*/
private processStateResults(
results: estypes.SearchResponse<LogstashState>,
clusterUuid: string,
{ clusters, plugins }: LogstashProcessOptions
) {
const currHits = results?.hits?.hits || [];
const clusterStats = clusters[clusterUuid].cluster_stats;
const pipelineStats = clusters[clusterUuid].cluster_stats?.pipelines;
currHits.forEach((hit) => {
const thisLogstashStatePipeline = hit._source?.logstash_state?.pipeline;
if (pipelineStats !== undefined && thisLogstashStatePipeline !== undefined) {
pipelineStats.count = (pipelineStats.count || 0) + 1;
const thisPipelineBatchSize = thisLogstashStatePipeline.batch_size;
if (thisPipelineBatchSize !== undefined) {
pipelineStats.batch_size_total =
(pipelineStats.batch_size_total || 0) + thisPipelineBatchSize;
pipelineStats.batch_size_max = pipelineStats.batch_size_max || 0;
pipelineStats.batch_size_min = pipelineStats.batch_size_min || 0;
pipelineStats.batch_size_avg = pipelineStats.batch_size_total / pipelineStats.count;
if (thisPipelineBatchSize > pipelineStats.batch_size_max) {
pipelineStats.batch_size_max = thisPipelineBatchSize;
}
if (
pipelineStats.batch_size_min === 0 ||
thisPipelineBatchSize < pipelineStats.batch_size_min
) {
pipelineStats.batch_size_min = thisPipelineBatchSize;
}
}
const thisPipelineWorkers = thisLogstashStatePipeline.workers;
if (thisPipelineWorkers !== undefined) {
pipelineStats.workers_total = (pipelineStats.workers_total || 0) + thisPipelineWorkers;
pipelineStats.workers_max = pipelineStats.workers_max || 0;
pipelineStats.workers_min = pipelineStats.workers_min || 0;
pipelineStats.workers_avg = pipelineStats.workers_total / pipelineStats.count;
if (thisPipelineWorkers > pipelineStats.workers_max) {
pipelineStats.workers_max = thisPipelineWorkers;
}
if (pipelineStats.workers_min === 0 || thisPipelineWorkers < pipelineStats.workers_min) {
pipelineStats.workers_min = thisPipelineWorkers;
}
}
          // Extract the vertices object from the pipeline representation. From this we can
          // retrieve the source of the pipeline configuration (file, string, or
          // x-pack-config-management), and the input, filter, and output plugins used by that pipeline.
const vertices = thisLogstashStatePipeline.representation?.graph?.vertices;
if (vertices !== undefined) {
vertices.forEach((vertex) => {
const configName = vertex.config_name;
const pluginType = vertex.plugin_type;
let pipelineConfig = vertex.meta?.source?.protocol;
if (pipelineConfig !== undefined) {
if (pipelineConfig === 'string' || pipelineConfig === 'str') {
pipelineConfig = 'string';
} else if (pipelineConfig === 'x-pack-config-management') {
pipelineConfig = 'xpack';
} else {
pipelineConfig = 'file';
}
if (!pipelineStats.hasOwnProperty('sources')) {
pipelineStats.sources = {};
}
pipelineStats.sources![pipelineConfig] = true;
}
if (configName !== undefined && pluginType !== undefined) {
incrementByKey(plugins[clusterUuid], `logstash-${pluginType}-${configName}`);
}
});
}
}
});
if (clusterStats !== undefined) {
clusterStats.plugins = mapToList(plugins[clusterUuid], 'name');
}
}
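  /*
   * Worked example of the running aggregation above, with illustrative numbers: for three pipeline
   * state docs with batch sizes 125, 250, and 125, the pipeline stats evolve as
   *   after doc 1: count=1, batch_size_total=125, min=125, max=125, avg=125
   *   after doc 2: count=2, batch_size_total=375, min=125, max=250, avg=187.5
   *   after doc 3: count=3, batch_size_total=500, min=125, max=250, avg≈166.67
   * Workers accumulate the same way; min is only replaced while it is 0 or the new value is smaller.
   */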
/*
   * Creates a query and executes it against ES to fetch Logstash stats metrics for self monitoring
   * @param {Object} callCluster - ES client
   * @param {string} clusterUuid - A cluster UUID
   * @param {string} monitoringClusterUuid - A monitoring cluster UUID
   * @param {string} start - start timestamp
   * @param {string} end - end timestamp
   * @param {Object} options - additional options required for processing
*/
private async fetchLogstashStats(
callCluster: ElasticsearchClient,
clusterUuid: string,
monitoringClusterUuid: string,
start: string,
end: string,
{ page = 0, ...options }: { page?: number } & LogstashProcessOptions
): Promise<void> {
const filterPath: string[] = [
'hits.hits._source.cluster_uuid',
'hits.hits._source.type',
'hits.hits._source.source_node',
'hits.hits._source.agent.type',
'hits.hits._source.logstash.elasticsearch.cluster.id', // alias for cluster_uuid
'hits.hits._source.logstash_stats.pipelines.id',
'hits.hits._source.logstash_stats.pipelines.ephemeral_id',
'hits.hits._source.logstash_stats.pipelines.queue.type',
'hits.hits._source.logstash_stats.logstash.version',
'hits.hits._source.logstash_stats.logstash.uuid',
];
const params: estypes.SearchRequest = {
index: INDEX_PATTERN_LOGSTASH_MONITORING,
ignore_unavailable: true,
filter_path: filterPath,
body: {
query: createQuery({
start,
end,
filters: [
{ term: { cluster_uuid: clusterUuid } },
{
bool: {
should: [{ term: { type: 'logstash_stats' } }],
},
},
],
}) as estypes.QueryDslQueryContainer,
from: page * HITS_SIZE,
collapse: {
field: 'logstash_stats.logstash.uuid',
},
sort: [{ ['timestamp']: { order: 'desc', unmapped_type: 'long' } }],
size: HITS_SIZE,
},
};
const results = await callCluster.search<LogstashStats>(params, {
headers: {
'X-QUERY-SOURCE': TELEMETRY_QUERY_SOURCE,
},
});
const hitsLength = results?.hits?.hits.length || 0;
if (hitsLength > 0) {
// further augment the clusters object with more stats
this.processStatsResults(results, options, monitoringClusterUuid);
}
return Promise.resolve();
}
/*
   * Creates a query and executes it against ES to fetch Logstash state metrics for self monitoring
   * @param {Object} callCluster - ES client
   * @param {string} clusterUuid - A cluster UUID
   * @param {Array} ephemeralIds - Logstash pipeline ephemeral IDs
   * @param {string} start - start timestamp
   * @param {string} end - end timestamp
   * @param {Object} options - additional options required for processing
*/
private async fetchLogstashState(
callCluster: ElasticsearchClient,
clusterUuid: string,
ephemeralIds: string[],
start: string,
end: string,
{ page = 0, ...options }: { page?: number } & LogstashProcessOptions
): Promise<void> {
const filterPath: string[] = [
'hits.hits._source.logstash_state.pipeline.batch_size',
'hits.hits._source.logstash_state.pipeline.workers',
'hits.hits._source.logstash_state.pipeline.representation.graph.vertices',
'hits.hits._source.type',
];
const params: estypes.SearchRequest = {
index: INDEX_PATTERN_LOGSTASH_MONITORING,
ignore_unavailable: true,
filter_path: filterPath,
body: {
query: createQuery({
start,
end,
filters: [
{ terms: { 'logstash_state.pipeline.ephemeral_id': ephemeralIds } },
{
bool: {
should: [{ term: { type: 'logstash_state' } }],
},
},
],
}) as estypes.QueryDslQueryContainer,
collapse: {
field: 'logstash_state.pipeline.ephemeral_id',
},
sort: [{ ['timestamp']: { order: 'desc', unmapped_type: 'long' } }],
size: ephemeralIds.length,
},
};
const results = await callCluster.search<LogstashState>(params, {
headers: {
'X-QUERY-SOURCE': TELEMETRY_QUERY_SOURCE,
},
});
const hitsLength = results?.hits?.hits.length || 0;
if (hitsLength > 0) {
// further augment the clusters object with more stats
this.processStateResults(results, clusterUuid, options);
}
return Promise.resolve();
}
}

View file

@ -8,7 +8,7 @@
import type { IClusterClient } from '@kbn/core/server';
import type { UsageCollectionSetup } from '@kbn/usage-collection-plugin/server';
import type { UsageStatsPayload } from '@kbn/telemetry-collection-manager-plugin/server';
import type { LogstashBaseStats } from './get_logstash_stats';
import type { LogstashBaseStats } from './logstash_monitoring';
import type { BeatsBaseStats } from './get_beats_stats';
import { getAllStats } from './get_all_stats';
import { getClusterUuids } from './get_cluster_uuids';
@ -57,6 +57,7 @@ export function registerMonitoringTelemetryCollection(
},
count: { type: 'long' },
cluster_stats: {
monitoringClusterUuid: { type: 'keyword' },
collection_types: {
DYNAMIC_KEY: { type: 'long' },
},

View file

@ -48,6 +48,7 @@
"@kbn/react-kibana-context-render",
"@kbn/flot-charts",
"@kbn/ui-theme",
"@kbn/core-elasticsearch-server",
],
"exclude": [
"target/**/*",

View file

@ -43,6 +43,9 @@
},
"cluster_stats": {
"properties": {
"monitoringClusterUuid": {
"type": "keyword"
},
"collection_types": {
"properties": {
"DYNAMIC_KEY": {

View file

@ -976,49 +976,51 @@
"plugins": {}
},
"logstash": {
"count": 1,
"count": 1,
"cluster_stats": {
"monitoringClusterUuid": "integrationTestClusterUuid",
"collection_types": {
"internal_collection": 1
},
"pipelines": {
"batch_size_min": 125,
"batch_size_max": 125,
"batch_size_avg": 125,
"batch_size_total": 125,
"count": 1,
"sources": {
"string": true
},
"workers_avg": 4,
"workers_total": 4,
"workers_min": 4,
"workers_max": 4
},
"plugins": [
{
"count": 1,
"name": "logstash-input-twitter"
},
{
"count": 1,
"name": "logstash-output-stdout"
},
{
"count": 1,
"name": "logstash-output-elasticsearch"
}
],
"pipelines": {
"batch_size_min": 125,
"batch_size_max": 125,
"batch_size_avg": 125,
"batch_size_total": 125,
"count": 1,
"sources": {
"string": true
},
"workers_avg": 4,
"workers_total": 4,
"workers_min": 4,
"workers_max": 4
},
"plugins": [
{
"count": 1,
"name": "logstash-input-twitter"
},
{
"count": 1,
"name": "logstash-output-stdout"
},
{
"count": 1,
"name": "logstash-output-elasticsearch"
}
],
"queues": {
"memory": 1
}
},
"versions": [
{
"count": 1,
"version": "7.0.0-alpha1"
}
]}
{
"count": 1,
"version": "7.0.0-alpha1"
}
]
}
},
"timestamp": "2017-08-15T22:10:52.642Z",
"version": "7.0.0-alpha1",

View file

@ -42,6 +42,22 @@ function getCacheDetails(body: UnencryptedTelemetryPayload): CacheDetails[] {
return body.map(({ stats }) => (stats as UsageStatsPayload).cacheDetails);
}
function updateClusterUuidInLogstashStats(
clusterUuid: string,
payload: Array<Record<string, any>>
) {
// eslint-disable-next-line @typescript-eslint/naming-convention
return payload.map(({ stack_stats, ...item }) => {
const { logstash } = stack_stats;
if (logstash) {
// eslint-disable-next-line @typescript-eslint/naming-convention
const { cluster_stats } = logstash;
cluster_stats.monitoringClusterUuid = clusterUuid;
}
return { stack_stats, ...item };
});
}
/**
* Update the .monitoring-* documents loaded via the archiver to the recent `timestamp`
* @param esSupertest The client to send requests to ES
@ -161,7 +177,12 @@ export default function ({ getService }: FtrProviderContext) {
expect(monitoring).length(3);
expect(localXPack.collectionSource).to.eql('local_xpack');
expect(omitCacheDetails(monitoring)).to.eql(
const withoutCacheDetailsMonitoring = omitCacheDetails(monitoring);
const lsClusterUuidChangedMonitoring = updateClusterUuidInLogstashStats(
'integrationTestClusterUuid',
withoutCacheDetailsMonitoring
);
expect(lsClusterUuidChangedMonitoring).to.eql(
updateFixtureTimestamps(multiClusterFixture, timestamp)
);
});