mirror of
https://github.com/elastic/kibana.git
synced 2025-04-24 09:48:58 -04:00
[Telemetry] Report data shippers (#64935)
Co-authored-by: Christiane (Tina) Heiligers <christiane.heiligers@elastic.co> Co-authored-by: Elastic Machine <elasticmachine@users.noreply.github.com>
This commit is contained in:
parent
45f0322fbc
commit
6607bf7b49
15 changed files with 915 additions and 220 deletions
|
@ -47,4 +47,8 @@ export {
|
|||
getLocalLicense,
|
||||
getLocalStats,
|
||||
TelemetryLocalStats,
|
||||
DATA_TELEMETRY_ID,
|
||||
DataTelemetryIndex,
|
||||
DataTelemetryPayload,
|
||||
buildDataTelemetryPayload,
|
||||
} from './telemetry_collection';
|
||||
|
|
|
@ -179,23 +179,36 @@ describe('get_local_stats', () => {
|
|||
|
||||
describe('handleLocalStats', () => {
|
||||
it('returns expected object without xpack and kibana data', () => {
|
||||
const result = handleLocalStats(clusterInfo, clusterStatsWithNodesUsage, void 0, context);
|
||||
const result = handleLocalStats(
|
||||
clusterInfo,
|
||||
clusterStatsWithNodesUsage,
|
||||
void 0,
|
||||
void 0,
|
||||
context
|
||||
);
|
||||
expect(result.cluster_uuid).to.eql(combinedStatsResult.cluster_uuid);
|
||||
expect(result.cluster_name).to.eql(combinedStatsResult.cluster_name);
|
||||
expect(result.cluster_stats).to.eql(combinedStatsResult.cluster_stats);
|
||||
expect(result.version).to.be('2.3.4');
|
||||
expect(result.collection).to.be('local');
|
||||
expect(result.license).to.be(undefined);
|
||||
expect(result.stack_stats).to.eql({ kibana: undefined });
|
||||
expect(result.stack_stats).to.eql({ kibana: undefined, data: undefined });
|
||||
});
|
||||
|
||||
it('returns expected object with xpack', () => {
|
||||
const result = handleLocalStats(clusterInfo, clusterStatsWithNodesUsage, void 0, context);
|
||||
const result = handleLocalStats(
|
||||
clusterInfo,
|
||||
clusterStatsWithNodesUsage,
|
||||
void 0,
|
||||
void 0,
|
||||
context
|
||||
);
|
||||
const { stack_stats: stack, ...cluster } = result;
|
||||
expect(cluster.collection).to.be(combinedStatsResult.collection);
|
||||
expect(cluster.cluster_uuid).to.be(combinedStatsResult.cluster_uuid);
|
||||
expect(cluster.cluster_name).to.be(combinedStatsResult.cluster_name);
|
||||
expect(stack.kibana).to.be(undefined); // not mocked for this test
|
||||
expect(stack.data).to.be(undefined); // not mocked for this test
|
||||
|
||||
expect(cluster.version).to.eql(combinedStatsResult.version);
|
||||
expect(cluster.cluster_stats).to.eql(combinedStatsResult.cluster_stats);
|
||||
|
|
|
@ -0,0 +1,136 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
export const DATA_TELEMETRY_ID = 'data';
|
||||
|
||||
export const DATA_KNOWN_TYPES = ['logs', 'traces', 'metrics'] as const;
|
||||
|
||||
export type DataTelemetryType = typeof DATA_KNOWN_TYPES[number];
|
||||
|
||||
export type DataPatternName = typeof DATA_DATASETS_INDEX_PATTERNS[number]['patternName'];
|
||||
|
||||
// TODO: Ideally this list should be updated from an external public URL (similar to the newsfeed)
|
||||
// But it's good to have a minimum list shipped with the build.
|
||||
export const DATA_DATASETS_INDEX_PATTERNS = [
|
||||
// Enterprise Search - Elastic
|
||||
{ pattern: '.ent-search-*', patternName: 'enterprise-search' },
|
||||
{ pattern: '.app-search-*', patternName: 'app-search' },
|
||||
// Enterprise Search - 3rd party
|
||||
{ pattern: '*magento2*', patternName: 'magento2' },
|
||||
{ pattern: '*magento*', patternName: 'magento' },
|
||||
{ pattern: '*shopify*', patternName: 'shopify' },
|
||||
{ pattern: '*wordpress*', patternName: 'wordpress' },
|
||||
// { pattern: '*wp*', patternName: 'wordpress' }, // TODO: Too vague?
|
||||
{ pattern: '*drupal*', patternName: 'drupal' },
|
||||
{ pattern: '*joomla*', patternName: 'joomla' },
|
||||
{ pattern: '*search*', patternName: 'search' }, // TODO: Too vague?
|
||||
// { pattern: '*wix*', patternName: 'wix' }, // TODO: Too vague?
|
||||
{ pattern: '*sharepoint*', patternName: 'sharepoint' },
|
||||
{ pattern: '*squarespace*', patternName: 'squarespace' },
|
||||
// { pattern: '*aem*', patternName: 'aem' }, // TODO: Too vague?
|
||||
{ pattern: '*sitecore*', patternName: 'sitecore' },
|
||||
{ pattern: '*weebly*', patternName: 'weebly' },
|
||||
{ pattern: '*acquia*', patternName: 'acquia' },
|
||||
|
||||
// Observability - Elastic
|
||||
{ pattern: 'filebeat-*', patternName: 'filebeat', shipper: 'filebeat' },
|
||||
{ pattern: 'metricbeat-*', patternName: 'metricbeat', shipper: 'metricbeat' },
|
||||
{ pattern: 'apm-*', patternName: 'apm', shipper: 'apm' },
|
||||
{ pattern: 'functionbeat-*', patternName: 'functionbeat', shipper: 'functionbeat' },
|
||||
{ pattern: 'heartbeat-*', patternName: 'heartbeat', shipper: 'heartbeat' },
|
||||
{ pattern: 'logstash-*', patternName: 'logstash', shipper: 'logstash' },
|
||||
// Observability - 3rd party
|
||||
{ pattern: 'fluentd*', patternName: 'fluentd' },
|
||||
{ pattern: 'telegraf*', patternName: 'telegraf' },
|
||||
{ pattern: 'prometheusbeat*', patternName: 'prometheusbeat' },
|
||||
{ pattern: 'fluentbit*', patternName: 'fluentbit' },
|
||||
{ pattern: '*nginx*', patternName: 'nginx' },
|
||||
{ pattern: '*apache*', patternName: 'apache' }, // Already in Security (keeping it in here for documentation)
|
||||
// { pattern: '*logs*', patternName: 'third-party-logs' }, Disabled for now
|
||||
|
||||
// Security - Elastic
|
||||
{ pattern: 'logstash-*', patternName: 'logstash', shipper: 'logstash' },
|
||||
{ pattern: 'endgame-*', patternName: 'endgame', shipper: 'endgame' },
|
||||
{ pattern: 'logs-endpoint.*', patternName: 'logs-endpoint', shipper: 'endpoint' }, // It should be caught by the `mappings` logic, but just in case
|
||||
{ pattern: 'metrics-endpoint.*', patternName: 'metrics-endpoint', shipper: 'endpoint' }, // It should be caught by the `mappings` logic, but just in case
|
||||
{ pattern: '.siem-signals-*', patternName: 'siem-signals' },
|
||||
{ pattern: 'auditbeat-*', patternName: 'auditbeat', shipper: 'auditbeat' },
|
||||
{ pattern: 'winlogbeat-*', patternName: 'winlogbeat', shipper: 'winlogbeat' },
|
||||
{ pattern: 'packetbeat-*', patternName: 'packetbeat', shipper: 'packetbeat' },
|
||||
{ pattern: 'filebeat-*', patternName: 'filebeat', shipper: 'filebeat' },
|
||||
// Security - 3rd party
|
||||
{ pattern: '*apache*', patternName: 'apache' }, // Already in Observability (keeping it in here for documentation)
|
||||
{ pattern: '*tomcat*', patternName: 'tomcat' },
|
||||
{ pattern: '*artifactory*', patternName: 'artifactory' },
|
||||
{ pattern: '*aruba*', patternName: 'aruba' },
|
||||
{ pattern: '*barracuda*', patternName: 'barracuda' },
|
||||
{ pattern: '*bluecoat*', patternName: 'bluecoat' },
|
||||
{ pattern: 'arcsight-*', patternName: 'arcsight', shipper: 'arcsight' },
|
||||
// { pattern: '*cef*', patternName: 'cef' }, // Disabled because it's too vague
|
||||
{ pattern: '*checkpoint*', patternName: 'checkpoint' },
|
||||
{ pattern: '*cisco*', patternName: 'cisco' },
|
||||
{ pattern: '*citrix*', patternName: 'citrix' },
|
||||
{ pattern: '*cyberark*', patternName: 'cyberark' },
|
||||
{ pattern: '*cylance*', patternName: 'cylance' },
|
||||
{ pattern: '*fireeye*', patternName: 'fireeye' },
|
||||
{ pattern: '*fortinet*', patternName: 'fortinet' },
|
||||
{ pattern: '*infoblox*', patternName: 'infoblox' },
|
||||
{ pattern: '*kaspersky*', patternName: 'kaspersky' },
|
||||
{ pattern: '*mcafee*', patternName: 'mcafee' },
|
||||
// paloaltonetworks
|
||||
{ pattern: '*paloaltonetworks*', patternName: 'paloaltonetworks' },
|
||||
{ pattern: 'pan-*', patternName: 'paloaltonetworks' },
|
||||
{ pattern: 'pan_*', patternName: 'paloaltonetworks' },
|
||||
{ pattern: 'pan.*', patternName: 'paloaltonetworks' },
|
||||
|
||||
// rsa
|
||||
{ pattern: 'rsa.*', patternName: 'rsa' },
|
||||
{ pattern: 'rsa-*', patternName: 'rsa' },
|
||||
{ pattern: 'rsa_*', patternName: 'rsa' },
|
||||
|
||||
// snort
|
||||
{ pattern: 'snort-*', patternName: 'snort' },
|
||||
{ pattern: 'logstash-snort*', patternName: 'snort' },
|
||||
|
||||
{ pattern: '*sonicwall*', patternName: 'sonicwall' },
|
||||
{ pattern: '*sophos*', patternName: 'sophos' },
|
||||
|
||||
// squid
|
||||
{ pattern: 'squid-*', patternName: 'squid' },
|
||||
{ pattern: 'squid_*', patternName: 'squid' },
|
||||
{ pattern: 'squid.*', patternName: 'squid' },
|
||||
|
||||
{ pattern: '*symantec*', patternName: 'symantec' },
|
||||
{ pattern: '*tippingpoint*', patternName: 'tippingpoint' },
|
||||
{ pattern: '*trendmicro*', patternName: 'trendmicro' },
|
||||
{ pattern: '*tripwire*', patternName: 'tripwire' },
|
||||
{ pattern: '*zscaler*', patternName: 'zscaler' },
|
||||
{ pattern: '*zeek*', patternName: 'zeek' },
|
||||
{ pattern: '*sigma_doc*', patternName: 'sigma_doc' },
|
||||
// { pattern: '*bro*', patternName: 'bro' }, // Disabled because it's too vague
|
||||
{ pattern: 'ecs-corelight*', patternName: 'ecs-corelight' },
|
||||
{ pattern: '*suricata*', patternName: 'suricata' },
|
||||
// { pattern: '*fsf*', patternName: 'fsf' }, // Disabled because it's too vague
|
||||
{ pattern: '*wazuh*', patternName: 'wazuh' },
|
||||
] as const;
|
||||
|
||||
// Get the unique list of index patterns (some are duplicated for documentation purposes)
|
||||
export const DATA_DATASETS_INDEX_PATTERNS_UNIQUE = DATA_DATASETS_INDEX_PATTERNS.filter(
|
||||
(entry, index, array) => !array.slice(0, index).find(({ pattern }) => entry.pattern === pattern)
|
||||
);
|
|
@ -0,0 +1,251 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import { buildDataTelemetryPayload, getDataTelemetry } from './get_data_telemetry';
|
||||
import { DATA_DATASETS_INDEX_PATTERNS, DATA_DATASETS_INDEX_PATTERNS_UNIQUE } from './constants';
|
||||
|
||||
// Covers: invariants of the patterns list, the pure payload builder, and the
// ES-querying collector (with a mocked cluster client).
describe('get_data_telemetry', () => {
  describe('DATA_DATASETS_INDEX_PATTERNS', () => {
    // Validate structural invariants for every declared pattern.
    DATA_DATASETS_INDEX_PATTERNS.forEach((entry, index, array) => {
      describe(`Pattern ${entry.pattern}`, () => {
        test('there should only be one in DATA_DATASETS_INDEX_PATTERNS_UNIQUE', () => {
          expect(
            DATA_DATASETS_INDEX_PATTERNS_UNIQUE.filter(({ pattern }) => pattern === entry.pattern)
          ).toHaveLength(1);
        });

        // This test is to make us sure that we don't update one of the duplicated entries and forget about any other repeated ones
        test('when a document is duplicated, the duplicates should be identical', () => {
          array.slice(0, index).forEach((previousEntry) => {
            if (entry.pattern === previousEntry.pattern) {
              expect(entry).toStrictEqual(previousEntry);
            }
          });
        });
      });
    });
  });

  describe('buildDataTelemetryPayload', () => {
    test('return the base object when no indices provided', () => {
      expect(buildDataTelemetryPayload([])).toStrictEqual([]);
    });

    test('return the base object when no matching indices provided', () => {
      expect(
        buildDataTelemetryPayload([
          { name: 'no__way__this__can_match_anything', sizeInBytes: 10 },
          { name: '.kibana-event-log-8.0.0' },
        ])
      ).toStrictEqual([]);
    });

    test('matches some indices and puts them in their own category', () => {
      expect(
        buildDataTelemetryPayload([
          // APM Indices have known shipper (so we can infer the datasetType from mapping constant)
          { name: 'apm-7.7.0-error-000001', shipper: 'apm', isECS: true },
          { name: 'apm-7.7.0-metric-000001', shipper: 'apm', isECS: true },
          { name: 'apm-7.7.0-onboarding-2020.05.17', shipper: 'apm', isECS: true },
          { name: 'apm-7.7.0-profile-000001', shipper: 'apm', isECS: true },
          { name: 'apm-7.7.0-span-000001', shipper: 'apm', isECS: true },
          { name: 'apm-7.7.0-transaction-000001', shipper: 'apm', isECS: true },
          // Packetbeat indices with known shipper (we can infer datasetType from mapping constant)
          { name: 'packetbeat-7.7.0-2020.06.11-000001', shipper: 'packetbeat', isECS: true },
          // Matching patterns from the list => known datasetName but the rest is unknown
          { name: 'filebeat-12314', docCount: 100, sizeInBytes: 10 },
          { name: 'metricbeat-1234', docCount: 100, sizeInBytes: 10, isECS: false },
          { name: '.app-search-1234', docCount: 0 },
          { name: 'logs-endpoint.1234', docCount: 0 }, // Matching pattern with a dot in the name
          // New Indexing strategy: everything can be inferred from the constant_keyword values
          {
            name: 'logs-nginx.access-default-000001',
            datasetName: 'nginx.access',
            datasetType: 'logs',
            shipper: 'filebeat',
            isECS: true,
            docCount: 1000,
            sizeInBytes: 1000,
          },
          {
            name: 'logs-nginx.access-default-000002',
            datasetName: 'nginx.access',
            datasetType: 'logs',
            shipper: 'filebeat',
            isECS: true,
            docCount: 1000,
            sizeInBytes: 60,
          },
        ])
      ).toStrictEqual([
        {
          shipper: 'apm',
          index_count: 6,
          ecs_index_count: 6,
        },
        {
          shipper: 'packetbeat',
          index_count: 1,
          ecs_index_count: 1,
        },
        {
          pattern_name: 'filebeat',
          shipper: 'filebeat',
          index_count: 1,
          doc_count: 100,
          size_in_bytes: 10,
        },
        {
          pattern_name: 'metricbeat',
          shipper: 'metricbeat',
          index_count: 1,
          ecs_index_count: 0,
          doc_count: 100,
          size_in_bytes: 10,
        },
        {
          pattern_name: 'app-search',
          index_count: 1,
          doc_count: 0,
        },
        {
          pattern_name: 'logs-endpoint',
          shipper: 'endpoint',
          index_count: 1,
          doc_count: 0,
        },
        {
          dataset: { name: 'nginx.access', type: 'logs' },
          shipper: 'filebeat',
          index_count: 2,
          ecs_index_count: 2,
          doc_count: 2000,
          size_in_bytes: 1060,
        },
      ]);
    });
  });

  describe('getDataTelemetry', () => {
    test('it returns the base payload (all 0s) because no indices are found', async () => {
      const callCluster = mockCallCluster();
      await expect(getDataTelemetry(callCluster)).resolves.toStrictEqual([]);
    });

    test('can only see the index mappings, but not the stats', async () => {
      const callCluster = mockCallCluster(['filebeat-12314']);
      await expect(getDataTelemetry(callCluster)).resolves.toStrictEqual([
        {
          pattern_name: 'filebeat',
          shipper: 'filebeat',
          index_count: 1,
          ecs_index_count: 0,
        },
      ]);
    });

    test('can see the mappings and the stats', async () => {
      const callCluster = mockCallCluster(
        ['filebeat-12314'],
        { isECS: true },
        {
          indices: {
            'filebeat-12314': { total: { docs: { count: 100 }, store: { size_in_bytes: 10 } } },
          },
        }
      );
      await expect(getDataTelemetry(callCluster)).resolves.toStrictEqual([
        {
          pattern_name: 'filebeat',
          shipper: 'filebeat',
          index_count: 1,
          ecs_index_count: 1,
          doc_count: 100,
          size_in_bytes: 10,
        },
      ]);
    });

    test('find an index that does not match any index pattern but has mappings metadata', async () => {
      const callCluster = mockCallCluster(
        ['cannot_match_anything'],
        { isECS: true, datasetType: 'traces', shipper: 'my-beat' },
        {
          indices: {
            cannot_match_anything: {
              total: { docs: { count: 100 }, store: { size_in_bytes: 10 } },
            },
          },
        }
      );
      await expect(getDataTelemetry(callCluster)).resolves.toStrictEqual([
        {
          dataset: { name: undefined, type: 'traces' },
          shipper: 'my-beat',
          index_count: 1,
          ecs_index_count: 1,
          doc_count: 100,
          size_in_bytes: 10,
        },
      ]);
    });

    test('return empty array when there is an error', async () => {
      const callCluster = jest.fn().mockRejectedValue(new Error('Something went terribly wrong'));
      await expect(getDataTelemetry(callCluster)).resolves.toStrictEqual([]);
    });
  });
});
|
||||
|
||||
function mockCallCluster(
|
||||
indicesMappings: string[] = [],
|
||||
{ isECS = false, datasetName = '', datasetType = '', shipper = '' } = {},
|
||||
indexStats: any = {}
|
||||
) {
|
||||
return jest.fn().mockImplementation(async (method: string, opts: any) => {
|
||||
if (method === 'indices.getMapping') {
|
||||
return Object.fromEntries(
|
||||
indicesMappings.map((index) => [
|
||||
index,
|
||||
{
|
||||
mappings: {
|
||||
...(shipper && { _meta: { beat: shipper } }),
|
||||
properties: {
|
||||
...(isECS && { ecs: { properties: { version: { type: 'keyword' } } } }),
|
||||
...((datasetType || datasetName) && {
|
||||
dataset: {
|
||||
properties: {
|
||||
...(datasetName && {
|
||||
name: { type: 'constant_keyword', value: datasetName },
|
||||
}),
|
||||
...(datasetType && {
|
||||
type: { type: 'constant_keyword', value: datasetType },
|
||||
}),
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
])
|
||||
);
|
||||
}
|
||||
return indexStats;
|
||||
});
|
||||
}
|
|
@ -0,0 +1,253 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import { LegacyAPICaller } from 'kibana/server';
|
||||
import {
|
||||
DATA_DATASETS_INDEX_PATTERNS_UNIQUE,
|
||||
DataPatternName,
|
||||
DataTelemetryType,
|
||||
} from './constants';
|
||||
|
||||
/** Counters shared by every reported data-telemetry entry. */
export interface DataTelemetryBasePayload {
  // Number of indices aggregated into this entry.
  index_count: number;
  // How many of those indices follow the ECS conventions (absent when unknown).
  ecs_index_count?: number;
  // Summed document count across the indices (absent when stats were unavailable).
  doc_count?: number;
  // Summed storage size across the indices (absent when stats were unavailable).
  size_in_bytes?: number;
}

/** One aggregated entry in the reported data-telemetry payload. */
export interface DataTelemetryDocument extends DataTelemetryBasePayload {
  dataset?: {
    name?: string;
    type?: DataTelemetryType | 'unknown' | string; // The union of types is to help autocompletion with some known `dataset.type`s
  };
  // Shipper that produced the data (e.g. a beat name).
  shipper?: string;
  // Name from the known index-patterns list, when the entry was matched by pattern.
  pattern_name?: DataPatternName;
}

/** The full data-telemetry payload: a list of aggregated entries. */
export type DataTelemetryPayload = DataTelemetryDocument[];

/** Per-index information gathered before aggregation. */
export interface DataTelemetryIndex {
  name: string;
  datasetName?: string; // To be obtained from `mappings.dataset.name` if it's a constant keyword
  datasetType?: string; // To be obtained from `mappings.dataset.type` if it's a constant keyword
  shipper?: string; // To be obtained from `_meta.beat` if it's set
  isECS?: boolean; // Optional because it can't be obtained via Monitoring.

  // The fields below are optional because we might not be able to obtain them if the user does not
  // have access to the index.
  docCount?: number;
  sizeInBytes?: number;
}

// Utility type: Partial<T> but with at least one of T's keys present.
type AtLeastOne<T, U = { [K in keyof T]: Pick<T, K> }> = Partial<T> & U[keyof U];

// Identifying attributes extracted for an index; at least one must be known.
type DataDescriptor = AtLeastOne<{
  datasetName: string;
  datasetType: string;
  shipper: string;
  patternName: DataPatternName; // When found from the list of the index patterns
}>;
|
||||
|
||||
function findMatchingDescriptors({
|
||||
name,
|
||||
shipper,
|
||||
datasetName,
|
||||
datasetType,
|
||||
}: DataTelemetryIndex): DataDescriptor[] {
|
||||
// If we already have the data from the indices' mappings...
|
||||
if ([shipper, datasetName, datasetType].some(Boolean)) {
|
||||
return [
|
||||
{
|
||||
...(shipper && { shipper }),
|
||||
...(datasetName && { datasetName }),
|
||||
...(datasetType && { datasetType }),
|
||||
} as AtLeastOne<{ datasetName: string; datasetType: string; shipper: string }>, // Using casting here because TS doesn't infer at least one exists from the if clause
|
||||
];
|
||||
}
|
||||
|
||||
// Otherwise, try with the list of known index patterns
|
||||
return DATA_DATASETS_INDEX_PATTERNS_UNIQUE.filter(({ pattern }) => {
|
||||
if (!pattern.startsWith('.') && name.startsWith('.')) {
|
||||
// avoid system indices caught by very fuzzy index patterns (i.e.: *log* would catch `.kibana-log-...`)
|
||||
return false;
|
||||
}
|
||||
return new RegExp(`^${pattern.replace(/\./g, '\\.').replace(/\*/g, '.*')}$`).test(name);
|
||||
});
|
||||
}
|
||||
|
||||
function increaseCounters(
|
||||
previousValue: DataTelemetryBasePayload = { index_count: 0 },
|
||||
{ isECS, docCount, sizeInBytes }: DataTelemetryIndex
|
||||
) {
|
||||
return {
|
||||
...previousValue,
|
||||
index_count: previousValue.index_count + 1,
|
||||
...(typeof isECS === 'boolean'
|
||||
? {
|
||||
ecs_index_count: (previousValue.ecs_index_count || 0) + (isECS ? 1 : 0),
|
||||
}
|
||||
: {}),
|
||||
...(typeof docCount === 'number'
|
||||
? { doc_count: (previousValue.doc_count || 0) + docCount }
|
||||
: {}),
|
||||
...(typeof sizeInBytes === 'number'
|
||||
? { size_in_bytes: (previousValue.size_in_bytes || 0) + sizeInBytes }
|
||||
: {}),
|
||||
};
|
||||
}
|
||||
|
||||
export function buildDataTelemetryPayload(indices: DataTelemetryIndex[]): DataTelemetryPayload {
|
||||
const startingDotPatternsUntilTheFirstAsterisk = DATA_DATASETS_INDEX_PATTERNS_UNIQUE.map(
|
||||
({ pattern }) => pattern.replace(/^\.(.+)\*.*$/g, '.$1')
|
||||
).filter(Boolean);
|
||||
|
||||
// Filter out the system indices unless they are required by the patterns
|
||||
const indexCandidates = indices.filter(
|
||||
({ name }) =>
|
||||
!(
|
||||
name.startsWith('.') &&
|
||||
!startingDotPatternsUntilTheFirstAsterisk.find((pattern) => name.startsWith(pattern))
|
||||
)
|
||||
);
|
||||
|
||||
const acc = new Map<string, DataTelemetryDocument>();
|
||||
|
||||
for (const indexCandidate of indexCandidates) {
|
||||
const matchingDescriptors = findMatchingDescriptors(indexCandidate);
|
||||
for (const { datasetName, datasetType, shipper, patternName } of matchingDescriptors) {
|
||||
const key = `${datasetName}-${datasetType}-${shipper}-${patternName}`;
|
||||
acc.set(key, {
|
||||
...((datasetName || datasetType) && { dataset: { name: datasetName, type: datasetType } }),
|
||||
...(shipper && { shipper }),
|
||||
...(patternName && { pattern_name: patternName }),
|
||||
...increaseCounters(acc.get(key), indexCandidate),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return [...acc.values()];
|
||||
}
|
||||
|
||||
/** Shape of the (filter_path-reduced) `indices.stats` response this module consumes. */
interface IndexStats {
  indices: {
    [indexName: string]: {
      total: {
        docs: {
          count: number;
          deleted: number;
        };
        store: {
          size_in_bytes: number;
        };
      };
    };
  };
}

/** Shape of the (filter_path-reduced) `indices.getMapping` response this module consumes. */
interface IndexMappings {
  [indexName: string]: {
    mappings: {
      // `_meta.beat` identifies the shipper that created the index.
      _meta?: {
        beat?: string;
      };
      properties: {
        // constant_keyword dataset fields from the new indexing strategy.
        dataset?: {
          properties: {
            name?: {
              type: string;
              value?: string;
            };
            type?: {
              type: string;
              value?: string;
            };
          };
        };
        // Presence of `ecs.version` marks the index as ECS-compliant.
        ecs?: {
          properties: {
            version?: {
              type: string;
            };
          };
        };
      };
    };
  };
}
|
||||
|
||||
export async function getDataTelemetry(callCluster: LegacyAPICaller) {
|
||||
try {
|
||||
const index = [
|
||||
...DATA_DATASETS_INDEX_PATTERNS_UNIQUE.map(({ pattern }) => pattern),
|
||||
'*-*-*-*', // Include new indexing strategy indices {type}-{dataset}-{namespace}-{rollover_counter}
|
||||
];
|
||||
const [indexMappings, indexStats]: [IndexMappings, IndexStats] = await Promise.all([
|
||||
// GET */_mapping?filter_path=*.mappings._meta.beat,*.mappings.properties.ecs.properties.version.type,*.mappings.properties.dataset.properties.type.value,*.mappings.properties.dataset.properties.name.value
|
||||
callCluster('indices.getMapping', {
|
||||
index: '*', // Request all indices because filter_path already filters out the indices without any of those fields
|
||||
filterPath: [
|
||||
// _meta.beat tells the shipper
|
||||
'*.mappings._meta.beat',
|
||||
// Does it have `ecs.version` in the mappings? => It follows the ECS conventions
|
||||
'*.mappings.properties.ecs.properties.version.type',
|
||||
|
||||
// Disable the fields below because they are still pending to be confirmed:
|
||||
// https://github.com/elastic/ecs/pull/845
|
||||
// TODO: Re-enable when the final fields are confirmed
|
||||
// // If `dataset.type` is a `constant_keyword`, it can be reported as a type
|
||||
// '*.mappings.properties.dataset.properties.type.value',
|
||||
// // If `dataset.name` is a `constant_keyword`, it can be reported as the dataset
|
||||
// '*.mappings.properties.dataset.properties.name.value',
|
||||
],
|
||||
}),
|
||||
// GET <index>/_stats/docs,store?level=indices&filter_path=indices.*.total
|
||||
callCluster<IndexStats>('indices.stats', {
|
||||
index,
|
||||
level: 'indices',
|
||||
metric: ['docs', 'store'],
|
||||
filterPath: ['indices.*.total'],
|
||||
}),
|
||||
]);
|
||||
|
||||
const indexNames = Object.keys({ ...indexMappings, ...indexStats?.indices });
|
||||
const indices = indexNames.map((name) => {
|
||||
const isECS = !!indexMappings[name]?.mappings?.properties.ecs?.properties.version?.type;
|
||||
const shipper = indexMappings[name]?.mappings?._meta?.beat;
|
||||
const datasetName = indexMappings[name]?.mappings?.properties.dataset?.properties.name?.value;
|
||||
const datasetType = indexMappings[name]?.mappings?.properties.dataset?.properties.type?.value;
|
||||
|
||||
const stats = (indexStats?.indices || {})[name];
|
||||
if (stats) {
|
||||
return {
|
||||
name,
|
||||
datasetName,
|
||||
datasetType,
|
||||
shipper,
|
||||
isECS,
|
||||
docCount: stats.total?.docs?.count,
|
||||
sizeInBytes: stats.total?.store?.size_in_bytes,
|
||||
};
|
||||
}
|
||||
return { name, datasetName, datasetType, shipper, isECS };
|
||||
});
|
||||
return buildDataTelemetryPayload(indices);
|
||||
} catch (e) {
|
||||
return [];
|
||||
}
|
||||
}
|
|
@ -0,0 +1,27 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
// Public surface of the data-telemetry collection: the stack_stats key plus the
// collector, the payload builder and their types.
export { DATA_TELEMETRY_ID } from './constants';

export {
  DataTelemetryIndex,
  DataTelemetryPayload,
  getDataTelemetry,
  buildDataTelemetryPayload,
} from './get_data_telemetry';
|
|
@ -25,6 +25,7 @@ import { getClusterInfo, ESClusterInfo } from './get_cluster_info';
|
|||
import { getClusterStats } from './get_cluster_stats';
|
||||
import { getKibana, handleKibanaStats, KibanaUsageStats } from './get_kibana';
|
||||
import { getNodesUsage } from './get_nodes_usage';
|
||||
import { getDataTelemetry, DATA_TELEMETRY_ID, DataTelemetryPayload } from './get_data_telemetry';
|
||||
|
||||
/**
|
||||
* Handle the separate local calls by combining them into a single object response that looks like the
|
||||
|
@ -39,6 +40,7 @@ export function handleLocalStats(
|
|||
{ cluster_name, cluster_uuid, version }: ESClusterInfo,
|
||||
{ _nodes, cluster_name: clusterName, ...clusterStats }: any,
|
||||
kibana: KibanaUsageStats,
|
||||
dataTelemetry: DataTelemetryPayload,
|
||||
context: StatsCollectionContext
|
||||
) {
|
||||
return {
|
||||
|
@ -49,6 +51,7 @@ export function handleLocalStats(
|
|||
cluster_stats: clusterStats,
|
||||
collection: 'local',
|
||||
stack_stats: {
|
||||
[DATA_TELEMETRY_ID]: dataTelemetry,
|
||||
kibana: handleKibanaStats(context, kibana),
|
||||
},
|
||||
};
|
||||
|
@ -68,11 +71,12 @@ export const getLocalStats: StatsGetter<{}, TelemetryLocalStats> = async (
|
|||
|
||||
return await Promise.all(
|
||||
clustersDetails.map(async (clustersDetail) => {
|
||||
const [clusterInfo, clusterStats, nodesUsage, kibana] = await Promise.all([
|
||||
const [clusterInfo, clusterStats, nodesUsage, kibana, dataTelemetry] = await Promise.all([
|
||||
getClusterInfo(callCluster), // cluster info
|
||||
getClusterStats(callCluster), // cluster stats (not to be confused with cluster _state_)
|
||||
getNodesUsage(callCluster), // nodes_usage info
|
||||
getKibana(usageCollection, callCluster),
|
||||
getDataTelemetry(callCluster),
|
||||
]);
|
||||
return handleLocalStats(
|
||||
clusterInfo,
|
||||
|
@ -81,6 +85,7 @@ export const getLocalStats: StatsGetter<{}, TelemetryLocalStats> = async (
|
|||
nodes: { ...clusterStats.nodes, usage: nodesUsage },
|
||||
},
|
||||
kibana,
|
||||
dataTelemetry,
|
||||
context
|
||||
);
|
||||
})
|
||||
|
|
|
@ -17,6 +17,12 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
export {
|
||||
DATA_TELEMETRY_ID,
|
||||
DataTelemetryIndex,
|
||||
DataTelemetryPayload,
|
||||
buildDataTelemetryPayload,
|
||||
} from './get_data_telemetry';
|
||||
export { getLocalStats, TelemetryLocalStats } from './get_local_stats';
|
||||
export { getLocalLicense } from './get_local_license';
|
||||
export { getClusterUuids } from './get_cluster_stats';
|
||||
|
|
|
@ -37,8 +37,17 @@ function flatKeys(source) {
|
|||
|
||||
export default function ({ getService }) {
|
||||
const supertest = getService('supertest');
|
||||
const es = getService('es');
|
||||
|
||||
describe('/api/telemetry/v2/clusters/_stats', () => {
|
||||
before('create some telemetry-data tracked indices', async () => {
|
||||
return es.indices.create({ index: 'filebeat-telemetry_tests_logs' });
|
||||
});
|
||||
|
||||
after('cleanup telemetry-data tracked indices', () => {
|
||||
return es.indices.delete({ index: 'filebeat-telemetry_tests_logs' });
|
||||
});
|
||||
|
||||
it('should pull local stats and validate data types', async () => {
|
||||
const timeRange = {
|
||||
min: '2018-07-23T22:07:00Z',
|
||||
|
@ -71,6 +80,17 @@ export default function ({ getService }) {
|
|||
expect(stats.stack_stats.kibana.plugins.csp.strict).to.be(true);
|
||||
expect(stats.stack_stats.kibana.plugins.csp.warnLegacyBrowsers).to.be(true);
|
||||
expect(stats.stack_stats.kibana.plugins.csp.rulesChangedFromDefault).to.be(false);
|
||||
|
||||
// Testing stack_stats.data
|
||||
expect(stats.stack_stats.data).to.be.an('object');
|
||||
expect(stats.stack_stats.data).to.be.an('array');
|
||||
expect(stats.stack_stats.data[0]).to.be.an('object');
|
||||
expect(stats.stack_stats.data[0].pattern_name).to.be('filebeat');
|
||||
expect(stats.stack_stats.data[0].shipper).to.be('filebeat');
|
||||
expect(stats.stack_stats.data[0].index_count).to.be(1);
|
||||
expect(stats.stack_stats.data[0].doc_count).to.be(0);
|
||||
expect(stats.stack_stats.data[0].ecs_index_count).to.be(0);
|
||||
expect(stats.stack_stats.data[0].size_in_bytes).to.be.greaterThan(0);
|
||||
});
|
||||
|
||||
it('should pull local stats and validate fields', async () => {
|
||||
|
|
|
@ -77,11 +77,11 @@ export function handleResponse(resp, min, max, shardStats) {
|
|||
});
|
||||
}
|
||||
|
||||
export function getIndices(req, esIndexPattern, showSystemIndices = false, shardStats) {
|
||||
checkParam(esIndexPattern, 'esIndexPattern in elasticsearch/getIndices');
|
||||
|
||||
const { min, max } = req.payload.timeRange;
|
||||
|
||||
export function buildGetIndicesQuery(
|
||||
esIndexPattern,
|
||||
clusterUuid,
|
||||
{ start, end, size, showSystemIndices = false }
|
||||
) {
|
||||
const filters = [];
|
||||
if (!showSystemIndices) {
|
||||
filters.push({
|
||||
|
@ -90,14 +90,11 @@ export function getIndices(req, esIndexPattern, showSystemIndices = false, shard
|
|||
},
|
||||
});
|
||||
}
|
||||
|
||||
const clusterUuid = req.params.clusterUuid;
|
||||
const metricFields = ElasticsearchMetric.getMetricFields();
|
||||
const config = req.server.config();
|
||||
const params = {
|
||||
|
||||
return {
|
||||
index: esIndexPattern,
|
||||
// TODO: composite aggregation
|
||||
size: config.get('monitoring.ui.max_bucket_size'),
|
||||
size,
|
||||
ignoreUnavailable: true,
|
||||
filterPath: [
|
||||
// only filter path can filter for inner_hits
|
||||
|
@ -118,8 +115,8 @@ export function getIndices(req, esIndexPattern, showSystemIndices = false, shard
|
|||
body: {
|
||||
query: createQuery({
|
||||
type: 'index_stats',
|
||||
start: min,
|
||||
end: max,
|
||||
start,
|
||||
end,
|
||||
clusterUuid,
|
||||
metric: metricFields,
|
||||
filters,
|
||||
|
@ -135,9 +132,24 @@ export function getIndices(req, esIndexPattern, showSystemIndices = false, shard
|
|||
sort: [{ timestamp: { order: 'desc' } }],
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
export function getIndices(req, esIndexPattern, showSystemIndices = false, shardStats) {
|
||||
checkParam(esIndexPattern, 'esIndexPattern in elasticsearch/getIndices');
|
||||
|
||||
const { min: start, max: end } = req.payload.timeRange;
|
||||
|
||||
const clusterUuid = req.params.clusterUuid;
|
||||
const config = req.server.config();
|
||||
const params = buildGetIndicesQuery(esIndexPattern, clusterUuid, {
|
||||
start,
|
||||
end,
|
||||
showSystemIndices,
|
||||
size: config.get('monitoring.ui.max_bucket_size'),
|
||||
});
|
||||
|
||||
const { callWithRequest } = req.server.plugins.elasticsearch.getCluster('monitoring');
|
||||
return callWithRequest(req, 'search', params).then((resp) =>
|
||||
handleResponse(resp, min, max, shardStats)
|
||||
handleResponse(resp, start, end, shardStats)
|
||||
);
|
||||
}
|
||||
|
|
|
@ -4,5 +4,5 @@
|
|||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
|
||||
export { getIndices } from './get_indices';
|
||||
export { getIndices, buildGetIndicesQuery } from './get_indices';
|
||||
export { getIndexSummary } from './get_index_summary';
|
||||
|
|
|
@ -1,158 +1,158 @@
|
|||
// Jest Snapshot v1, https://goo.gl/fbAQLP
|
||||
|
||||
exports[`Telemetry Collection: Get Aggregated Stats OSS-like telemetry (no license nor X-Pack telemetry) 1`] = `
|
||||
Array [
|
||||
Object {
|
||||
"cluster_name": "test",
|
||||
"cluster_stats": Object {
|
||||
"nodes": Object {
|
||||
"usage": Object {
|
||||
"nodes": Array [
|
||||
Object {
|
||||
"aggregations": Object {
|
||||
"terms": Object {
|
||||
"bytes": 2,
|
||||
},
|
||||
},
|
||||
"node_id": "some_node_id",
|
||||
"rest_actions": Object {
|
||||
"nodes_usage_action": 1,
|
||||
},
|
||||
"since": 1588616945163,
|
||||
"timestamp": 1588617023177,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
"cluster_uuid": "test",
|
||||
"collection": "local",
|
||||
"stack_stats": Object {
|
||||
"kibana": Object {
|
||||
"count": 1,
|
||||
"great": "googlymoogly",
|
||||
"indices": 1,
|
||||
"os": Object {
|
||||
"platformReleases": Array [
|
||||
Object {
|
||||
"count": 1,
|
||||
"platformRelease": "iv",
|
||||
},
|
||||
],
|
||||
"platforms": Array [
|
||||
Object {
|
||||
"count": 1,
|
||||
"platform": "rocky",
|
||||
},
|
||||
],
|
||||
},
|
||||
"plugins": Object {
|
||||
"clouds": Object {
|
||||
"chances": 95,
|
||||
},
|
||||
"localization": Object {
|
||||
"integrities": Object {},
|
||||
"labelsCount": 0,
|
||||
"locale": "en",
|
||||
},
|
||||
"rain": Object {
|
||||
"chances": 2,
|
||||
},
|
||||
"snow": Object {
|
||||
"chances": 0,
|
||||
},
|
||||
"sun": Object {
|
||||
"chances": 5,
|
||||
},
|
||||
},
|
||||
"versions": Array [
|
||||
Object {
|
||||
"cluster_name": "test",
|
||||
"cluster_stats": Object {
|
||||
"nodes": Object {
|
||||
"usage": Object {
|
||||
"nodes": Array [
|
||||
Object {
|
||||
"count": 1,
|
||||
"version": "8675309",
|
||||
"aggregations": Object {
|
||||
"terms": Object {
|
||||
"bytes": 2,
|
||||
},
|
||||
},
|
||||
"node_id": "some_node_id",
|
||||
"rest_actions": Object {
|
||||
"nodes_usage_action": 1,
|
||||
},
|
||||
"since": 1588616945163,
|
||||
"timestamp": 1588617023177,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
"version": "8.0.0",
|
||||
},
|
||||
]
|
||||
"cluster_uuid": "test",
|
||||
"collection": "local",
|
||||
"stack_stats": Object {
|
||||
"data": Array [],
|
||||
"kibana": Object {
|
||||
"count": 1,
|
||||
"great": "googlymoogly",
|
||||
"indices": 1,
|
||||
"os": Object {
|
||||
"platformReleases": Array [
|
||||
Object {
|
||||
"count": 1,
|
||||
"platformRelease": "iv",
|
||||
},
|
||||
],
|
||||
"platforms": Array [
|
||||
Object {
|
||||
"count": 1,
|
||||
"platform": "rocky",
|
||||
},
|
||||
],
|
||||
},
|
||||
"plugins": Object {
|
||||
"clouds": Object {
|
||||
"chances": 95,
|
||||
},
|
||||
"localization": Object {
|
||||
"integrities": Object {},
|
||||
"labelsCount": 0,
|
||||
"locale": "en",
|
||||
},
|
||||
"rain": Object {
|
||||
"chances": 2,
|
||||
},
|
||||
"snow": Object {
|
||||
"chances": 0,
|
||||
},
|
||||
"sun": Object {
|
||||
"chances": 5,
|
||||
},
|
||||
},
|
||||
"versions": Array [
|
||||
Object {
|
||||
"count": 1,
|
||||
"version": "8675309",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
"timestamp": Any<String>,
|
||||
"version": "8.0.0",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`Telemetry Collection: Get Aggregated Stats X-Pack telemetry (license + X-Pack) 1`] = `
|
||||
Array [
|
||||
Object {
|
||||
"cluster_name": "test",
|
||||
"cluster_stats": Object {
|
||||
"nodes": Object {
|
||||
"usage": Object {
|
||||
"nodes": Array [
|
||||
Object {
|
||||
"aggregations": Object {
|
||||
"terms": Object {
|
||||
"bytes": 2,
|
||||
},
|
||||
},
|
||||
"node_id": "some_node_id",
|
||||
"rest_actions": Object {
|
||||
"nodes_usage_action": 1,
|
||||
},
|
||||
"since": 1588616945163,
|
||||
"timestamp": 1588617023177,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
"cluster_uuid": "test",
|
||||
"collection": "local",
|
||||
"stack_stats": Object {
|
||||
"kibana": Object {
|
||||
"count": 1,
|
||||
"great": "googlymoogly",
|
||||
"indices": 1,
|
||||
"os": Object {
|
||||
"platformReleases": Array [
|
||||
Object {
|
||||
"count": 1,
|
||||
"platformRelease": "iv",
|
||||
},
|
||||
],
|
||||
"platforms": Array [
|
||||
Object {
|
||||
"count": 1,
|
||||
"platform": "rocky",
|
||||
},
|
||||
],
|
||||
},
|
||||
"plugins": Object {
|
||||
"clouds": Object {
|
||||
"chances": 95,
|
||||
},
|
||||
"localization": Object {
|
||||
"integrities": Object {},
|
||||
"labelsCount": 0,
|
||||
"locale": "en",
|
||||
},
|
||||
"rain": Object {
|
||||
"chances": 2,
|
||||
},
|
||||
"snow": Object {
|
||||
"chances": 0,
|
||||
},
|
||||
"sun": Object {
|
||||
"chances": 5,
|
||||
},
|
||||
},
|
||||
"versions": Array [
|
||||
Object {
|
||||
"cluster_name": "test",
|
||||
"cluster_stats": Object {
|
||||
"nodes": Object {
|
||||
"usage": Object {
|
||||
"nodes": Array [
|
||||
Object {
|
||||
"count": 1,
|
||||
"version": "8675309",
|
||||
"aggregations": Object {
|
||||
"terms": Object {
|
||||
"bytes": 2,
|
||||
},
|
||||
},
|
||||
"node_id": "some_node_id",
|
||||
"rest_actions": Object {
|
||||
"nodes_usage_action": 1,
|
||||
},
|
||||
"since": 1588616945163,
|
||||
"timestamp": 1588617023177,
|
||||
},
|
||||
],
|
||||
},
|
||||
"xpack": Object {},
|
||||
},
|
||||
"version": "8.0.0",
|
||||
},
|
||||
]
|
||||
"cluster_uuid": "test",
|
||||
"collection": "local",
|
||||
"stack_stats": Object {
|
||||
"data": Array [],
|
||||
"kibana": Object {
|
||||
"count": 1,
|
||||
"great": "googlymoogly",
|
||||
"indices": 1,
|
||||
"os": Object {
|
||||
"platformReleases": Array [
|
||||
Object {
|
||||
"count": 1,
|
||||
"platformRelease": "iv",
|
||||
},
|
||||
],
|
||||
"platforms": Array [
|
||||
Object {
|
||||
"count": 1,
|
||||
"platform": "rocky",
|
||||
},
|
||||
],
|
||||
},
|
||||
"plugins": Object {
|
||||
"clouds": Object {
|
||||
"chances": 95,
|
||||
},
|
||||
"localization": Object {
|
||||
"integrities": Object {},
|
||||
"labelsCount": 0,
|
||||
"locale": "en",
|
||||
},
|
||||
"rain": Object {
|
||||
"chances": 2,
|
||||
},
|
||||
"snow": Object {
|
||||
"chances": 0,
|
||||
},
|
||||
"sun": Object {
|
||||
"chances": 5,
|
||||
},
|
||||
},
|
||||
"versions": Array [
|
||||
Object {
|
||||
"count": 1,
|
||||
"version": "8675309",
|
||||
},
|
||||
],
|
||||
},
|
||||
"xpack": Object {},
|
||||
},
|
||||
"timestamp": Any<String>,
|
||||
"version": "8.0.0",
|
||||
}
|
||||
`;
|
||||
|
|
|
@ -85,7 +85,11 @@ describe('Telemetry Collection: Get Aggregated Stats', () => {
|
|||
} as any,
|
||||
context
|
||||
);
|
||||
expect(stats.map(({ timestamp, ...rest }) => rest)).toMatchSnapshot();
|
||||
stats.forEach((entry) => {
|
||||
expect(entry).toMatchSnapshot({
|
||||
timestamp: expect.any(String),
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
test('X-Pack telemetry (license + X-Pack)', async () => {
|
||||
|
@ -123,6 +127,10 @@ describe('Telemetry Collection: Get Aggregated Stats', () => {
|
|||
} as any,
|
||||
context
|
||||
);
|
||||
expect(stats.map(({ timestamp, ...rest }) => rest)).toMatchSnapshot();
|
||||
stats.forEach((entry) => {
|
||||
expect(entry).toMatchSnapshot({
|
||||
timestamp: expect.any(String),
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
@ -153,9 +153,7 @@
|
|||
"min": 223
|
||||
}
|
||||
},
|
||||
"versions": [
|
||||
"6.3.1"
|
||||
]
|
||||
"versions": ["6.3.1"]
|
||||
},
|
||||
"status": "yellow",
|
||||
"timestamp": 1532386499084
|
||||
|
@ -297,9 +295,7 @@
|
|||
},
|
||||
"audit": {
|
||||
"enabled": false,
|
||||
"outputs": [
|
||||
"logfile"
|
||||
]
|
||||
"outputs": ["logfile"]
|
||||
},
|
||||
"available": false,
|
||||
"enabled": true,
|
||||
|
|
|
@ -92,9 +92,7 @@
|
|||
"master": 1,
|
||||
"ingest": 1
|
||||
},
|
||||
"versions": [
|
||||
"7.0.0-alpha1"
|
||||
],
|
||||
"versions": ["7.0.0-alpha1"],
|
||||
"os": {
|
||||
"available_processors": 4,
|
||||
"allocated_processors": 1,
|
||||
|
@ -214,9 +212,7 @@
|
|||
}
|
||||
},
|
||||
"audit": {
|
||||
"outputs": [
|
||||
"logfile"
|
||||
],
|
||||
"outputs": ["logfile"],
|
||||
"enabled": false
|
||||
},
|
||||
"ipfilter": {
|
||||
|
@ -383,9 +379,7 @@
|
|||
"master": 1,
|
||||
"ingest": 1
|
||||
},
|
||||
"versions": [
|
||||
"7.0.0-alpha1"
|
||||
],
|
||||
"versions": ["7.0.0-alpha1"],
|
||||
"os": {
|
||||
"available_processors": 4,
|
||||
"allocated_processors": 1,
|
||||
|
@ -461,34 +455,22 @@
|
|||
"enabled": true,
|
||||
"realms": {
|
||||
"file": {
|
||||
"name": [
|
||||
"default_file"
|
||||
],
|
||||
"name": ["default_file"],
|
||||
"available": true,
|
||||
"size": [
|
||||
0
|
||||
],
|
||||
"size": [0],
|
||||
"enabled": true,
|
||||
"order": [
|
||||
2147483647
|
||||
]
|
||||
"order": [2147483647]
|
||||
},
|
||||
"ldap": {
|
||||
"available": true,
|
||||
"enabled": false
|
||||
},
|
||||
"native": {
|
||||
"name": [
|
||||
"default_native"
|
||||
],
|
||||
"name": ["default_native"],
|
||||
"available": true,
|
||||
"size": [
|
||||
2
|
||||
],
|
||||
"size": [2],
|
||||
"enabled": true,
|
||||
"order": [
|
||||
2147483647
|
||||
]
|
||||
"order": [2147483647]
|
||||
},
|
||||
"active_directory": {
|
||||
"available": true,
|
||||
|
@ -523,9 +505,7 @@
|
|||
}
|
||||
},
|
||||
"audit": {
|
||||
"outputs": [
|
||||
"logfile"
|
||||
],
|
||||
"outputs": ["logfile"],
|
||||
"enabled": false
|
||||
},
|
||||
"ipfilter": {
|
||||
|
@ -700,9 +680,7 @@
|
|||
"master": 2,
|
||||
"ingest": 2
|
||||
},
|
||||
"versions": [
|
||||
"7.0.0-alpha1"
|
||||
],
|
||||
"versions": ["7.0.0-alpha1"],
|
||||
"os": {
|
||||
"available_processors": 8,
|
||||
"allocated_processors": 2,
|
||||
|
@ -778,34 +756,22 @@
|
|||
"enabled": true,
|
||||
"realms": {
|
||||
"file": {
|
||||
"name": [
|
||||
"default_file"
|
||||
],
|
||||
"name": ["default_file"],
|
||||
"available": true,
|
||||
"size": [
|
||||
0
|
||||
],
|
||||
"size": [0],
|
||||
"enabled": true,
|
||||
"order": [
|
||||
2147483647
|
||||
]
|
||||
"order": [2147483647]
|
||||
},
|
||||
"ldap": {
|
||||
"available": true,
|
||||
"enabled": false
|
||||
},
|
||||
"native": {
|
||||
"name": [
|
||||
"default_native"
|
||||
],
|
||||
"name": ["default_native"],
|
||||
"available": true,
|
||||
"size": [
|
||||
1
|
||||
],
|
||||
"size": [1],
|
||||
"enabled": true,
|
||||
"order": [
|
||||
2147483647
|
||||
]
|
||||
"order": [2147483647]
|
||||
},
|
||||
"active_directory": {
|
||||
"available": true,
|
||||
|
@ -840,9 +806,7 @@
|
|||
}
|
||||
},
|
||||
"audit": {
|
||||
"outputs": [
|
||||
"logfile"
|
||||
],
|
||||
"outputs": ["logfile"],
|
||||
"enabled": false
|
||||
},
|
||||
"ipfilter": {
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue