Mirror of https://github.com/elastic/kibana.git (synced 2025-04-23 17:28:26 -04:00)
[Response Ops] Onboard metric threshold rule type to use framework alerts as data (#166664)
Resolves https://github.com/elastic/kibana/issues/164220

## Summary

Removes the lifecycle executor wrapper around the metric threshold rule type executor so that this rule type uses the framework alerts client to write alerts-as-data documents.

### Response ops changes

- Pass the task `startedAt` date to the alerts client. Lifecycle executor rules use this standardized timestamp for the `@timestamp` field of the alerts-as-data (AaD) document, as well as for the start and end time of an alert.

### Metric threshold rule changes

- Switch to using the alerts client in the executor to report alerts and to get recovered alert information.

---------

Co-authored-by: Kibana Machine <42973632+kibanamachine@users.noreply.github.com>
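For orientation, here is a minimal sketch (not the PR's exact code) of the executor pattern this change moves the metric threshold rule to, modeled on the `alertsClient.report`/`setAlertData` calls visible in the diff below. The alert id, action group, payload fields, and context fields are illustrative placeholders.

```ts
import { ALERT_REASON } from '@kbn/rule-data-utils';

// Illustrative executor body: report an alert through the framework alerts
// client and let the task's `startedAt` drive the alert timestamps.
async function runRuleExecutor({ services, startedAt }: { services: any; startedAt: Date }) {
  const { alertsClient } = services;
  if (!alertsClient) {
    throw new Error(`Expected alertsClient to be defined but it was not!`);
  }

  // report() registers the alert and returns its UUID and start time; for a
  // brand-new alert the start now falls back to the task's startedAt value.
  const { uuid, start } = alertsClient.report({
    id: 'example-group', // placeholder alert instance id
    actionGroup: 'default', // placeholder action group
  });

  // setAlertData() attaches the alert-as-data payload and the action context.
  alertsClient.setAlertData({
    id: 'example-group',
    payload: { [ALERT_REASON]: 'example reason' }, // placeholder payload
    context: {
      timestamp: start ?? startedAt.toISOString(), // placeholder context fields
      alertUuid: uuid,
    },
  });

  return { state: {} };
}
```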
parent 2f80ca2ef8
commit f4dda26792
25 changed files with 1224 additions and 727 deletions
@@ -150,7 +150,7 @@ const generateSchemaLines = ({
        break;
      case 'object':
      case 'nested':
        if (!isEnabled) {
        if (!isEnabled || !isArray) {
          lineWriter.addLine(`${keyToWrite}: ${getSchemaDefinition('schemaUnknown', isArray)},`);
        } else if (isArray && null != fieldMap.properties) {
          lineWriter.addLineAndIndent(`${keyToWrite}: rt.array(`);

@ -152,6 +152,7 @@ const EcsOptional = rt.partial({
|
|||
'container.image.hash.all': schemaStringArray,
|
||||
'container.image.name': schemaString,
|
||||
'container.image.tag': schemaStringArray,
|
||||
'container.labels': schemaUnknown,
|
||||
'container.memory.usage': schemaStringOrNumber,
|
||||
'container.name': schemaString,
|
||||
'container.network.egress.bytes': schemaStringOrNumber,
|
||||
|
@ -307,6 +308,7 @@ const EcsOptional = rt.partial({
|
|||
'faas.execution': schemaString,
|
||||
'faas.id': schemaString,
|
||||
'faas.name': schemaString,
|
||||
'faas.trigger': schemaUnknown,
|
||||
'faas.version': schemaString,
|
||||
'file.accessed': schemaDate,
|
||||
'file.attributes': schemaStringArray,
|
||||
|
@ -471,12 +473,14 @@ const EcsOptional = rt.partial({
|
|||
'http.response.mime_type': schemaString,
|
||||
'http.response.status_code': schemaStringOrNumber,
|
||||
'http.version': schemaString,
|
||||
labels: schemaUnknown,
|
||||
'log.file.path': schemaString,
|
||||
'log.level': schemaString,
|
||||
'log.logger': schemaString,
|
||||
'log.origin.file.line': schemaStringOrNumber,
|
||||
'log.origin.file.name': schemaString,
|
||||
'log.origin.function': schemaString,
|
||||
'log.syslog': schemaUnknown,
|
||||
message: schemaString,
|
||||
'network.application': schemaString,
|
||||
'network.bytes': schemaStringOrNumber,
|
||||
|
@ -484,6 +488,7 @@ const EcsOptional = rt.partial({
|
|||
'network.direction': schemaString,
|
||||
'network.forwarded_ip': schemaString,
|
||||
'network.iana_number': schemaString,
|
||||
'network.inner': schemaUnknown,
|
||||
'network.name': schemaString,
|
||||
'network.packets': schemaStringOrNumber,
|
||||
'network.protocol': schemaString,
|
||||
|
@ -491,6 +496,7 @@ const EcsOptional = rt.partial({
|
|||
'network.type': schemaString,
|
||||
'network.vlan.id': schemaString,
|
||||
'network.vlan.name': schemaString,
|
||||
'observer.egress': schemaUnknown,
|
||||
'observer.geo.city_name': schemaString,
|
||||
'observer.geo.continent_code': schemaString,
|
||||
'observer.geo.continent_name': schemaString,
|
||||
|
@ -503,6 +509,7 @@ const EcsOptional = rt.partial({
|
|||
'observer.geo.region_name': schemaString,
|
||||
'observer.geo.timezone': schemaString,
|
||||
'observer.hostname': schemaString,
|
||||
'observer.ingress': schemaUnknown,
|
||||
'observer.ip': schemaStringArray,
|
||||
'observer.mac': schemaStringArray,
|
||||
'observer.name': schemaString,
|
||||
|
@ -628,6 +635,7 @@ const EcsOptional = rt.partial({
|
|||
'process.entry_leader.start': schemaDate,
|
||||
'process.entry_leader.supplemental_groups.id': schemaString,
|
||||
'process.entry_leader.supplemental_groups.name': schemaString,
|
||||
'process.entry_leader.tty': schemaUnknown,
|
||||
'process.entry_leader.user.id': schemaString,
|
||||
'process.entry_leader.user.name': schemaString,
|
||||
'process.entry_leader.working_directory': schemaString,
|
||||
|
@ -656,6 +664,7 @@ const EcsOptional = rt.partial({
|
|||
'process.group_leader.start': schemaDate,
|
||||
'process.group_leader.supplemental_groups.id': schemaString,
|
||||
'process.group_leader.supplemental_groups.name': schemaString,
|
||||
'process.group_leader.tty': schemaUnknown,
|
||||
'process.group_leader.user.id': schemaString,
|
||||
'process.group_leader.user.name': schemaString,
|
||||
'process.group_leader.working_directory': schemaString,
|
||||
|
@ -667,6 +676,7 @@ const EcsOptional = rt.partial({
|
|||
'process.hash.ssdeep': schemaString,
|
||||
'process.hash.tlsh': schemaString,
|
||||
'process.interactive': schemaBoolean,
|
||||
'process.io': schemaUnknown,
|
||||
'process.name': schemaString,
|
||||
'process.parent.args': schemaStringArray,
|
||||
'process.parent.args_count': schemaStringOrNumber,
|
||||
|
@ -757,6 +767,7 @@ const EcsOptional = rt.partial({
|
|||
'process.parent.thread.id': schemaStringOrNumber,
|
||||
'process.parent.thread.name': schemaString,
|
||||
'process.parent.title': schemaString,
|
||||
'process.parent.tty': schemaUnknown,
|
||||
'process.parent.uptime': schemaStringOrNumber,
|
||||
'process.parent.user.id': schemaString,
|
||||
'process.parent.user.name': schemaString,
|
||||
|
@ -810,6 +821,7 @@ const EcsOptional = rt.partial({
|
|||
'process.session_leader.start': schemaDate,
|
||||
'process.session_leader.supplemental_groups.id': schemaString,
|
||||
'process.session_leader.supplemental_groups.name': schemaString,
|
||||
'process.session_leader.tty': schemaUnknown,
|
||||
'process.session_leader.user.id': schemaString,
|
||||
'process.session_leader.user.name': schemaString,
|
||||
'process.session_leader.working_directory': schemaString,
|
||||
|
@ -819,6 +831,7 @@ const EcsOptional = rt.partial({
|
|||
'process.thread.id': schemaStringOrNumber,
|
||||
'process.thread.name': schemaString,
|
||||
'process.title': schemaString,
|
||||
'process.tty': schemaUnknown,
|
||||
'process.uptime': schemaStringOrNumber,
|
||||
'process.user.id': schemaString,
|
||||
'process.user.name': schemaString,
|
||||
|
@ -951,6 +964,7 @@ const EcsOptional = rt.partial({
|
|||
tags: schemaStringArray,
|
||||
'threat.enrichments': rt.array(
|
||||
rt.partial({
|
||||
indicator: schemaUnknown,
|
||||
'matched.atomic': schemaString,
|
||||
'matched.field': schemaString,
|
||||
'matched.id': schemaString,
|
||||
|
|
|
@ -73,6 +73,7 @@ const ObservabilityApmAlertOptional = rt.partial({
|
|||
'agent.name': schemaString,
|
||||
'error.grouping_key': schemaString,
|
||||
'error.grouping_name': schemaString,
|
||||
'kibana.alert.context': schemaUnknown,
|
||||
'kibana.alert.evaluation.threshold': schemaStringOrNumber,
|
||||
'kibana.alert.evaluation.value': schemaStringOrNumber,
|
||||
'kibana.alert.evaluation.values': schemaStringOrNumberArray,
|
||||
|
@ -82,6 +83,7 @@ const ObservabilityApmAlertOptional = rt.partial({
|
|||
value: schemaString,
|
||||
})
|
||||
),
|
||||
labels: schemaUnknown,
|
||||
'processor.event': schemaString,
|
||||
'service.environment': schemaString,
|
||||
'service.language.name': schemaString,
|
||||
|
|
|
@ -71,6 +71,7 @@ export const schemaGeoPointArray = rt.array(schemaGeoPoint);
|
|||
const ObservabilityLogsAlertRequired = rt.type({
|
||||
});
|
||||
const ObservabilityLogsAlertOptional = rt.partial({
|
||||
'kibana.alert.context': schemaUnknown,
|
||||
'kibana.alert.evaluation.threshold': schemaStringOrNumber,
|
||||
'kibana.alert.evaluation.value': schemaStringOrNumber,
|
||||
'kibana.alert.evaluation.values': schemaStringOrNumberArray,
|
||||
|
|
|
@ -71,6 +71,7 @@ export const schemaGeoPointArray = rt.array(schemaGeoPoint);
|
|||
const ObservabilityMetricsAlertRequired = rt.type({
|
||||
});
|
||||
const ObservabilityMetricsAlertOptional = rt.partial({
|
||||
'kibana.alert.context': schemaUnknown,
|
||||
'kibana.alert.evaluation.threshold': schemaStringOrNumber,
|
||||
'kibana.alert.evaluation.value': schemaStringOrNumber,
|
||||
'kibana.alert.evaluation.values': schemaStringOrNumberArray,
|
||||
|
|
|
@ -70,6 +70,7 @@ export const schemaGeoPointArray = rt.array(schemaGeoPoint);
|
|||
const ObservabilitySloAlertRequired = rt.type({
|
||||
});
|
||||
const ObservabilitySloAlertOptional = rt.partial({
|
||||
'kibana.alert.context': schemaUnknown,
|
||||
'kibana.alert.evaluation.threshold': schemaStringOrNumber,
|
||||
'kibana.alert.evaluation.value': schemaStringOrNumber,
|
||||
'kibana.alert.evaluation.values': schemaStringOrNumberArray,
|
||||
|
|
|
@ -74,6 +74,7 @@ const ObservabilityUptimeAlertOptional = rt.partial({
|
|||
'anomaly.bucket_span.minutes': schemaString,
|
||||
'anomaly.start': schemaDate,
|
||||
'error.message': schemaString,
|
||||
'kibana.alert.context': schemaUnknown,
|
||||
'kibana.alert.evaluation.threshold': schemaStringOrNumber,
|
||||
'kibana.alert.evaluation.value': schemaStringOrNumber,
|
||||
'kibana.alert.evaluation.values': schemaStringOrNumberArray,
|
||||
|
|
|
@ -182,6 +182,7 @@ const SecurityAlertOptional = rt.partial({
|
|||
'kibana.alert.suppression.terms.field': schemaStringArray,
|
||||
'kibana.alert.suppression.terms.value': schemaStringArray,
|
||||
'kibana.alert.system_status': schemaString,
|
||||
'kibana.alert.threshold_result.cardinality': schemaUnknown,
|
||||
'kibana.alert.threshold_result.count': schemaStringOrNumber,
|
||||
'kibana.alert.threshold_result.from': schemaDate,
|
||||
'kibana.alert.threshold_result.terms': rt.array(
|
||||
|
|
|
@@ -63,6 +63,7 @@ import {
import { getDataStreamAdapter } from '../alerts_service/lib/data_stream_adapter';

const date = '2023-03-28T22:27:28.159Z';
const startedAtDate = '2023-03-28T13:00:00.000Z';
const maxAlerts = 1000;
let logger: ReturnType<typeof loggingSystemMock['createLogger']>;
const clusterClient = elasticsearchServiceMock.createClusterClient().asInternalUser;

@@ -254,6 +255,15 @@ const getRecoveredIndexedAlertDoc = (overrides = {}) => ({
  ...overrides,
});

const defaultExecutionOpts = {
  maxAlerts,
  ruleLabel: `test: rule-name`,
  flappingSettings: DEFAULT_FLAPPING_SETTINGS,
  activeAlertsFromState: {},
  recoveredAlertsFromState: {},
  startedAt: null,
};

describe('Alerts Client', () => {
  let alertsClientParams: AlertsClientParams;
  let processAndLogAlertsOpts: ProcessAndLogAlertsOpts;

@ -305,15 +315,10 @@ describe('Alerts Client', () => {
|
|||
|
||||
const alertsClient = new AlertsClient(alertsClientParams);
|
||||
|
||||
const opts = {
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {},
|
||||
recoveredAlertsFromState: {},
|
||||
};
|
||||
await alertsClient.initializeExecution(opts);
|
||||
expect(mockLegacyAlertsClient.initializeExecution).toHaveBeenCalledWith(opts);
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
expect(mockLegacyAlertsClient.initializeExecution).toHaveBeenCalledWith(
|
||||
defaultExecutionOpts
|
||||
);
|
||||
|
||||
// no alerts to query for
|
||||
expect(clusterClient.search).not.toHaveBeenCalled();
|
||||
|
@ -338,15 +343,10 @@ describe('Alerts Client', () => {
|
|||
},
|
||||
});
|
||||
|
||||
const opts = {
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {},
|
||||
recoveredAlertsFromState: {},
|
||||
};
|
||||
await alertsClient.initializeExecution(opts);
|
||||
expect(mockLegacyAlertsClient.initializeExecution).toHaveBeenCalledWith(opts);
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
expect(mockLegacyAlertsClient.initializeExecution).toHaveBeenCalledWith(
|
||||
defaultExecutionOpts
|
||||
);
|
||||
expect(mockLegacyAlertsClient.getTrackedAlerts).not.toHaveBeenCalled();
|
||||
spy.mockRestore();
|
||||
});
|
||||
|
@ -362,15 +362,10 @@ describe('Alerts Client', () => {
|
|||
|
||||
const alertsClient = new AlertsClient(alertsClientParams);
|
||||
|
||||
const opts = {
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {},
|
||||
recoveredAlertsFromState: {},
|
||||
};
|
||||
await alertsClient.initializeExecution(opts);
|
||||
expect(mockLegacyAlertsClient.initializeExecution).toHaveBeenCalledWith(opts);
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
expect(mockLegacyAlertsClient.initializeExecution).toHaveBeenCalledWith(
|
||||
defaultExecutionOpts
|
||||
);
|
||||
|
||||
expect(clusterClient.search).toHaveBeenCalledWith({
|
||||
body: {
|
||||
|
@ -417,15 +412,10 @@ describe('Alerts Client', () => {
|
|||
|
||||
const alertsClient = new AlertsClient(alertsClientParams);
|
||||
|
||||
const opts = {
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {},
|
||||
recoveredAlertsFromState: {},
|
||||
};
|
||||
await alertsClient.initializeExecution(opts);
|
||||
expect(mockLegacyAlertsClient.initializeExecution).toHaveBeenCalledWith(opts);
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
expect(mockLegacyAlertsClient.initializeExecution).toHaveBeenCalledWith(
|
||||
defaultExecutionOpts
|
||||
);
|
||||
|
||||
expect(clusterClient.search).toHaveBeenCalledTimes(2);
|
||||
|
||||
|
@ -446,15 +436,10 @@ describe('Alerts Client', () => {
|
|||
|
||||
const alertsClient = new AlertsClient(alertsClientParams);
|
||||
|
||||
const opts = {
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {},
|
||||
recoveredAlertsFromState: {},
|
||||
};
|
||||
await alertsClient.initializeExecution(opts);
|
||||
expect(mockLegacyAlertsClient.initializeExecution).toHaveBeenCalledWith(opts);
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
expect(mockLegacyAlertsClient.initializeExecution).toHaveBeenCalledWith(
|
||||
defaultExecutionOpts
|
||||
);
|
||||
|
||||
expect(clusterClient.search).toHaveBeenCalledWith({
|
||||
body: {
|
||||
|
@ -489,13 +474,7 @@ describe('Alerts Client', () => {
|
|||
alertsClientParams
|
||||
);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
|
||||
// Report 2 new alerts
|
||||
const alertExecutorService = alertsClient.factory();
|
||||
|
@ -552,13 +531,10 @@ describe('Alerts Client', () => {
|
|||
);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
...defaultExecutionOpts,
|
||||
activeAlertsFromState: {
|
||||
'1': trackedAlert1Raw,
|
||||
},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
|
||||
// Report 1 new alert and 1 active alert
|
||||
|
@ -621,13 +597,10 @@ describe('Alerts Client', () => {
|
|||
);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
...defaultExecutionOpts,
|
||||
activeAlertsFromState: {
|
||||
'1': trackedAlert1Raw,
|
||||
},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
|
||||
// Report 1 new alert and 1 active alert
|
||||
|
@ -740,13 +713,10 @@ describe('Alerts Client', () => {
|
|||
);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
...defaultExecutionOpts,
|
||||
activeAlertsFromState: {
|
||||
'1': activeAlert,
|
||||
},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
|
||||
// Report 1 new alert and 1 active alert
|
||||
|
@ -814,14 +784,11 @@ describe('Alerts Client', () => {
|
|||
);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
...defaultExecutionOpts,
|
||||
activeAlertsFromState: {
|
||||
'1': trackedAlert1Raw,
|
||||
'2': trackedAlert2Raw,
|
||||
},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
|
||||
// Report 1 new alert and 1 active alert, recover 1 alert
|
||||
|
@ -909,14 +876,11 @@ describe('Alerts Client', () => {
|
|||
);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
...defaultExecutionOpts,
|
||||
activeAlertsFromState: {
|
||||
'1': trackedAlert1Raw,
|
||||
'2': trackedAlert2Raw,
|
||||
},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
|
||||
// Report 1 new alert and 1 active alert, recover 1 alert
|
||||
|
@ -1032,19 +996,119 @@ describe('Alerts Client', () => {
|
|||
});
|
||||
});
|
||||
|
||||
test('should not try to index if no alerts', async () => {
|
||||
test('should use startedAt time if provided', async () => {
|
||||
clusterClient.search.mockResolvedValue({
|
||||
took: 10,
|
||||
timed_out: false,
|
||||
_shards: { failed: 0, successful: 1, total: 1, skipped: 0 },
|
||||
hits: {
|
||||
total: { relation: 'eq', value: 2 },
|
||||
hits: [
|
||||
{
|
||||
_id: 'abc',
|
||||
_index: '.internal.alerts-test.alerts-default-000001',
|
||||
_seq_no: 41,
|
||||
_primary_term: 665,
|
||||
_source: fetchedAlert1,
|
||||
},
|
||||
{
|
||||
_id: 'def',
|
||||
_index: '.internal.alerts-test.alerts-default-000002',
|
||||
_seq_no: 42,
|
||||
_primary_term: 666,
|
||||
_source: fetchedAlert2,
|
||||
},
|
||||
],
|
||||
},
|
||||
});
|
||||
const alertsClient = new AlertsClient<{}, {}, {}, 'default', 'recovered'>(
|
||||
alertsClientParams
|
||||
);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {},
|
||||
recoveredAlertsFromState: {},
|
||||
...defaultExecutionOpts,
|
||||
activeAlertsFromState: {
|
||||
'1': trackedAlert1Raw,
|
||||
'2': trackedAlert2Raw,
|
||||
},
|
||||
startedAt: new Date(startedAtDate),
|
||||
});
|
||||
|
||||
// Report 1 new alert and 1 active alert, recover 1 alert
|
||||
const alertExecutorService = alertsClient.factory();
|
||||
alertExecutorService.create('2').scheduleActions('default');
|
||||
alertExecutorService.create('3').scheduleActions('default');
|
||||
|
||||
alertsClient.processAndLogAlerts(processAndLogAlertsOpts);
|
||||
|
||||
await alertsClient.persistAlerts();
|
||||
|
||||
const { alertsToReturn } = alertsClient.getAlertsToSerialize();
|
||||
const uuid3 = alertsToReturn['3'].meta?.uuid;
|
||||
|
||||
expect(clusterClient.bulk).toHaveBeenCalledWith({
|
||||
index: '.alerts-test.alerts-default',
|
||||
refresh: true,
|
||||
require_alias: !useDataStreamForAlerts,
|
||||
body: [
|
||||
{
|
||||
index: {
|
||||
_id: 'def',
|
||||
_index: '.internal.alerts-test.alerts-default-000002',
|
||||
if_seq_no: 42,
|
||||
if_primary_term: 666,
|
||||
require_alias: false,
|
||||
},
|
||||
},
|
||||
// ongoing alert doc
|
||||
getOngoingIndexedAlertDoc({
|
||||
[TIMESTAMP]: startedAtDate,
|
||||
[ALERT_UUID]: 'def',
|
||||
[ALERT_INSTANCE_ID]: '2',
|
||||
[ALERT_FLAPPING_HISTORY]: [true, false, false, false],
|
||||
[ALERT_DURATION]: 37951841000,
|
||||
[ALERT_START]: '2023-03-28T02:27:28.159Z',
|
||||
[ALERT_TIME_RANGE]: { gte: '2023-03-28T02:27:28.159Z' },
|
||||
}),
|
||||
{
|
||||
create: { _id: uuid3, ...(useDataStreamForAlerts ? {} : { require_alias: true }) },
|
||||
},
|
||||
// new alert doc
|
||||
getNewIndexedAlertDoc({
|
||||
[TIMESTAMP]: startedAtDate,
|
||||
[ALERT_UUID]: uuid3,
|
||||
[ALERT_INSTANCE_ID]: '3',
|
||||
[ALERT_START]: startedAtDate,
|
||||
[ALERT_TIME_RANGE]: { gte: startedAtDate },
|
||||
}),
|
||||
{
|
||||
index: {
|
||||
_id: 'abc',
|
||||
_index: '.internal.alerts-test.alerts-default-000001',
|
||||
if_seq_no: 41,
|
||||
if_primary_term: 665,
|
||||
require_alias: false,
|
||||
},
|
||||
},
|
||||
// recovered alert doc
|
||||
getRecoveredIndexedAlertDoc({
|
||||
[TIMESTAMP]: startedAtDate,
|
||||
[ALERT_DURATION]: 1951841000,
|
||||
[ALERT_UUID]: 'abc',
|
||||
[ALERT_END]: startedAtDate,
|
||||
[ALERT_TIME_RANGE]: { gte: '2023-03-28T12:27:28.159Z', lte: startedAtDate },
|
||||
}),
|
||||
],
|
||||
});
|
||||
});
|
||||
|
||||
test('should not try to index if no alerts', async () => {
|
||||
const alertsClient = new AlertsClient<{}, {}, {}, 'default', 'recovered'>(
|
||||
alertsClientParams
|
||||
);
|
||||
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
|
||||
// Report no alerts
|
||||
|
||||
alertsClient.processAndLogAlerts(processAndLogAlertsOpts);
|
||||
|
@ -1087,13 +1151,7 @@ describe('Alerts Client', () => {
|
|||
alertsClientParams
|
||||
);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
|
||||
// Report 2 new alerts
|
||||
const alertExecutorService = alertsClient.factory();
|
||||
|
@ -1140,14 +1198,11 @@ describe('Alerts Client', () => {
|
|||
);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
...defaultExecutionOpts,
|
||||
activeAlertsFromState: {
|
||||
'1': trackedAlert1Raw,
|
||||
'2': trackedAlert2Raw,
|
||||
},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
|
||||
// Report 2 active alerts
|
||||
|
@ -1198,13 +1253,7 @@ describe('Alerts Client', () => {
|
|||
alertsClientParams
|
||||
);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
|
||||
// Report 2 new alerts
|
||||
const alertExecutorService = alertsClient.factory();
|
||||
|
@ -1624,13 +1673,7 @@ describe('Alerts Client', () => {
|
|||
alertsClientParams
|
||||
);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
|
||||
// Report 2 new alerts
|
||||
const { uuid: uuid1, start: start1 } = alertsClient.report({
|
||||
|
@ -1662,6 +1705,63 @@ describe('Alerts Client', () => {
|
|||
expect(start2).toBeNull();
|
||||
});
|
||||
|
||||
test('should use startedAt time if provided', async () => {
|
||||
const mockGetUuidCurrent = jest
|
||||
.fn()
|
||||
.mockReturnValueOnce('uuid1')
|
||||
.mockReturnValueOnce('uuid2');
|
||||
const mockGetStartCurrent = jest.fn().mockReturnValue(null);
|
||||
const mockScheduleActionsCurrent = jest.fn().mockImplementation(() => ({
|
||||
replaceState: mockReplaceState,
|
||||
getUuid: mockGetUuidCurrent,
|
||||
getStart: mockGetStartCurrent,
|
||||
}));
|
||||
const mockCreateCurrent = jest.fn().mockImplementation(() => ({
|
||||
scheduleActions: mockScheduleActionsCurrent,
|
||||
}));
|
||||
mockLegacyAlertsClient.factory.mockImplementation(() => ({ create: mockCreateCurrent }));
|
||||
const spy = jest
|
||||
.spyOn(LegacyAlertsClientModule, 'LegacyAlertsClient')
|
||||
.mockImplementation(() => mockLegacyAlertsClient);
|
||||
const alertsClient = new AlertsClient<{}, {}, {}, 'default', 'recovered'>(
|
||||
alertsClientParams
|
||||
);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
...defaultExecutionOpts,
|
||||
startedAt: new Date(startedAtDate),
|
||||
});
|
||||
|
||||
// Report 2 new alerts
|
||||
const { uuid: uuid1, start: start1 } = alertsClient.report({
|
||||
id: '1',
|
||||
actionGroup: 'default',
|
||||
state: {},
|
||||
context: {},
|
||||
});
|
||||
const { uuid: uuid2, start: start2 } = alertsClient.report({
|
||||
id: '2',
|
||||
actionGroup: 'default',
|
||||
state: {},
|
||||
context: {},
|
||||
});
|
||||
|
||||
expect(mockCreateCurrent).toHaveBeenCalledTimes(2);
|
||||
expect(mockCreateCurrent).toHaveBeenNthCalledWith(1, '1');
|
||||
expect(mockCreateCurrent).toHaveBeenNthCalledWith(2, '2');
|
||||
expect(mockScheduleActionsCurrent).toHaveBeenCalledTimes(2);
|
||||
expect(mockScheduleActionsCurrent).toHaveBeenNthCalledWith(1, 'default', {});
|
||||
expect(mockScheduleActionsCurrent).toHaveBeenNthCalledWith(2, 'default', {});
|
||||
|
||||
expect(mockReplaceState).not.toHaveBeenCalled();
|
||||
spy.mockRestore();
|
||||
|
||||
expect(uuid1).toEqual('uuid1');
|
||||
expect(uuid2).toEqual('uuid2');
|
||||
expect(start1).toEqual(startedAtDate);
|
||||
expect(start2).toEqual(startedAtDate);
|
||||
});
|
||||
|
||||
test('should set context if defined', async () => {
|
||||
mockLegacyAlertsClient.factory.mockImplementation(() => ({ create: mockCreate }));
|
||||
const spy = jest
|
||||
|
@ -1671,13 +1771,7 @@ describe('Alerts Client', () => {
|
|||
alertsClientParams
|
||||
);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
|
||||
// Report 2 new alerts
|
||||
alertsClient.report({
|
||||
|
@ -1708,13 +1802,7 @@ describe('Alerts Client', () => {
|
|||
alertsClientParams
|
||||
);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
|
||||
// Report 2 new alerts
|
||||
alertsClient.report({
|
||||
|
@ -1751,13 +1839,7 @@ describe('Alerts Client', () => {
|
|||
'recovered'
|
||||
>(alertsClientParams);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
|
||||
// Report 2 new alerts
|
||||
alertsClient.report({
|
||||
|
@ -1881,14 +1963,11 @@ describe('Alerts Client', () => {
|
|||
);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
...defaultExecutionOpts,
|
||||
activeAlertsFromState: {
|
||||
'1': trackedAlert1Raw,
|
||||
'2': trackedAlert2Raw,
|
||||
},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
|
||||
// Set context on 2 recovered alerts
|
||||
|
@ -1911,14 +1990,11 @@ describe('Alerts Client', () => {
|
|||
);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
...defaultExecutionOpts,
|
||||
activeAlertsFromState: {
|
||||
'1': trackedAlert1Raw,
|
||||
'2': trackedAlert2Raw,
|
||||
},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
|
||||
// Set context on 2 recovered alerts
|
||||
|
@ -1948,13 +2024,7 @@ describe('Alerts Client', () => {
|
|||
'recovered'
|
||||
>(alertsClientParams);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
|
||||
// Report new alert
|
||||
alertsClient.report({
|
||||
|
@ -2050,13 +2120,10 @@ describe('Alerts Client', () => {
|
|||
>(alertsClientParams);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
...defaultExecutionOpts,
|
||||
activeAlertsFromState: {
|
||||
'1': trackedAlert1Raw,
|
||||
},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
|
||||
// Report ongoing alert
|
||||
|
@ -2155,13 +2222,10 @@ describe('Alerts Client', () => {
|
|||
>(alertsClientParams);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
...defaultExecutionOpts,
|
||||
activeAlertsFromState: {
|
||||
'1': trackedAlert1Raw,
|
||||
},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
|
||||
// Don't report any alerts so existing alert recovers
|
||||
|
@ -2234,13 +2298,7 @@ describe('Alerts Client', () => {
|
|||
alertsClientParams
|
||||
);
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
|
||||
const publicAlertsClient = alertsClient.client();
|
||||
|
||||
|
@ -2275,13 +2333,10 @@ describe('Alerts Client', () => {
|
|||
alertsClientParams
|
||||
);
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
...defaultExecutionOpts,
|
||||
activeAlertsFromState: {
|
||||
'1': trackedAlert1Raw,
|
||||
},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
|
||||
// report no alerts to allow existing alert to recover
|
||||
|
@ -2310,13 +2365,10 @@ describe('Alerts Client', () => {
|
|||
alertsClientParams
|
||||
);
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts,
|
||||
ruleLabel: `test: rule-name`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
...defaultExecutionOpts,
|
||||
activeAlertsFromState: {
|
||||
'1': trackedAlert1Raw,
|
||||
},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
|
||||
// report no alerts to allow existing alert to recover
|
||||
|
|
|
@@ -86,6 +86,7 @@ export class AlertsClient<
    primaryTerm: Record<string, number | undefined>;
  };

  private startedAtString: string | null = null;
  private rule: AlertRule;
  private ruleType: UntypedNormalizedRuleType;

@@ -114,6 +115,7 @@ export class AlertsClient<
  }

  public async initializeExecution(opts: InitializeExecutionOpts) {
    this.startedAtString = opts.startedAt ? opts.startedAt.toISOString() : null;
    await this.legacyAlertsClient.initializeExecution(opts);

    if (!this.ruleType.alerts?.shouldWrite) {

@@ -227,7 +229,7 @@ export class AlertsClient<

    return {
      uuid: legacyAlert.getUuid(),
      start: legacyAlert.getStart(),
      start: legacyAlert.getStart() ?? this.startedAtString,
    };
  }

@@ -280,7 +282,7 @@ export class AlertsClient<
      );
      return;
    }
    const currentTime = new Date().toISOString();
    const currentTime = this.startedAtString ?? new Date().toISOString();
    const esClient = await this.options.elasticsearchClientPromise;

    const { alertsToReturn, recoveredAlertsToReturn } =

@@ -118,6 +118,18 @@ const testAlert2 = {
  },
};

const defaultExecutionOpts = {
  maxAlerts: 1000,
  ruleLabel: `test: rule-name`,
  flappingSettings: DEFAULT_FLAPPING_SETTINGS,
  activeAlertsFromState: {
    '1': testAlert1,
    '2': testAlert2,
  },
  recoveredAlertsFromState: {},
  startedAt: null,
};

describe('Legacy Alerts Client', () => {
  beforeEach(() => {
    jest.clearAllMocks();

@ -130,16 +142,7 @@ describe('Legacy Alerts Client', () => {
|
|||
ruleType,
|
||||
});
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts: 1000,
|
||||
ruleLabel: `test: my-test-rule`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {
|
||||
'1': testAlert1,
|
||||
'2': testAlert2,
|
||||
},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
|
||||
expect(createAlertFactory).toHaveBeenCalledWith({
|
||||
alerts: {
|
||||
|
@ -159,16 +162,7 @@ describe('Legacy Alerts Client', () => {
|
|||
ruleType,
|
||||
});
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts: 1000,
|
||||
ruleLabel: `test: my-test-rule`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {
|
||||
'1': testAlert1,
|
||||
'2': testAlert2,
|
||||
},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
|
||||
alertsClient.factory();
|
||||
expect(getPublicAlertFactory).toHaveBeenCalledWith(mockCreateAlertFactory);
|
||||
|
@ -180,16 +174,7 @@ describe('Legacy Alerts Client', () => {
|
|||
ruleType,
|
||||
});
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts: 1000,
|
||||
ruleLabel: `test: my-test-rule`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {
|
||||
'1': testAlert1,
|
||||
'2': testAlert2,
|
||||
},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
|
||||
alertsClient.getAlert('1');
|
||||
expect(mockCreateAlertFactory.get).toHaveBeenCalledWith('1');
|
||||
|
@ -201,16 +186,7 @@ describe('Legacy Alerts Client', () => {
|
|||
ruleType,
|
||||
});
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts: 1000,
|
||||
ruleLabel: `test: my-test-rule`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {
|
||||
'1': testAlert1,
|
||||
'2': testAlert2,
|
||||
},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
|
||||
alertsClient.checkLimitUsage();
|
||||
expect(mockCreateAlertFactory.alertLimit.checkLimitUsage).toHaveBeenCalled();
|
||||
|
@ -222,16 +198,7 @@ describe('Legacy Alerts Client', () => {
|
|||
ruleType,
|
||||
});
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts: 1000,
|
||||
ruleLabel: `test: my-test-rule`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {
|
||||
'1': testAlert1,
|
||||
'2': testAlert2,
|
||||
},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
|
||||
alertsClient.hasReachedAlertLimit();
|
||||
expect(mockCreateAlertFactory.hasReachedAlertLimit).toHaveBeenCalled();
|
||||
|
@ -269,16 +236,7 @@ describe('Legacy Alerts Client', () => {
|
|||
ruleType,
|
||||
});
|
||||
|
||||
await alertsClient.initializeExecution({
|
||||
maxAlerts: 1000,
|
||||
ruleLabel: `ruleLogPrefix`,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
activeAlertsFromState: {
|
||||
'1': testAlert1,
|
||||
'2': testAlert2,
|
||||
},
|
||||
recoveredAlertsFromState: {},
|
||||
});
|
||||
await alertsClient.initializeExecution(defaultExecutionOpts);
|
||||
|
||||
alertsClient.processAndLogAlerts({
|
||||
eventLogger: alertingEventLogger,
|
||||
|
@@ -304,6 +262,7 @@ describe('Legacy Alerts Client', () => {
      autoRecoverAlerts: true,
      flappingSettings: DEFAULT_FLAPPING_SETTINGS,
      maintenanceWindowIds: ['window-id1', 'window-id2'],
      startedAt: null,
    });

    expect(trimRecoveredAlerts).toHaveBeenCalledWith(logger, {}, 1000);

@@ -334,7 +293,7 @@ describe('Legacy Alerts Client', () => {
        '2': new Alert<AlertInstanceContext, AlertInstanceContext>('2', testAlert2),
      },
      recoveredAlerts: {},
      ruleLogPrefix: 'ruleLogPrefix',
      ruleLogPrefix: 'test: rule-name',
      ruleRunMetricsStore,
      canSetRecoveryContext: false,
      shouldPersistAlerts: true,

@@ -49,6 +49,7 @@ export class LegacyAlertsClient<
  private maxAlerts: number = DEFAULT_MAX_ALERTS;
  private flappingSettings: RulesSettingsFlappingProperties = DEFAULT_FLAPPING_SETTINGS;
  private ruleLogPrefix: string = '';
  private startedAtString: string | null = null;

  // Alerts from the previous execution that are deserialized from the task state
  private trackedAlerts: TrackedAlerts<State, Context> = {

@@ -86,6 +87,7 @@ export class LegacyAlertsClient<
  public async initializeExecution({
    maxAlerts,
    ruleLabel,
    startedAt,
    flappingSettings,
    activeAlertsFromState,
    recoveredAlertsFromState,

@@ -93,6 +95,7 @@ export class LegacyAlertsClient<
    this.maxAlerts = maxAlerts;
    this.flappingSettings = flappingSettings;
    this.ruleLogPrefix = ruleLabel;
    this.startedAtString = startedAt ? startedAt.toISOString() : null;

    for (const id of keys(activeAlertsFromState)) {
      this.trackedAlerts.active[id] = new Alert<State, Context>(id, activeAlertsFromState[id]);

@@ -153,6 +156,7 @@ export class LegacyAlertsClient<
      autoRecoverAlerts: this.options.ruleType.autoRecoverAlerts ?? true,
      flappingSettings,
      maintenanceWindowIds,
      startedAt: this.startedAtString,
    });

    const { trimmedAlertsRecovered, earlyRecoveredAlerts } = trimRecoveredAlerts(

@@ -106,6 +106,7 @@ export interface ProcessAndLogAlertsOpts {
export interface InitializeExecutionOpts {
  maxAlerts: number;
  ruleLabel: string;
  startedAt: Date | null;
  flappingSettings: RulesSettingsFlappingProperties;
  activeAlertsFromState: Record<string, RawAlertInstance>;
  recoveredAlertsFromState: Record<string, RawAlertInstance>;

@@ -900,6 +900,18 @@ describe('Alerts Service', () => {
      );
    });

    test('should allow same context with different "shouldWrite" option', async () => {
      alertsService.register(TestRegistrationContext);
      alertsService.register({
        ...TestRegistrationContext,
        shouldWrite: false,
      });

      expect(logger.debug).toHaveBeenCalledWith(
        `Resources for context "test" have already been registered.`
      );
    });

    test('should not update index template if simulating template throws error', async () => {
      clusterClient.indices.simulateTemplate.mockRejectedValueOnce(new Error('fail'));

@@ -5,7 +5,7 @@
 * 2.0.
 */

import { isEmpty, isEqual } from 'lodash';
import { isEmpty, isEqual, omit } from 'lodash';
import { Logger, ElasticsearchClient } from '@kbn/core/server';
import { Observable } from 'rxjs';
import { alertFieldMap, ecsFieldMap, legacyAlertFieldMap } from '@kbn/alerts-as-data-utils';

@@ -275,7 +275,7 @@ export class AlertsService implements IAlertsService {
    // check whether this context has been registered before
    if (this.registeredContexts.has(context)) {
      const registeredOptions = this.registeredContexts.get(context);
      if (!isEqual(opts, registeredOptions)) {
      if (!isEqual(omit(opts, 'shouldWrite'), omit(registeredOptions, 'shouldWrite'))) {
        throw new Error(`${context} has already been registered with different options`);
      }
      this.options.logger.debug(`Resources for context "${context}" have already been registered.`);

@ -117,6 +117,58 @@ describe('processAlerts', () => {
|
|||
expect(newAlert2State.end).not.toBeDefined();
|
||||
});
|
||||
|
||||
test('sets start time with startedAt in new alert state if provided', () => {
|
||||
const newAlert1 = new Alert<AlertInstanceState, AlertInstanceContext>('1');
|
||||
const newAlert2 = new Alert<AlertInstanceState, AlertInstanceContext>('2');
|
||||
const existingAlert1 = new Alert<AlertInstanceState, AlertInstanceContext>('3');
|
||||
const existingAlert2 = new Alert<AlertInstanceState, AlertInstanceContext>('4');
|
||||
|
||||
const existingAlerts = {
|
||||
'3': existingAlert1,
|
||||
'4': existingAlert2,
|
||||
};
|
||||
|
||||
const updatedAlerts = {
|
||||
...cloneDeep(existingAlerts),
|
||||
'1': newAlert1,
|
||||
'2': newAlert2,
|
||||
};
|
||||
|
||||
updatedAlerts['1'].scheduleActions('default' as never, { foo: '1' });
|
||||
updatedAlerts['2'].scheduleActions('default' as never, { foo: '1' });
|
||||
updatedAlerts['3'].scheduleActions('default' as never, { foo: '1' });
|
||||
updatedAlerts['4'].scheduleActions('default' as never, { foo: '2' });
|
||||
|
||||
expect(newAlert1.getState()).toStrictEqual({});
|
||||
expect(newAlert2.getState()).toStrictEqual({});
|
||||
|
||||
const { newAlerts } = processAlerts({
|
||||
alerts: updatedAlerts,
|
||||
existingAlerts,
|
||||
previouslyRecoveredAlerts: {},
|
||||
hasReachedAlertLimit: false,
|
||||
alertLimit: 10,
|
||||
autoRecoverAlerts: true,
|
||||
flappingSettings: DISABLE_FLAPPING_SETTINGS,
|
||||
maintenanceWindowIds: [],
|
||||
startedAt: '2023-10-03T20:03:08.716Z',
|
||||
});
|
||||
|
||||
expect(newAlerts).toEqual({ '1': newAlert1, '2': newAlert2 });
|
||||
|
||||
const newAlert1State = newAlerts['1'].getState();
|
||||
const newAlert2State = newAlerts['2'].getState();
|
||||
|
||||
expect(newAlert1State.start).toEqual('2023-10-03T20:03:08.716Z');
|
||||
expect(newAlert2State.start).toEqual('2023-10-03T20:03:08.716Z');
|
||||
|
||||
expect(newAlert1State.duration).toEqual('0');
|
||||
expect(newAlert2State.duration).toEqual('0');
|
||||
|
||||
expect(newAlert1State.end).not.toBeDefined();
|
||||
expect(newAlert2State.end).not.toBeDefined();
|
||||
});
|
||||
|
||||
test('sets maintenance window IDs in new alert state', () => {
|
||||
const newAlert1 = new Alert<AlertInstanceState, AlertInstanceContext>('1');
|
||||
const newAlert2 = new Alert<AlertInstanceState, AlertInstanceContext>('2');
|
||||
|
@ -481,6 +533,64 @@ describe('processAlerts', () => {
|
|||
expect(previouslyRecoveredAlert2State.end).not.toBeDefined();
|
||||
});
|
||||
|
||||
test('sets start time with startedAt in active alert state if alert was previously recovered', () => {
|
||||
const previouslyRecoveredAlert1 = new Alert<AlertInstanceState, AlertInstanceContext>('1');
|
||||
const previouslyRecoveredAlert2 = new Alert<AlertInstanceState, AlertInstanceContext>('2');
|
||||
const existingAlert1 = new Alert<AlertInstanceState, AlertInstanceContext>('3');
|
||||
const existingAlert2 = new Alert<AlertInstanceState, AlertInstanceContext>('4');
|
||||
|
||||
const existingAlerts = {
|
||||
'3': existingAlert1,
|
||||
'4': existingAlert2,
|
||||
};
|
||||
|
||||
const previouslyRecoveredAlerts = {
|
||||
'1': previouslyRecoveredAlert1,
|
||||
'2': previouslyRecoveredAlert2,
|
||||
};
|
||||
|
||||
const updatedAlerts = {
|
||||
...cloneDeep(existingAlerts),
|
||||
...cloneDeep(previouslyRecoveredAlerts),
|
||||
};
|
||||
|
||||
updatedAlerts['1'].scheduleActions('default' as never, { foo: '1' });
|
||||
updatedAlerts['2'].scheduleActions('default' as never, { foo: '1' });
|
||||
updatedAlerts['3'].scheduleActions('default' as never, { foo: '1' });
|
||||
updatedAlerts['4'].scheduleActions('default' as never, { foo: '2' });
|
||||
|
||||
expect(updatedAlerts['1'].getState()).toStrictEqual({});
|
||||
expect(updatedAlerts['2'].getState()).toStrictEqual({});
|
||||
|
||||
const { activeAlerts } = processAlerts({
|
||||
alerts: updatedAlerts,
|
||||
existingAlerts,
|
||||
previouslyRecoveredAlerts,
|
||||
hasReachedAlertLimit: false,
|
||||
alertLimit: 10,
|
||||
autoRecoverAlerts: true,
|
||||
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
|
||||
maintenanceWindowIds: [],
|
||||
startedAt: '2023-10-03T20:03:08.716Z',
|
||||
});
|
||||
|
||||
expect(
|
||||
Object.keys(activeAlerts).map((id) => ({ [id]: activeAlerts[id].getFlappingHistory() }))
|
||||
).toEqual([{ '1': [true] }, { '2': [true] }, { '3': [false] }, { '4': [false] }]);
|
||||
|
||||
const previouslyRecoveredAlert1State = activeAlerts['1'].getState();
|
||||
const previouslyRecoveredAlert2State = activeAlerts['2'].getState();
|
||||
|
||||
expect(previouslyRecoveredAlert1State.start).toEqual('2023-10-03T20:03:08.716Z');
|
||||
expect(previouslyRecoveredAlert2State.start).toEqual('2023-10-03T20:03:08.716Z');
|
||||
|
||||
expect(previouslyRecoveredAlert1State.duration).toEqual('0');
|
||||
expect(previouslyRecoveredAlert2State.duration).toEqual('0');
|
||||
|
||||
expect(previouslyRecoveredAlert1State.end).not.toBeDefined();
|
||||
expect(previouslyRecoveredAlert2State.end).not.toBeDefined();
|
||||
});
|
||||
|
||||
test('should not set maintenance window IDs for active alerts', () => {
|
||||
const newAlert = new Alert<AlertInstanceState, AlertInstanceContext>('1');
|
||||
const existingAlert1 = new Alert<AlertInstanceState, AlertInstanceContext>('2');
|
||||
|
@ -614,6 +724,50 @@ describe('processAlerts', () => {
|
|||
expect(recoveredAlert2State.end).toEqual('1970-01-01T00:00:00.000Z');
|
||||
});
|
||||
|
||||
test('updates duration in recovered alerts if start is available and adds end time to startedAt if provided', () => {
|
||||
const activeAlert = new Alert<AlertInstanceState, AlertInstanceContext>('1');
|
||||
const recoveredAlert1 = new Alert<AlertInstanceState, AlertInstanceContext>('2');
|
||||
const recoveredAlert2 = new Alert<AlertInstanceState, AlertInstanceContext>('3');
|
||||
|
||||
const existingAlerts = {
|
||||
'1': activeAlert,
|
||||
'2': recoveredAlert1,
|
||||
'3': recoveredAlert2,
|
||||
};
|
||||
existingAlerts['2'].replaceState({ start: '1969-12-30T00:00:00.000Z', duration: 33000 });
|
||||
existingAlerts['3'].replaceState({ start: '1969-12-31T07:34:00.000Z', duration: 23532 });
|
||||
|
||||
const updatedAlerts = cloneDeep(existingAlerts);
|
||||
|
||||
updatedAlerts['1'].scheduleActions('default' as never, { foo: '1' });
|
||||
|
||||
const { recoveredAlerts } = processAlerts({
|
||||
alerts: updatedAlerts,
|
||||
existingAlerts,
|
||||
previouslyRecoveredAlerts: {},
|
||||
hasReachedAlertLimit: false,
|
||||
alertLimit: 10,
|
||||
autoRecoverAlerts: true,
|
||||
flappingSettings: DISABLE_FLAPPING_SETTINGS,
|
||||
maintenanceWindowIds: [],
|
||||
startedAt: '2023-10-03T20:03:08.716Z',
|
||||
});
|
||||
|
||||
expect(recoveredAlerts).toEqual({ '2': updatedAlerts['2'], '3': updatedAlerts['3'] });
|
||||
|
||||
const recoveredAlert1State = recoveredAlerts['2'].getState();
|
||||
const recoveredAlert2State = recoveredAlerts['3'].getState();
|
||||
|
||||
expect(recoveredAlert1State.start).toEqual('1969-12-30T00:00:00.000Z');
|
||||
expect(recoveredAlert2State.start).toEqual('1969-12-31T07:34:00.000Z');
|
||||
|
||||
expect(recoveredAlert1State.duration).toEqual('1696536188716000000');
|
||||
expect(recoveredAlert2State.duration).toEqual('1696422548716000000');
|
||||
|
||||
expect(recoveredAlert1State.end).toEqual('2023-10-03T20:03:08.716Z');
|
||||
expect(recoveredAlert2State.end).toEqual('2023-10-03T20:03:08.716Z');
|
||||
});
|
||||
|
||||
test('does not update duration or set end in recovered alerts if start is not available', () => {
|
||||
const activeAlert = new Alert<AlertInstanceState, AlertInstanceContext>('1');
|
||||
const recoveredAlert1 = new Alert<AlertInstanceState, AlertInstanceContext>('2');
|
||||
|
|
|
@@ -22,6 +22,7 @@ interface ProcessAlertsOpts<
  hasReachedAlertLimit: boolean;
  alertLimit: number;
  autoRecoverAlerts: boolean;
  startedAt?: string | null;
  flappingSettings: RulesSettingsFlappingProperties;
  maintenanceWindowIds: string[];
}

@@ -52,6 +53,7 @@ export function processAlerts<
  autoRecoverAlerts,
  flappingSettings,
  maintenanceWindowIds,
  startedAt,
}: ProcessAlertsOpts<State, Context>): ProcessAlertsResult<
  State,
  Context,

@@ -65,7 +67,8 @@ export function processAlerts<
        previouslyRecoveredAlerts,
        alertLimit,
        flappingSettings,
        maintenanceWindowIds
        maintenanceWindowIds,
        startedAt
      )
    : processAlertsHelper(
        alerts,

@@ -73,7 +76,8 @@ export function processAlerts<
        previouslyRecoveredAlerts,
        autoRecoverAlerts,
        flappingSettings,
        maintenanceWindowIds
        maintenanceWindowIds,
        startedAt
      );
}

@@ -88,12 +92,13 @@ function processAlertsHelper<
  previouslyRecoveredAlerts: Record<string, Alert<State, Context>>,
  autoRecoverAlerts: boolean,
  flappingSettings: RulesSettingsFlappingProperties,
  maintenanceWindowIds: string[]
  maintenanceWindowIds: string[],
  startedAt?: string | null
): ProcessAlertsResult<State, Context, ActionGroupIds, RecoveryActionGroupId> {
  const existingAlertIds = new Set(Object.keys(existingAlerts));
  const previouslyRecoveredAlertsIds = new Set(Object.keys(previouslyRecoveredAlerts));

  const currentTime = new Date().toISOString();
  const currentTime = startedAt ?? new Date().toISOString();
  const newAlerts: Record<string, Alert<State, Context, ActionGroupIds>> = {};
  const activeAlerts: Record<string, Alert<State, Context, ActionGroupIds>> = {};
  const currentRecoveredAlerts: Record<string, Alert<State, Context, RecoveryActionGroupId>> = {};

@@ -183,7 +188,8 @@ function processAlertsLimitReached<
  previouslyRecoveredAlerts: Record<string, Alert<State, Context>>,
  alertLimit: number,
  flappingSettings: RulesSettingsFlappingProperties,
  maintenanceWindowIds: string[]
  maintenanceWindowIds: string[],
  startedAt?: string | null
): ProcessAlertsResult<State, Context, ActionGroupIds, RecoveryActionGroupId> {
  const existingAlertIds = new Set(Object.keys(existingAlerts));
  const previouslyRecoveredAlertsIds = new Set(Object.keys(previouslyRecoveredAlerts));

@@ -193,7 +199,7 @@ function processAlertsLimitReached<
  // - pass through all existing alerts as active
  // - add any new alerts, up to the max allowed

  const currentTime = new Date().toISOString();
  const currentTime = startedAt ?? new Date().toISOString();
  const newAlerts: Record<string, Alert<State, Context, ActionGroupIds>> = {};

  // all existing alerts stay active

@@ -379,6 +379,7 @@ export class TaskRunner<
      maxAlerts: this.maxAlerts,
      ruleLabel,
      flappingSettings,
      startedAt: this.taskInstance.startedAt!,
      activeAlertsFromState: alertRawInstances,
      recoveredAlertsFromState: alertRecoveredRawInstances,
    });

@@ -767,6 +767,7 @@ describe('Task Runner', () => {
      maxAlerts: 1000,
      recoveredAlertsFromState: {},
      ruleLabel: "test:1: 'rule-name'",
      startedAt: new Date(DATE_1970),
    });
    expect(alertsClientNotToUse.initializeExecution).not.toHaveBeenCalled();

@@ -18,6 +18,7 @@ import {
} from '@kbn/rule-registry-plugin/common/parse_technical_fields';
import { ES_FIELD_TYPES } from '@kbn/field-types';
import { set } from '@kbn/safer-lodash-set';
import { Alert } from '@kbn/alerts-as-data-utils';
import { ParsedExperimentalFields } from '@kbn/rule-registry-plugin/common/parse_experimental_fields';
import { LINK_TO_METRICS_EXPLORER } from '../../../../common/alerting/metrics';
import { getInventoryViewInAppUrl } from '../../../../common/alerting/metrics/alert_link';

@@ -223,8 +224,10 @@ export const flattenAdditionalContext = (
  return additionalContext ? flattenObject(additionalContext) : {};
};

export const getContextForRecoveredAlerts = (
  alertHitSource: Partial<ParsedTechnicalFields & ParsedExperimentalFields> | undefined | null
export const getContextForRecoveredAlerts = <
  T extends Alert | (ParsedTechnicalFields & ParsedExperimentalFields)
>(
  alertHitSource: Partial<T> | undefined | null
): AdditionalContext => {
  const alert = alertHitSource ? unflattenObject(alertHitSource) : undefined;

File diff suppressed because it is too large
@@ -6,7 +6,7 @@
 */

import { i18n } from '@kbn/i18n';
import { ALERT_ACTION_GROUP, ALERT_EVALUATION_VALUES, ALERT_REASON } from '@kbn/rule-data-utils';
import { ALERT_EVALUATION_VALUES, ALERT_REASON } from '@kbn/rule-data-utils';
import { isEqual } from 'lodash';
import {
  ActionGroupIdsOf,

@@ -14,9 +14,10 @@ import {
  AlertInstanceState as AlertState,
  RecoveredActionGroup,
} from '@kbn/alerting-plugin/common';
import { Alert, RuleTypeState } from '@kbn/alerting-plugin/server';
import { RuleExecutorOptions, RuleTypeState } from '@kbn/alerting-plugin/server';
import type { TimeUnitChar } from '@kbn/observability-plugin/common';
import { getAlertUrl } from '@kbn/observability-plugin/common';
import { ObservabilityMetricsAlert } from '@kbn/alerts-as-data-utils';
import { getOriginalActionGroup } from '../../../utils/get_original_action_group';
import { AlertStates, Comparator } from '../../../../common/alerting/metrics';
import { createFormatter } from '../../../../common/formatters';

@@ -44,6 +45,14 @@ import { EvaluatedRuleParams, evaluateRule } from './lib/evaluate_rule';
import { MissingGroupsRecord } from './lib/check_missing_group';
import { convertStringsToMissingGroupsRecord } from './lib/convert_strings_to_missing_groups_record';

export type MetricThresholdAlert = Omit<
  ObservabilityMetricsAlert,
  'kibana.alert.evaluation.values'
> & {
  // Defining a custom type for this because the schema generation script doesn't allow explicit null values
  'kibana.alert.evaluation.values'?: Array<number | null>;
};

export type MetricThresholdRuleParams = Record<string, any>;
export type MetricThresholdRuleTypeState = RuleTypeState & {
  lastRunTimestamp?: number;

@@ -68,28 +77,27 @@ type MetricThresholdAllowedActionGroups = ActionGroupIdsOf<
  typeof FIRED_ACTIONS | typeof WARNING_ACTIONS | typeof NO_DATA_ACTIONS
>;

type MetricThresholdAlert = Alert<
  MetricThresholdAlertState,
  MetricThresholdAlertContext,
  MetricThresholdAllowedActionGroups
>;

type MetricThresholdAlertFactory = (
type MetricThresholdAlertReporter = (
  id: string,
  reason: string,
  actionGroup: MetricThresholdActionGroup,
  context: MetricThresholdAlertContext,
  additionalContext?: AdditionalContext | null,
  evaluationValues?: Array<number | null>
) => MetricThresholdAlert;
) => void;

export const createMetricThresholdExecutor = (libs: InfraBackendLibs) =>
  libs.metricsRules.createLifecycleRuleExecutor<
    MetricThresholdRuleParams,
    MetricThresholdRuleTypeState,
    MetricThresholdAlertState,
    MetricThresholdAlertContext,
    MetricThresholdAllowedActionGroups
  >(async function (options) {
export const createMetricThresholdExecutor =
  (libs: InfraBackendLibs) =>
  async (
    options: RuleExecutorOptions<
      MetricThresholdRuleParams,
      MetricThresholdRuleTypeState,
      MetricThresholdAlertState,
      MetricThresholdAlertContext,
      MetricThresholdAllowedActionGroups,
      MetricThresholdAlert
    >
  ) => {
    const startTime = Date.now();

    const {

@ -110,30 +118,43 @@ export const createMetricThresholdExecutor = (libs: InfraBackendLibs) =>
|
|||
executionId,
|
||||
});
|
||||
|
||||
const {
|
||||
alertWithLifecycle,
|
||||
savedObjectsClient,
|
||||
getAlertUuid,
|
||||
getAlertStartedDate,
|
||||
getAlertByAlertUuid,
|
||||
} = services;
|
||||
const { alertsClient, savedObjectsClient } = services;
|
||||
if (!alertsClient) {
|
||||
throw new Error(`Expected alertsClient to be defined but it was not!`);
|
||||
}
|
||||
|
||||
const alertFactory: MetricThresholdAlertFactory = (
|
||||
const alertReporter: MetricThresholdAlertReporter = async (
|
||||
id,
|
||||
reason,
|
||||
actionGroup,
|
||||
contextWithoutAlertDetailsUrl,
|
||||
additionalContext,
|
||||
evaluationValues
|
||||
) =>
|
||||
alertWithLifecycle({
|
||||
) => {
|
||||
const { uuid, start } = alertsClient.report({
|
||||
id,
|
||||
fields: {
|
||||
actionGroup,
|
||||
});
|
||||
|
||||
alertsClient.setAlertData({
|
||||
id,
|
||||
payload: {
|
||||
[ALERT_REASON]: reason,
|
||||
[ALERT_ACTION_GROUP]: actionGroup,
|
||||
[ALERT_EVALUATION_VALUES]: evaluationValues,
|
||||
...flattenAdditionalContext(additionalContext),
|
||||
},
|
||||
context: {
|
||||
...contextWithoutAlertDetailsUrl,
|
||||
alertDetailsUrl: await getAlertUrl(
|
||||
uuid,
|
||||
spaceId,
|
||||
start ?? startedAt.toISOString(),
|
||||
libs.alertsLocator,
|
||||
libs.basePath.publicBaseUrl
|
||||
),
|
||||
},
|
||||
});
|
||||
};
|
||||
|
||||
const {
|
||||
sourceId,
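
For illustration: the reporter above leans on the framework alerts client in two steps — report() registers the active alert and hands back its uuid and start time, then setAlertData() attaches the alerts-as-data payload and the action context. A minimal sketch with a hand-rolled StandInAlertsClient interface (an assumption for the example, not the framework's actual typings):

// Pared-down stand-in for the framework alerts client, enough to show the two-step flow.
interface StandInAlertsClient {
  report(args: { id: string; actionGroup: string }): { uuid: string; start?: string };
  setAlertData(args: {
    id: string;
    payload?: Record<string, unknown>;
    context?: Record<string, unknown>;
  }): void;
}

function reportExampleAlert(alertsClient: StandInAlertsClient, startedAt: Date) {
  // Report the alert for this group; the stand-in mirrors how the executor gets back uuid/start.
  const { uuid, start } = alertsClient.report({
    id: 'host-1',
    actionGroup: 'metrics.threshold.fired',
  });

  // Attach the alerts-as-data payload and the action context in a second call.
  alertsClient.setAlertData({
    id: 'host-1',
    payload: {
      'kibana.alert.reason': 'CPU usage is above the threshold',
      'kibana.alert.evaluation.values': [0.95],
    },
    context: {
      // Placeholder URL; the executor builds this with getAlertUrl(uuid, spaceId, ...).
      alertDetailsUrl: `https://kibana.example/app/observability/alerts/${uuid}`,
      timestamp: start ?? startedAt.toISOString(),
    },
  });
}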

@ -154,19 +175,7 @@ export const createMetricThresholdExecutor = (libs: InfraBackendLibs) =>
const timestamp = startedAt.toISOString();
const actionGroupId = FIRED_ACTIONS_ID; // Change this to an Error action group when able
const reason = buildInvalidQueryAlertReason(params.filterQueryText);
const alert = alertFactory(UNGROUPED_FACTORY_KEY, reason, actionGroupId);
const alertUuid = getAlertUuid(UNGROUPED_FACTORY_KEY);
const indexedStartedAt =
getAlertStartedDate(UNGROUPED_FACTORY_KEY) ?? startedAt.toISOString();

alert.scheduleActions(actionGroupId, {
alertDetailsUrl: await getAlertUrl(
alertUuid,
spaceId,
indexedStartedAt,
libs.alertsLocator,
libs.basePath.publicBaseUrl
),
const alertContext = {
alertState: stateToAlertMessage[AlertStates.ERROR],
group: UNGROUPED_FACTORY_KEY,
metric: mapToConditionsLookup(criteria, (c) => c.metric),

@ -174,7 +183,9 @@ export const createMetricThresholdExecutor = (libs: InfraBackendLibs) =>
timestamp,
value: null,
viewInAppUrl: getViewInMetricsAppUrl(libs.basePath, spaceId),
});
};

await alertReporter(UNGROUPED_FACTORY_KEY, reason, actionGroupId, alertContext);

return {
state: {

@ -317,25 +328,7 @@ export const createMetricThresholdExecutor = (libs: InfraBackendLibs) =>
return acc;
}, []);

const alert = alertFactory(
`${group}`,
reason,
actionGroupId,
additionalContext,
evaluationValues
);
const alertUuid = getAlertUuid(group);
const indexedStartedAt = getAlertStartedDate(group) ?? startedAt.toISOString();
scheduledActionsCount++;

alert.scheduleActions(actionGroupId, {
alertDetailsUrl: await getAlertUrl(
alertUuid,
spaceId,
indexedStartedAt,
libs.alertsLocator,
libs.basePath.publicBaseUrl
),
const alertContext = {
alertState: stateToAlertMessage[nextState],
group,
groupByKeys: groupByKeysObjectMapping[group],

@ -365,29 +358,37 @@ export const createMetricThresholdExecutor = (libs: InfraBackendLibs) =>
}),
viewInAppUrl: getViewInMetricsAppUrl(libs.basePath, spaceId),
...additionalContext,
});
};

await alertReporter(
`${group}`,
reason,
actionGroupId,
alertContext,
additionalContext,
evaluationValues
);
scheduledActionsCount++;
}
}

const { getRecoveredAlerts } = services.alertFactory.done();
const recoveredAlerts = getRecoveredAlerts();

const recoveredAlerts = alertsClient?.getRecoveredAlerts() ?? [];
const groupByKeysObjectForRecovered = getGroupByObject(
params.groupBy,
new Set<string>(recoveredAlerts.map((recoveredAlert) => recoveredAlert.getId()))
new Set<string>(recoveredAlerts.map((recoveredAlert) => recoveredAlert.alert.getId()))
);

for (const alert of recoveredAlerts) {
const recoveredAlertId = alert.getId();
const alertUuid = getAlertUuid(recoveredAlertId);
for (const recoveredAlert of recoveredAlerts) {
const recoveredAlertId = recoveredAlert.alert.getId();
const alertUuid = recoveredAlert.alert.getUuid();
const timestamp = startedAt.toISOString();
const indexedStartedAt = getAlertStartedDate(recoveredAlertId) ?? timestamp;
const indexedStartedAt = recoveredAlert.alert.getStart() ?? timestamp;

const alertHits = alertUuid ? await getAlertByAlertUuid(alertUuid) : undefined;
const alertHits = recoveredAlert.hit;
const additionalContext = getContextForRecoveredAlerts(alertHits);
const originalActionGroup = getOriginalActionGroup(alertHits);

alert.setContext({
recoveredAlert.alert.setContext({
alertDetailsUrl: await getAlertUrl(
alertUuid,
spaceId,

@ -427,7 +428,7 @@ export const createMetricThresholdExecutor = (libs: InfraBackendLibs) =>
filterQuery: params.filterQuery,
},
};
});
};

export const FIRED_ACTIONS = {
id: 'metrics.threshold.fired',
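
For illustration: recovered alerts now come straight from the alerts client, and each entry already carries the alert handle plus its most recently indexed document, so the executor no longer looks documents up by uuid. A small sketch with stand-in shapes (assumed for the example, not the framework's actual types):

// Stand-in for what alertsClient.getRecoveredAlerts() hands back in this flow.
interface StandInRecoveredAlert {
  alert: {
    getId(): string;
    getUuid(): string;
    getStart(): string | undefined;
    setContext(context: Record<string, unknown>): void;
  };
  hit?: Record<string, unknown>;
}

function setRecoveryContext(recoveredAlerts: StandInRecoveredAlert[], startedAt: Date) {
  for (const recoveredAlert of recoveredAlerts) {
    const indexedStartedAt = recoveredAlert.alert.getStart() ?? startedAt.toISOString();
    recoveredAlert.alert.setContext({
      group: recoveredAlert.alert.getId(),
      // The previously indexed document is available directly; no extra search round-trip.
      originalActionGroup: recoveredAlert.hit?.['kibana.alert.action_group'],
      timestamp: indexedStartedAt,
    });
  }
}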

@ -11,6 +11,7 @@ import { i18n } from '@kbn/i18n';
import { ActionGroupIdsOf } from '@kbn/alerting-plugin/common';
import {
GetViewInAppRelativeUrlFnOpts,
IRuleTypeAlerts,
PluginSetupContract,
RuleType,
} from '@kbn/alerting-plugin/server';

@ -44,6 +45,7 @@ import {
FIRED_ACTIONS,
WARNING_ACTIONS,
NO_DATA_ACTIONS,
MetricThresholdAlert,
} from './metric_threshold_executor';
import { MetricsRulesTypeAlertDefinition } from '../register_rule_types';
import { O11Y_AAD_FIELDS } from '../../../../common/constants';

@ -197,7 +199,10 @@ export async function registerMetricThresholdRuleType(
},
category: DEFAULT_APP_CATEGORIES.observability.id,
producer: 'infrastructure',
alerts: MetricsRulesTypeAlertDefinition,
alerts: {
...MetricsRulesTypeAlertDefinition,
shouldWrite: true,
} as IRuleTypeAlerts<MetricThresholdAlert>,
getViewInAppRelativeUrl: ({ rule }: GetViewInAppRelativeUrlFnOpts<{}>) =>
observabilityPaths.ruleDetails(rule.id),
});
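
For illustration: the registration spreads the shared alert definition and flips shouldWrite on, so this rule type starts writing its own alerts-as-data documents while the shared default (shown in the following hunk) stays false. A simplified sketch with a StandInRuleTypeAlerts shape (assumed for the example, not the real IRuleTypeAlerts):

interface StandInRuleTypeAlerts {
  context: string;
  mappings: { fieldMap: Record<string, unknown> };
  useEcs?: boolean;
  useLegacyAlerts?: boolean;
  shouldWrite?: boolean;
}

// Shared definition used by all metrics rule types; writing stays off by default.
const sharedDefinition: StandInRuleTypeAlerts = {
  context: 'observability.metrics',
  mappings: { fieldMap: {} },
  useEcs: true,
  useLegacyAlerts: true,
  shouldWrite: false,
};

// Only the metric threshold rule type opts into writing its own AaD documents.
const metricThresholdAlerts: StandInRuleTypeAlerts = {
  ...sharedDefinition,
  shouldWrite: true,
};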

@ -29,6 +29,7 @@ export const MetricsRulesTypeAlertDefinition: IRuleTypeAlerts = {
mappings: { fieldMap: legacyExperimentalFieldMap },
useEcs: true,
useLegacyAlerts: true,
shouldWrite: false,
};

const registerRuleTypes = (

@ -5,12 +5,15 @@
* 2.0.
*/

import { Alert } from '@kbn/alerts-as-data-utils';
import { ALERT_ACTION_GROUP } from '@kbn/rule-data-utils';
import { ParsedTechnicalFields } from '@kbn/rule-registry-plugin/common';
import { ParsedExperimentalFields } from '@kbn/rule-registry-plugin/common/parse_experimental_fields';

export const getOriginalActionGroup = (
alertHitSource: Partial<ParsedTechnicalFields & ParsedExperimentalFields> | undefined | null
export const getOriginalActionGroup = <
T extends Alert | (ParsedTechnicalFields & ParsedExperimentalFields)
>(
alertHitSource: Partial<T> | undefined | null
) => {
return alertHitSource?.[ALERT_ACTION_GROUP];
};
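
For illustration: with the widened generic, the helper accepts either a framework alerts-as-data document or the legacy parsed technical/experimental fields and reads the same action-group field off both. A small self-contained sketch (the 'kibana.alert.action_group' literal stands in for the ALERT_ACTION_GROUP constant):

type StandInAlertDoc = { 'kibana.alert.action_group'?: string };

// Mirrors the shape of the helper: read one field off whatever document shape was passed in.
const getOriginalActionGroupExample = (hit: Partial<StandInAlertDoc> | undefined | null) =>
  hit?.['kibana.alert.action_group'];

const fromAadDoc = getOriginalActionGroupExample({
  'kibana.alert.action_group': 'metrics.threshold.fired',
});
const fromMissingHit = getOriginalActionGroupExample(undefined); // -> undefined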