[RAC][UPTIME] Add {{context.reason}} as an action variable to the rule templating language (#126124)

* [RAC][UPTIME] Add reason msg for status check

* [RAC][UPTIME] Extract action variables into another file

* Fix tests

* Fix tests

* Update i18n files

* Fix typo in the tests

* Fix tests

* Cover the autoGenerated case and update its tests

* Update duration anomaly tests
Faisal Kanout 2022-02-24 18:05:36 +03:00 committed by GitHub
parent 408969601d
commit 5142f204d8
5 changed files with 111 additions and 28 deletions
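
For reference, a minimal sketch (not part of this change) of how the new variable could be used from a rule action's message template. The connector setup and message wording below are assumptions for illustration; {{context.reason}} is the variable added here, while {{context.message}} and {{context.downMonitorsWithGeo}} already existed for the monitor status rule.

// Hypothetical action parameters for a connector attached to an uptime rule.
// The alerting framework renders the mustache placeholders with the values the
// rule passes to scheduleActions (see the diffs below).
const actionParams = {
  message:
    'Uptime alert: {{context.reason}}\n' +
    'Down monitors: {{context.downMonitorsWithGeo}}',
};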


@@ -0,0 +1,43 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { i18n } from '@kbn/i18n';
export const MESSAGE = 'message';
export const MONITOR_WITH_GEO = 'downMonitorsWithGeo';
export const ALERT_REASON_MSG = 'reason';
export const ACTION_VARIABLES = {
[MESSAGE]: {
name: MESSAGE,
description: i18n.translate(
'xpack.uptime.alerts.monitorStatus.actionVariables.context.message.description',
{
defaultMessage: 'A generated message summarizing the currently down monitors',
}
),
},
[MONITOR_WITH_GEO]: {
name: MONITOR_WITH_GEO,
description: i18n.translate(
'xpack.uptime.alerts.monitorStatus.actionVariables.context.downMonitorsWithGeo.description',
{
defaultMessage:
'A generated summary that shows some or all of the monitors detected as "down" by the alert',
}
),
},
[ALERT_REASON_MSG]: {
name: ALERT_REASON_MSG,
description: i18n.translate(
'xpack.uptime.alerts.monitorStatus.actionVariables.context.alertReasonMessage.description',
{
defaultMessage: 'A concise description of the reason for the alert',
}
),
},
};


@@ -16,6 +16,7 @@ import { DynamicSettings } from '../../../common/runtime_types';
import { createRuleTypeMocks, bootstrapDependencies } from './test_utils';
import { getSeverityType } from '../../../../ml/common/util/anomaly_utils';
import { Ping } from '../../../common/runtime_types/ping';
import { ALERT_REASON_MSG } from './action_variables';
interface MockAnomaly {
severity: AnomaliesTableRecord['severity'];
@@ -157,6 +158,7 @@ describe('duration anomaly alert', () => {
);
const [{ value: alertInstanceMock }] = alertWithLifecycle.mock.results;
expect(alertInstanceMock.replaceState).toHaveBeenCalledTimes(2);
const reasonMessages: string[] = [];
mockAnomaliesResult.anomalies.forEach((anomaly, index) => {
const slowestResponse = Math.round(anomaly.actualSort / 1000);
const typicalResponse = Math.round(anomaly.typicalSort / 1000);
@@ -180,6 +182,7 @@ Response times as high as ${slowestResponse} ms have been detected from location
},
id: `${DURATION_ANOMALY.id}${index}`,
});
expect(alertInstanceMock.replaceState).toBeCalledWith({
firstCheckedAt: 'date',
firstTriggeredAt: undefined,
@@ -198,9 +201,35 @@ Response times as high as ${slowestResponse} ms have been detected from location
slowestAnomalyResponse: `${slowestResponse} ms`,
bucketSpan: anomaly.source.bucket_span,
});
const reasonMsg = `Abnormal (${getSeverityType(
anomaly.severity
)} level) response time detected on uptime-monitor with url ${
mockPing.url?.full
} at date. Anomaly severity score is ${anomaly.severity}.
Response times as high as ${slowestResponse} ms have been detected from location ${
anomaly.entityValue
}. Expected response time is ${typicalResponse} ms.`;
reasonMessages.push(reasonMsg);
});
expect(alertInstanceMock.scheduleActions).toHaveBeenCalledTimes(2);
expect(alertInstanceMock.scheduleActions).toBeCalledWith(DURATION_ANOMALY.id);
expect(alertInstanceMock.scheduleActions.mock.calls[0]).toMatchInlineSnapshot(`
Array [
"xpack.uptime.alerts.actionGroups.durationAnomaly",
Object {
"${ALERT_REASON_MSG}": "${reasonMessages[0]}",
},
]
`);
expect(alertInstanceMock.scheduleActions.mock.calls[1]).toMatchInlineSnapshot(`
Array [
"xpack.uptime.alerts.actionGroups.durationAnomaly",
Object {
"${ALERT_REASON_MSG}": "${reasonMessages[1]}",
},
]
`);
});
});
});


@@ -26,6 +26,7 @@ import { getMLJobId } from '../../../common/lib';
import { DurationAnomalyTranslations as CommonDurationAnomalyTranslations } from '../../../common/translations';
import { createUptimeESClient } from '../lib';
import { ALERT_REASON_MSG, ACTION_VARIABLES } from './action_variables';
export type ActionGroupIds = ActionGroupIdsOf<typeof DURATION_ANOMALY>;
@@ -92,7 +93,7 @@ export const durationAnomalyAlertFactory: UptimeAlertTypeFactory<ActionGroupIds>
},
],
actionVariables: {
context: [],
context: [ACTION_VARIABLES[ALERT_REASON_MSG]],
state: [...durationAnomalyTranslations.actionVariables, ...commonStateTranslations],
},
isExportable: true,
@@ -122,6 +123,10 @@ export const durationAnomalyAlertFactory: UptimeAlertTypeFactory<ActionGroupIds>
anomalies.forEach((anomaly, index) => {
const summary = getAnomalySummary(anomaly, monitorInfo);
const alertReasonMessage = generateAlertMessage(
CommonDurationAnomalyTranslations.defaultActionMessage,
summary
);
const alertInstance = alertWithLifecycle({
id: DURATION_ANOMALY.id + index,
@@ -133,17 +138,16 @@ export const durationAnomalyAlertFactory: UptimeAlertTypeFactory<ActionGroupIds>
'anomaly.bucket_span.minutes': summary.bucketSpan,
[ALERT_EVALUATION_VALUE]: anomaly.actualSort,
[ALERT_EVALUATION_THRESHOLD]: anomaly.typicalSort,
[ALERT_REASON]: generateAlertMessage(
CommonDurationAnomalyTranslations.defaultActionMessage,
summary
),
[ALERT_REASON]: alertReasonMessage,
},
});
alertInstance.replaceState({
...updateState(state, false),
...summary,
});
alertInstance.scheduleActions(DURATION_ANOMALY.id);
alertInstance.scheduleActions(DURATION_ANOMALY.id, {
[ALERT_REASON_MSG]: alertReasonMessage,
});
});
}


@@ -241,6 +241,9 @@ describe('status check alert', () => {
expect(alertInstanceMock.scheduleActions.mock.calls[0]).toMatchInlineSnapshot(`
Array [
"xpack.uptime.alerts.actionGroups.monitorStatus",
Object {
"reason": "First from harrisburg failed 234 times in the last 15 mins. Alert when > 5.",
},
]
`);
});
@@ -308,6 +311,9 @@ describe('status check alert', () => {
expect(alertInstanceMock.scheduleActions.mock.calls[0]).toMatchInlineSnapshot(`
Array [
"xpack.uptime.alerts.actionGroups.monitorStatus",
Object {
"reason": "First from harrisburg failed 234 times in the last 15m. Alert when > 5.",
},
]
`);
});
@@ -776,15 +782,27 @@ describe('status check alert', () => {
Array [
Array [
"xpack.uptime.alerts.actionGroups.monitorStatus",
Object {
"reason": "Foo from harrisburg 35 days availability is 99.28%. Alert when < 99.34%.",
},
],
Array [
"xpack.uptime.alerts.actionGroups.monitorStatus",
Object {
"reason": "Foo from fairbanks 35 days availability is 98.03%. Alert when < 99.34%.",
},
],
Array [
"xpack.uptime.alerts.actionGroups.monitorStatus",
Object {
"reason": "Unreliable from fairbanks 35 days availability is 90.92%. Alert when < 99.34%.",
},
],
Array [
"xpack.uptime.alerts.actionGroups.monitorStatus",
Object {
"reason": "no-name from fairbanks 35 days availability is 90.92%. Alert when < 99.34%.",
},
],
]
`);


@@ -36,6 +36,7 @@ import { getUptimeIndexPattern, IndexPatternTitleAndFields } from '../requests/g
import { UMServerLibs, UptimeESClient, createUptimeESClient } from '../lib';
import { ActionGroupIdsOf } from '../../../../alerting/common';
import { formatDurationFromTimeUnitChar, TimeUnitChar } from '../../../../observability/common';
import { ALERT_REASON_MSG, MESSAGE, MONITOR_WITH_GEO, ACTION_VARIABLES } from './action_variables';
export type ActionGroupIds = ActionGroupIdsOf<typeof MONITOR_STATUS>;
/**
@@ -268,25 +269,9 @@ export const statusCheckAlertFactory: UptimeAlertTypeFactory<ActionGroupIds> = (
],
actionVariables: {
context: [
{
name: 'message',
description: i18n.translate(
'xpack.uptime.alerts.monitorStatus.actionVariables.context.message.description',
{
defaultMessage: 'A generated message summarizing the currently down monitors',
}
),
},
{
name: 'downMonitorsWithGeo',
description: i18n.translate(
'xpack.uptime.alerts.monitorStatus.actionVariables.context.downMonitorsWithGeo.description',
{
defaultMessage:
'A generated summary that shows some or all of the monitors detected as "down" by the alert',
}
),
},
ACTION_VARIABLES[MESSAGE],
ACTION_VARIABLES[MONITOR_WITH_GEO],
ACTION_VARIABLES[ALERT_REASON_MSG],
],
state: [...commonMonitorStateI18, ...commonStateTranslations],
},
@@ -375,7 +360,9 @@ export const statusCheckAlertFactory: UptimeAlertTypeFactory<ActionGroupIds> = (
...updateState(state, true),
});
alert.scheduleActions(MONITOR_STATUS.id);
alert.scheduleActions(MONITOR_STATUS.id, {
[ALERT_REASON_MSG]: monitorSummary.reason,
});
}
return updateState(state, downMonitorsByLocation.length > 0);
}
@@ -432,7 +419,9 @@ export const statusCheckAlertFactory: UptimeAlertTypeFactory<ActionGroupIds> = (
statusMessage,
});
alert.scheduleActions(MONITOR_STATUS.id);
alert.scheduleActions(MONITOR_STATUS.id, {
[ALERT_REASON_MSG]: monitorSummary.reason,
});
});
return updateState(state, downMonitorsByLocation.length > 0);